From 3ff7e0a0657c41e5597b5bc3eb6720f681623be7 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 24 Jul 2024 20:30:43 +0400 Subject: [PATCH 01/54] add parallel test execution, fix https://github.com/Altinity/clickhouse-backup/issues/888 --- .github/workflows/build.yaml | 9 +- test/integration/docker-compose.yml | 50 +- test/integration/docker-compose_advanced.yml | 65 +- test/integration/integration_test.go | 1694 +++++++++--------- test/integration/run.sh | 4 +- 5 files changed, 909 insertions(+), 913 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 221e9b6b..572621d8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,7 +7,7 @@ on: push: branches: - - master + - * jobs: build: @@ -311,12 +311,9 @@ jobs: export COMPOSE_FILE=docker-compose.yml fi - command -v docker-compose || (apt-get update && apt-get install -y python3-pip && pip3 install -U docker-compose) - + export CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" - docker-compose -f test/integration/${COMPOSE_FILE} up -d || ( docker-compose -f test/integration/${COMPOSE_FILE} ps -a && docker-compose -f test/integration/${COMPOSE_FILE} logs clickhouse && exit 1 ) - docker-compose -f test/integration/${COMPOSE_FILE} ps -a - go test -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go + go test -parallel 4 -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: GOROOT: ${{ env.GOROOT_1_22_X64 }} diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index c662959f..54c4abcd 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -1,18 +1,15 @@ services: sshd: image: docker.io/panubo/sshd:latest - container_name: sshd + hostname: sshd 
environment: SSH_ENABLE_ROOT: "true" SSH_ENABLE_PASSWORD_AUTH: "true" command: sh -c 'echo "PermitRootLogin yes" >> /etc/ssh/sshd_config && echo "LogLevel DEBUG3" >> /etc/ssh/sshd_config && echo "root:JFzMHfVpvTgEd74XXPq6wARA2Qg3AutJ" | chpasswd && /usr/sbin/sshd -D -e -f /etc/ssh/sshd_config' - networks: - - clickhouse-backup ftp: image: docker.io/fauria/vsftpd:latest hostname: ftp - container_name: ftp environment: FTP_USER: test_backup FTP_PASS: test_backup @@ -21,12 +18,10 @@ services: PASV_ADDR_RESOLVE: "YES" PASV_MIN_PORT: 20000 PASV_MAX_PORT: 21000 - networks: - - clickhouse-backup minio: image: docker.io/bitnami/minio:${MINIO_VERSION:-latest} - container_name: minio + hostname: minio environment: MINIO_ACCESS_KEY: access_key MINIO_SECRET_KEY: it_is_my_super_secret_key @@ -39,35 +34,27 @@ services: retries: 30 volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh - networks: - - clickhouse-backup # todo need to reproduce download after upload gcs: image: fsouza/fake-gcs-server:latest hostname: gcs - container_name: gcs entrypoint: - /bin/sh command: - -c - "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080" - networks: - - clickhouse-backup environment: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" azure: image: mcr.microsoft.com/azure-storage/azurite:latest - container_name: azure hostname: devstoreaccount1.blob.azure healthcheck: test: nc 127.0.0.1 10000 -z interval: 1s retries: 30 command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"] - networks: - - clickhouse-backup # azure_init: # image: mcr.microsoft.com/azure-cli:latest @@ -83,8 +70,6 @@ services: # environment: # # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools # AZURE_STORAGE_CONNECTION_STRING: 
DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1; - # networks: - # - clickhouse-backup zookeeper: # @TODO back :latest default value after resolve https://github.com/ClickHouse/ClickHouse/issues/53749 @@ -92,8 +77,6 @@ services: hostname: zookeeper environment: ZOO_4LW_COMMANDS_WHITELIST: "*" - networks: - - clickhouse-backup healthcheck: test: bash -c 'if [[ "$$(echo 'ruok' | nc 127.0.0.1 2181)" == "imok" ]]; then exit 0; else exit 1; fi' interval: 3s @@ -105,7 +88,6 @@ services: clickhouse-backup: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-1.1.54390} hostname: clickhouse-backup - container_name: clickhouse-backup user: root entrypoint: - /bin/bash @@ -113,7 +95,7 @@ services: - sleep infinity healthcheck: test: bash -c "exit 0" - interval: 30s + interval: 3s timeout: 1s retries: 5 start_period: 1s @@ -139,11 +121,9 @@ services: volumes_from: - clickhouse ports: - - "7171:7171" + - "7171" # for delve debugger -# - "40001:40001" - networks: - - clickhouse-backup +# - "40001" depends_on: clickhouse: condition: service_healthy @@ -151,7 +131,6 @@ services: clickhouse: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-1.1.54390} hostname: clickhouse - container_name: clickhouse restart: always user: root environment: @@ -222,22 +201,20 @@ services: # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log ports: - - "8123:8123" - - "9000:9000" + - "8123" + - "9000" # for delve debugger - - "40001:40001" - networks: - - clickhouse-backup + - "40001" links: - zookeeper - minio - sshd - ftp - azure -# - gcs + - gcs healthcheck: test: clickhouse client -q "SELECT 1" - interval: 10s + interval: 3s timeout: 2s retries: 30 start_period: 5s 
@@ -248,6 +225,8 @@ services: condition: service_healthy azure: condition: service_healthy + gcs: + condition: service_healthy # azure_init: # condition: service_completed_successfully @@ -255,7 +234,4 @@ services: image: hello-world depends_on: clickhouse-backup: - condition: service_healthy - -networks: - clickhouse-backup: + condition: service_healthy \ No newline at end of file diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 7a2769ec..869872bf 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -1,18 +1,15 @@ services: sshd: image: docker.io/panubo/sshd:latest - container_name: sshd + hostname: sshd environment: SSH_ENABLE_ROOT: "true" SSH_ENABLE_PASSWORD_AUTH: "true" command: sh -c 'echo "PermitRootLogin yes" >> /etc/ssh/sshd_config && echo "LogLevel DEBUG3" >> /etc/ssh/sshd_config && echo "root:JFzMHfVpvTgEd74XXPq6wARA2Qg3AutJ" | chpasswd && /usr/sbin/sshd -D -e -f /etc/ssh/sshd_config' - networks: - - clickhouse-backup # ftp: # image: docker.io/fauria/vsftpd:latest # hostname: ftp -# container_name: ftp # environment: # FTP_USER: test_backup # FTP_PASS: test_backup @@ -21,24 +18,19 @@ services: # PASV_ADDR_RESOLVE: "YES" # PASV_MIN_PORT: 21100 # PASV_MAX_PORT: 21110 -# networks: -# - clickhouse-backup ftp: image: docker.io/iradu/proftpd:latest hostname: ftp - container_name: ftp environment: FTP_USER_NAME: "test_backup" FTP_USER_PASS: "test_backup" FTP_MASQUERADEADDRESS: "yes" FTP_PASSIVE_PORTS: "21100 31100" - networks: - - clickhouse-backup minio: image: docker.io/bitnami/minio:${MINIO_VERSION:-latest} - container_name: minio + hostname: minio environment: MINIO_ACCESS_KEY: access_key MINIO_SECRET_KEY: it_is_my_super_secret_key @@ -47,33 +39,27 @@ services: MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: test: curl -sL http://localhost:9000/ - interval: 10s + interval: 3s retries: 30 volumes: - 
./minio_nodelete.sh:/bin/minio_nodelete.sh ports: - - "9001:9001" - networks: - - clickhouse-backup + - "9001" # todo need to reproduce download after upload gcs: image: fsouza/fake-gcs-server:latest hostname: gcs - container_name: gcs entrypoint: - /bin/sh command: - -c - "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080" - networks: - - clickhouse-backup environment: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" azure: image: mcr.microsoft.com/azure-storage/azurite:latest - container_name: azure hostname: devstoreaccount1.blob.azure healthcheck: test: nc 127.0.0.1 10000 -z @@ -82,8 +68,6 @@ services: command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"] # environment: # - AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" - networks: - - clickhouse-backup # azure_init: # image: mcr.microsoft.com/azure-cli:latest @@ -99,20 +83,15 @@ services: # environment: # # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools # AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1; - # networks: - # - clickhouse-backup mysql: image: docker.io/mysql:${MYSQL_VERSION:-latest} command: --gtid_mode=on --enforce_gtid_consistency=ON hostname: mysql - container_name: mysql environment: MYSQL_ROOT_PASSWORD: "root" ports: - - "3306:3306" - networks: - - clickhouse-backup + - "3306" healthcheck: test: mysqladmin -p=root ping -h localhost timeout: 20s @@ -121,16 +100,13 @@ services: pgsql: image: docker.io/postgres:${PGSQL_VERSION:-latest} hostname: pgsql - container_name: pgsql environment: POSTGRES_USER: "root" POSTGRES_PASSWORD: "root" # to allow connection from 
clickhouse 21.3 POSTGRES_HOST_AUTH_METHOD: "md5" ports: - - "5432:5432" - networks: - - clickhouse-backup + - "5432" command: [ "postgres", "-c", "wal_level=logical" ] healthcheck: test: pg_isready @@ -147,8 +123,6 @@ services: environment: - CLICKHOUSE_UID=0 - CLICKHOUSE_GID=0 - networks: - - clickhouse-backup healthcheck: test: bash -c 'if [[ "$$(echo 'ruok' | nc 127.0.0.1 2181)" == "imok" ]]; then exit 0; else exit 1; fi' interval: 3s @@ -160,7 +134,6 @@ services: clickhouse-backup: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-19.17} hostname: clickhouse-backup - container_name: clickhouse-backup user: root entrypoint: - /bin/bash @@ -198,11 +171,9 @@ services: volumes_from: - clickhouse ports: - - "7171:7171" + - "7171" # for delve debugger - - "40001:40001" - networks: - - clickhouse-backup + - "40001" depends_on: clickhouse: condition: service_healthy @@ -210,7 +181,6 @@ services: clickhouse: image: docker.io/${CLICKHOUSE_IMAGE:-yandex/clickhouse-server}:${CLICKHOUSE_VERSION:-19.17} hostname: clickhouse - container_name: clickhouse restart: always user: root environment: @@ -292,12 +262,10 @@ services: # - ./clickhouse-server.log:/var/log/clickhouse-server/clickhouse-server.log # - ./clickhouse-server.err.log:/var/log/clickhouse-server/clickhouse-server.err.log ports: - - "8123:8123" - - "9000:9000" + - "8123" + - "9000" # for delve debugger - - "40002:40002" - networks: - - clickhouse-backup + - "40002" links: - zookeeper - minio @@ -306,13 +274,13 @@ services: - pgsql - ftp - azure -# - gcs + - gcs healthcheck: test: clickhouse client -q "SELECT 1" - interval: 10s + interval: 3s timeout: 2s retries: 30 - start_period: 5s + start_period: 3s depends_on: mysql: condition: service_healthy @@ -331,7 +299,4 @@ services: image: hello-world depends_on: clickhouse-backup: - condition: service_healthy - -networks: - clickhouse-backup: + condition: service_healthy \ No newline at end of file diff --git 
a/test/integration/integration_test.go b/test/integration/integration_test.go index e16bb007..20afdd23 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -10,6 +10,7 @@ import ( "math/rand" "os" "os/exec" + "path" "reflect" "regexp" "strconv" @@ -33,6 +34,16 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/utils" ) +//setup log level +func init() { + log.SetHandler(logcli.New(os.Stdout)) + logLevel := "info" + if os.Getenv("LOG_LEVEL") != "" { + logLevel = os.Getenv("LOG_LEVEL") + } + log.SetLevelFromString(logLevel) +} + const dbNameAtomic = "_test#$.ДБ_atomic_" const dbNameOrdinary = "_test#$.ДБ_ordinary_" const dbNameMySQL = "mysql_db" @@ -40,6 +51,7 @@ const dbNamePostgreSQL = "pgsql_db" const Issue331Atomic = "_issue331._atomic_" const Issue331Ordinary = "_issue331.ordinary_" + type TestDataStruct struct { Database string DatabaseEngine string @@ -56,6 +68,11 @@ type TestDataStruct struct { CheckDatabaseOnly bool } +type TestEnvironment struct { + ch *clickhouse.ClickHouse + ProjectName string +} + var defaultTestData = []TestDataStruct{ { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -394,30 +411,27 @@ var defaultIncrementData = []TestDataStruct{ }, } -func init() { - log.SetHandler(logcli.New(os.Stdout)) - logLevel := "info" - if os.Getenv("LOG_LEVEL") != "" { - logLevel = os.Getenv("LOG_LEVEL") +func NewTestEnvironment(t *testing.T, r *require.Assertions) *TestEnvironment { + t.Helper() + if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { + t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } - log.SetLevelFromString(logLevel) + env := TestEnvironment{ + ProjectName: strings.ToLower(t.Name()), + } + upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") + r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", upCmd...)) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54394") <= 0 { r := require.New(&testing.T{}) - 
installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - } - /* - r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) - installDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - // rsync - installDebIfNotExists(r, "clickhouse-backup", "openssh-client", "rsync") - // kopia - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) - installDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") - // restic - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) - */ + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) + } + t.Cleanup(func() { + downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") + r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", downCmd...)) + }) + return &env } // TestS3NoDeletePermission - no parallel @@ -426,25 +440,27 @@ func TestS3NoDeletePermission(t *testing.T) { t.Skip("Skipping Advanced integration tests...") 
return } + t.Parallel() r := require.New(t) - r.NoError(dockerExec("minio", "/bin/minio_nodelete.sh")) - r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 2*time.Second) - defer ch.chbackend.Close() - generateTestData(t, r, ch, "S3", defaultTestData) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) - r.Error(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 2*time.Second) + defer env.ch.Close() + + r.NoError(env.DockerExec("minio", "/bin/minio_nodelete.sh")) + r.NoError(env.DockerCP("config-s3-nodelete.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + + generateTestData(t, r, env, "S3", defaultTestData) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) + r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) - 
r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) - checkObjectStorageIsEmpty(t, r, "S3") + dropDatabasesFromTestDataDataSet(t, r, env, databaseList) + r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) + env.checkObjectStorageIsEmpty(t, r, "S3") } // TestRBAC need clickhouse-server restart, no parallel @@ -453,66 +469,67 @@ func TestRBAC(t *testing.T) { if compareVersion(chVersion, "20.4") < 0 { t.Skipf("Test skipped, RBAC not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) + env := NewTestEnvironment(t, r) testRBACScenario := func(config string) { - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - ch.queryWithNoError(r, "DROP SETTINGS PROFILE IF EXISTS `test.rbac-name`") - ch.queryWithNoError(r, "DROP QUOTA IF EXISTS `test.rbac-name`") - ch.queryWithNoError(r, "DROP ROW POLICY IF EXISTS `test.rbac-name` ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE IF EXISTS `test.rbac-name`") - ch.queryWithNoError(r, "DROP USER IF EXISTS `test.rbac-name`") + env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + env.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") + env.queryWithNoError(r, "DROP SETTINGS 
PROFILE IF EXISTS `test.rbac-name`") + env.queryWithNoError(r, "DROP QUOTA IF EXISTS `test.rbac-name`") + env.queryWithNoError(r, "DROP ROW POLICY IF EXISTS `test.rbac-name` ON default.test_rbac") + env.queryWithNoError(r, "DROP ROLE IF EXISTS `test.rbac-name`") + env.queryWithNoError(r, "DROP USER IF EXISTS `test.rbac-name`") createRBACObjects := func(drop bool) { if drop { log.Info("drop all RBAC related objects") - ch.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") - ch.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") - ch.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE `test.rbac-name`") - ch.queryWithNoError(r, "DROP USER `test.rbac-name`") + env.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") + env.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") + env.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") + env.queryWithNoError(r, "DROP ROLE `test.rbac-name`") + env.queryWithNoError(r, "DROP USER `test.rbac-name`") } log.Info("create RBAC related objects") - ch.queryWithNoError(r, "CREATE SETTINGS PROFILE `test.rbac-name` SETTINGS max_execution_time=60") - ch.queryWithNoError(r, "CREATE ROLE `test.rbac-name` SETTINGS PROFILE `test.rbac-name`") - ch.queryWithNoError(r, "CREATE USER `test.rbac-name` IDENTIFIED BY 'test_rbac_password' DEFAULT ROLE `test.rbac-name`") - ch.queryWithNoError(r, "CREATE QUOTA `test.rbac-name` KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO `test.rbac-name`") - ch.queryWithNoError(r, "CREATE ROW POLICY `test.rbac-name` ON default.test_rbac USING 1=1 AS RESTRICTIVE TO `test.rbac-name`") + env.queryWithNoError(r, "CREATE SETTINGS PROFILE `test.rbac-name` SETTINGS max_execution_time=60") + env.queryWithNoError(r, "CREATE ROLE `test.rbac-name` SETTINGS PROFILE `test.rbac-name`") + env.queryWithNoError(r, "CREATE USER `test.rbac-name` IDENTIFIED BY 'test_rbac_password' DEFAULT ROLE `test.rbac-name`") + 
env.queryWithNoError(r, "CREATE QUOTA `test.rbac-name` KEYED BY user_name FOR INTERVAL 1 hour NO LIMITS TO `test.rbac-name`") + env.queryWithNoError(r, "CREATE ROW POLICY `test.rbac-name` ON default.test_rbac USING 1=1 AS RESTRICTIVE TO `test.rbac-name`") } createRBACObjects(false) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "--env", "S3_COMPRESSION_FORMAT=zstd", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "--env", "S3_COMPRESSION_FORMAT=zstd", "test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) log.Info("create conflicted RBAC objects") createRBACObjects(true) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) log.Info("download+restore RBAC") - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup")) - out, err := 
dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") log.Debug(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) - out, err = dockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac-only test_rbac_backup") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac-only test_rbac_backup") log.Debug(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - ch.chbackend.Close() + env.ch.Close() // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) - ch.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) + env.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) - r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) rbacTypes := map[string]string{ "PROFILES": "test.rbac-name", @@ -525,7 +542,7 @@ func TestRBAC(t *testing.T) { var rbacRows []struct { Name string `ch:"name"` } - err := ch.chbackend.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) + err := env.ch.Select(&rbacRows, fmt.Sprintf("SHOW %s", rbacType)) r.NoError(err) found := false for _, row := range rbacRows { @@ -536,20 +553,20 @@ func TestRBAC(t *testing.T) { } } if !found { - //r.NoError(dockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) + 
//r.NoError(env.DockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) } } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "remote", "test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "remote", "test_rbac_backup")) - ch.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") - ch.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") - ch.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") - ch.queryWithNoError(r, "DROP ROLE `test.rbac-name`") - ch.queryWithNoError(r, "DROP USER `test.rbac-name`") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") - ch.chbackend.Close() + env.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") + env.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") + env.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") + env.queryWithNoError(r, "DROP ROLE `test.rbac-name`") + env.queryWithNoError(r, "DROP USER `test.rbac-name`") + env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_rbac") + env.ch.Close() } testRBACScenario("/etc/clickhouse-backup/config-s3.yml") if compareVersion(chVersion, "24.1") >= 0 { @@ -565,64 +582,65 @@ func TestRBAC(t *testing.T) { // TestConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container // need clickhouse-server restart, no parallel func TestConfigs(t *testing.T) { - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) + env := NewTestEnvironment(t, r) testConfigsScenario := func(config string) { - 
ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") - ch.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") + env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) + env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + env.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") - r.NoError(dockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup")) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") + r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup")) + env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") compression := "" if !strings.Contains(config, "embedded") { compression = "--env AZBLOB_COMPRESSION_FORMAT=zstd --env S3_COMPRESSION_FORMAT=zstd" } - r.NoError(dockerExec("clickhouse", "bash", "-xec", "clickhouse-backup upload "+compression+" --env CLICKHOUSE_BACKUP_CONFIG="+config+" --env S3_COMPRESSION_FORMAT=none --env ALLOW_EMPTY_BACKUPS=1 test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "clickhouse-backup upload "+compression+" --env CLICKHOUSE_BACKUP_CONFIG="+config+" --env S3_COMPRESSION_FORMAT=none --env ALLOW_EMPTY_BACKUPS=1 test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) - 
ch.queryWithNoError(r, "SYSTEM RELOAD CONFIG") - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + env.queryWithNoError(r, "SYSTEM RELOAD CONFIG") + env.ch.Close() + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" var settings string - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) + r.NoError(env.ch.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) if settings != "1" { - r.NoError(dockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) + r.NoError(env.DockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) } r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) - r.NoError(ch.chbackend.Query("SYSTEM RELOAD CONFIG")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + r.NoError(env.ch.Query("SYSTEM RELOAD CONFIG")) + env.ch.Close() + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + 
r.NoError(env.ch.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") - r.NoError(dockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) - ch.chbackend.Close() - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + env.ch.Close() + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) settings = "" - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") isTestConfigsTablePresent := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) + r.NoError(env.ch.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) - r.NoError(dockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "remote", 
"test_configs_backup")) - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "remote", "test_configs_backup")) + r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - ch.chbackend.Close() + env.ch.Close() } testConfigsScenario("/etc/clickhouse-backup/config-s3.yml") chVersion := os.Getenv("CLICKHOUSE_VERSION") @@ -638,39 +656,40 @@ func TestConfigs(t *testing.T) { // TestLongListRemote - no parallel, cause need to restart minio func TestLongListRemote(t *testing.T) { - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() totalCacheCount := 20 testBackupName := "test_list_remote" for i := 0; i < totalCacheCount; i++ { - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) } - r.NoError(dockerExec("clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(env.DockerExec("clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) time.Sleep(2 * time.Second) startFirst := 
time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) noCacheDuration := time.Since(startFirst) - r.NoError(dockerExec("clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(env.DockerExec("clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) startCashed := time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) cashedDuration := time.Since(startCashed) r.Greater(noCacheDuration, cashedDuration) - r.NoError(dockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) + r.NoError(env.DockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) time.Sleep(2 * time.Second) startCacheClear := time.Now() - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) cacheClearDuration := time.Since(startCacheClear) r.Greater(cacheClearDuration, cashedDuration) @@ -680,63 +699,66 @@ func TestLongListRemote(t *testing.T) { for i := 0; i < totalCacheCount; i++ { testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", testBackupName, i) } - fullCleanup(t, r, ch, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") + fullCleanup(t, r, 
env, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") } +const apiBackupNumber = 5 + func TestServerAPI(t *testing.T) { - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 10*time.Second) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 10*time.Second) defer func() { - ch.chbackend.Close() + env.ch.Close() }() - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) fieldTypes := []string{"UInt64", "String", "Int"} - installDebIfNotExists(r, "clickhouse-backup", "curl", "jq") + env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "jq") maxTables := 10 minFields := 10 randFields := 10 - fillDatabaseForAPIServer(maxTables, minFields, randFields, ch, r, fieldTypes) + fillDatabaseForAPIServer(maxTables, minFields, randFields, env, r, fieldTypes) log.Info("Run `clickhouse-backup server --watch` in background") - r.NoError(dockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) + r.NoError(env.DockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) time.Sleep(1 * time.Second) - testAPIBackupVersion(r) + testAPIBackupVersion(r, env) - testAPIBackupCreate(r) + testAPIBackupCreate(r, env) - testAPIBackupTables(r) + testAPIBackupTables(r, env) - testAPIBackupUpload(r) + testAPIBackupUpload(r, env) - testAPIBackupTablesRemote(r) + testAPIBackupTablesRemote(r, env) log.Info("Check /backup/actions") - ch.queryWithNoError(r, "SELECT count() FROM system.backup_actions") + env.queryWithNoError(r, "SELECT count() FROM system.backup_actions") - testAPIBackupList(t, r) + testAPIBackupList(t, r, env) - testAPIDeleteLocalDownloadRestore(r) + 
testAPIDeleteLocalDownloadRestore(r, env) - testAPIMetrics(r, ch) + testAPIMetrics(r, env) - testAPIWatchAndKill(r, ch) + testAPIWatchAndKill(r, env) - testAPIBackupActions(r, ch) + testAPIBackupActions(r, env) - testAPIRestart(r, ch) + testAPIRestart(r, env) - testAPIBackupDelete(r) + testAPIBackupDelete(r, env) - testAPIBackupClean(r, ch) + testAPIBackupClean(r, env) - r.NoError(dockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup")) - r.NoError(ch.dropDatabase("long_schema")) + r.NoError(env.DockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup")) + r.NoError(env.dropDatabase("long_schema")) } -func testAPIRestart(r *require.Assertions, ch *TestClickHouse) { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") +func testAPIRestart(r *require.Assertions, env *TestEnvironment) { + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") log.Debug(out) r.NoError(err) r.Contains(out, "acknowledged") @@ -745,13 +767,13 @@ func testAPIRestart(r *require.Assertions, ch *TestClickHouse) { time.Sleep(6 * time.Second) var inProgressActions uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&inProgressActions, "SELECT count() FROM system.backup_actions WHERE status!=?", status.CancelStatus)) + r.NoError(env.ch.SelectSingleRowNoCtx(&inProgressActions, "SELECT count() FROM system.backup_actions WHERE status!=?", status.CancelStatus)) r.Equal(uint64(0), inProgressActions) } -func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *TestClickHouse, commands []string, needWait bool) { +func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, env *TestEnvironment, commands []string, needWait bool) { sql := "INSERT INTO system.backup_actions(command) " + "VALUES ('" + strings.Join(commands, "'),('") + "')" - out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse 
client --echo -mn -q \"%s\"", sql)) + out, err := env.DockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse client --echo -mn -q \"%s\"", sql)) log.Debug(out) r.NoError(err) if needWait { @@ -759,7 +781,7 @@ func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *Tes for { time.Sleep(500 * time.Millisecond) var commandStatus string - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&commandStatus, "SELECT status FROM system.backup_actions WHERE command=?", command)) + r.NoError(env.ch.SelectSingleRowNoCtx(&commandStatus, "SELECT status FROM system.backup_actions WHERE command=?", command)) if commandStatus != status.InProgressStatus { break } @@ -767,30 +789,31 @@ func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, ch *Tes } } } -func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create_remote actions_backup1"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"create actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"upload actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2"}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"download actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"restore --rm actions_backup2"}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"delete local actions_backup2", "delete remote actions_backup2"}, false) +func testAPIBackupActions(r *require.Assertions, env *TestEnvironment) { + runClickHouseClientInsertSystemBackupActions(r, 
env, []string{"create_remote actions_backup1"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"delete local actions_backup1", "restore_remote --rm actions_backup1"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"delete local actions_backup1", "delete remote actions_backup1"}, false) + + runClickHouseClientInsertSystemBackupActions(r, env, []string{"create actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"upload actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"delete local actions_backup2"}, false) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"download actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"restore --rm actions_backup2"}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"delete local actions_backup2", "delete remote actions_backup2"}, false) inProgressActions := make([]struct { Command string `ch:"command"` Status string `ch:"status"` }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE '%actions%' AND status IN (?,?)", status.InProgressStatus, status.ErrorStatus)) + r.NoError(env.ch.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE '%actions%' AND status IN (?,?)", status.InProgressStatus, status.ErrorStatus)) r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) var actionsBackups uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&actionsBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'backup_action%'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&actionsBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'backup_action%'")) r.Equal(uint64(0), actionsBackups) - out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + out, err := 
env.DockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") r.NoError(err) r.Contains(out, "clickhouse_backup_last_create_remote_status 1") r.Contains(out, "clickhouse_backup_last_create_status 1") @@ -800,16 +823,16 @@ func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { r.Contains(out, "clickhouse_backup_last_restore_status 1") } -func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { +func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/watch + /backup/kill") runKillCommand := func(command string) { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) log.Debug(out) r.NoError(err) } checkWatchBackup := func(expectedCount uint64) { var watchBackups uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&watchBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'shard%'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&watchBackups, "SELECT count() FROM system.backup_list WHERE name LIKE 'shard%'")) r.Equal(expectedCount, watchBackups) } @@ -818,7 +841,7 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { Status string `ch:"status"` Command string `ch:"command"` }, 0) - r.NoError(ch.chbackend.StructSelect(&canceledCommands, "SELECT status, command FROM system.backup_actions WHERE command LIKE 'watch%'")) + r.NoError(env.ch.StructSelect(&canceledCommands, "SELECT status, command FROM system.backup_actions WHERE command LIKE 'watch%'")) r.Equal(expectedCount, len(canceledCommands)) for i := range canceledCommands { r.Equal("watch", canceledCommands[i].Command) @@ -830,7 +853,7 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { runKillCommand("watch") checkCanceledCommand(1) - out, err := 
dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") log.Debug(out) r.NoError(err) time.Sleep(7 * time.Second) @@ -840,25 +863,25 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { checkCanceledCommand(2) } -func testAPIBackupDelete(r *require.Assertions) { +func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/delete/{where}/{name}") for i := 1; i <= apiBackupNumber; i++ { - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) log.Infof(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) log.Infof(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") } - out, err := dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + out, err := env.DockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") r.NoError(err) r.Contains(out, "clickhouse_backup_last_delete_status 1") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XGET 'http://localhost:7171/backup/list'")) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XGET 
'http://localhost:7171/backup/list'")) log.Infof(out) r.NoError(err) scanner := bufio.NewScanner(strings.NewReader(out)) @@ -873,7 +896,7 @@ func testAPIBackupDelete(r *require.Assertions) { } listItem := backupJSON{} r.NoError(json.Unmarshal(scanner.Bytes(), &listItem)) - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name)) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name)) log.Infof(out) r.NoError(err) } @@ -882,38 +905,38 @@ func testAPIBackupDelete(r *require.Assertions) { } -func testAPIBackupClean(r *require.Assertions, ch *TestClickHouse) { +func testAPIBackupClean(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/clean/ /backup/clean_remote_broken/ and /backup/actions fot these two commands") - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean'")) + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean'")) log.Infof(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean/remote_broken'")) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean/remote_broken'")) log.Infof(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") - runClickHouseClientInsertSystemBackupActions(r, ch, []string{"clean", "clean_remote_broken"}, false) + runClickHouseClientInsertSystemBackupActions(r, env, []string{"clean", 
"clean_remote_broken"}, false) } -func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { +func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { log.Info("Check /metrics clickhouse_backup_last_backup_size_remote") var lastRemoteSize int64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) var realTotalBytes uint64 if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(total_bytes) FROM system.tables WHERE database='long_schema'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(total_bytes) FROM system.tables WHERE database='long_schema'")) } else { - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(bytes_on_disk) FROM system.parts WHERE database='long_schema'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&realTotalBytes, "SELECT sum(bytes_on_disk) FROM system.parts WHERE database='long_schema'")) } r.Greater(realTotalBytes, uint64(0)) r.Greater(uint64(lastRemoteSize), realTotalBytes) - out, err := dockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics") + out, err := env.DockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics") log.Debug(out) r.NoError(err) r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) @@ -921,15 +944,15 @@ func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { log.Info("Check /metrics clickhouse_backup_number_backups_*") r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) // +1 watch backup - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) + 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_remote %d", apiBackupNumber+1)) r.Contains(out, "clickhouse_backup_number_backups_local_expected 0") r.Contains(out, "clickhouse_backup_number_backups_remote_expected 0") } -func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { +func testAPIDeleteLocalDownloadRestore(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") - out, err := dockerExecOut( + out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber), @@ -939,20 +962,20 @@ func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { r.NotContains(out, "another operation is currently running") r.NotContains(out, "error") - out, err = dockerExecOut("clickhouse-backup", "curl", "-sfL", "http://localhost:7171/backup/actions?filter=download") + out, err = env.DockerExecOut("clickhouse-backup", "curl", "-sfL", "http://localhost:7171/backup/actions?filter=download") r.NoError(err) r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + out, err = env.DockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") r.NoError(err) r.Contains(out, "clickhouse_backup_last_delete_status 1") r.Contains(out, "clickhouse_backup_last_download_status 1") r.Contains(out, "clickhouse_backup_last_restore_status 1") } -func testAPIBackupList(t *testing.T, r *require.Assertions) { +func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment) { 
log.Info("Check /backup/list") - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") log.Debug(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { @@ -961,7 +984,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions) { } log.Info("Check /backup/list/local") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") log.Debug(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { @@ -970,7 +993,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions) { } log.Info("Check /backup/list/remote") - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") log.Debug(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { @@ -979,9 +1002,9 @@ func testAPIBackupList(t *testing.T, r *require.Assertions) { } } -func testAPIBackupUpload(r *require.Assertions) { +func testAPIBackupUpload(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/upload") - out, err := dockerExecOut( + out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber), @@ -991,18 +1014,18 @@ func testAPIBackupUpload(r *require.Assertions) { r.NotContains(out, "error") r.NotContains(out, "another operation is currently running") - out, err = dockerExecOut("clickhouse-backup", "curl", "-sfL", "http://localhost:7171/backup/actions?filter=upload") + out, err = 
env.DockerExecOut("clickhouse-backup", "curl", "-sfL", "http://localhost:7171/backup/actions?filter=upload") r.NoError(err) r.NotContains(out, "error") - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + out, err = env.DockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") r.NoError(err) r.Contains(out, "clickhouse_backup_last_upload_status 1") } -func testAPIBackupTables(r *require.Assertions) { +func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/tables") - out, err := dockerExecOut( + out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"", ) @@ -1017,7 +1040,7 @@ func testAPIBackupTables(r *require.Assertions) { r.NotContains(out, "information_schema") log.Info("Check /backup/tables/all") - out, err = dockerExecOut( + out, err = env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", ) @@ -1034,10 +1057,10 @@ func testAPIBackupTables(r *require.Assertions) { } } -func testAPIBackupTablesRemote(r *require.Assertions) { +func testAPIBackupTablesRemote(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/tables?remote_backup=z_backup_1") - out, err := dockerExecOut( + out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables?remote_backup=z_backup_1\"", ) @@ -1053,21 +1076,21 @@ func testAPIBackupTablesRemote(r *require.Assertions) { } -func testAPIBackupVersion(r *require.Assertions) { +func testAPIBackupVersion(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/version") - cliVersion, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup --version 2>/dev/null --version | grep 'Version' | cut -d ':' -f 2 | xargs") + cliVersion, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup 
--version 2>/dev/null --version | grep 'Version' | cut -d ':' -f 2 | xargs") r.NoError(err) - apiVersion, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sL http://localhost:7171/backup/version | jq -r .version") + apiVersion, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sL http://localhost:7171/backup/version | jq -r .version") r.NoError(err) r.Equal(cliVersion, apiVersion) - tablesVersion, err := dockerExecOut("clickhouse", "bash", "-ce", "clickhouse client -q 'SELECT * FROM system.backup_version FORMAT TSVRaw'") + tablesVersion, err := env.DockerExecOut("clickhouse", "bash", "-ce", "clickhouse client -q 'SELECT * FROM system.backup_version FORMAT TSVRaw'") r.NoError(err) r.Equal(cliVersion, tablesVersion) } -func testAPIBackupCreate(r *require.Assertions) { +func testAPIBackupCreate(r *require.Assertions, env *TestEnvironment) { log.Info("Check /backup/create") - out, err := dockerExecOut( + out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber), @@ -1077,13 +1100,12 @@ func testAPIBackupCreate(r *require.Assertions) { r.NotContains(out, "Connection refused") r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") - out, err = dockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") + out, err = env.DockerExecOut("clickhouse-backup", "curl", "http://localhost:7171/metrics") r.NoError(err) r.Contains(out, "clickhouse_backup_last_create_status 1") - } -func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestClickHouse, r *require.Assertions, fieldTypes []string) { +func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestEnvironment, r *require.Assertions, fieldTypes []string) { log.Infof("Create %d 
`long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") for i := 0; i < maxTables; i++ { @@ -1105,23 +1127,23 @@ func TestSkipNotExistsTable(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.1") < 0 { t.Skip("TestSkipNotExistsTable too small time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... FREEZE PARTITION`") } - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() log.Info("Check skip not exist errors") - ch.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") + env.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS freeze_not_exists.freeze_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" ifNotExistsInsertSQL := "INSERT INTO freeze_not_exists.freeze_not_exists SELECT number FROM numbers(1000)" - chVersion, err := ch.chbackend.GetVersion(context.Background()) + chVersion, err := env.ch.GetVersion(context.Background()) r.NoError(err) freezeErrorHandled := false pauseChannel := make(chan int64) resumeChannel := make(chan int64) - ch.chbackend.Config.LogSQLQueries = true + env.ch.Config.LogSQLQueries = true wg := sync.WaitGroup{} wg.Add(2) go func() { @@ -1133,9 +1155,9 @@ func TestSkipNotExistsTable(t *testing.T) { // pausePercent := int64(90) for i := int64(0); i < 100; i++ { testBackupName := fmt.Sprintf("not_exists_%d", i) - err = ch.chbackend.Query(ifNotExistsCreateSQL) + err = env.ch.Query(ifNotExistsCreateSQL) r.NoError(err) - err = ch.chbackend.Query(ifNotExistsInsertSQL) + err = env.ch.Query(ifNotExistsInsertSQL) r.NoError(err) if i < 5 { log.Infof("pauseChannel <- %d", 0) @@ -1145,7 +1167,7 @@ func 
TestSkipNotExistsTable(t *testing.T) { pauseChannel <- pause / i } startTime := time.Now() - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create --table freeze_not_exists.freeze_not_exists "+testBackupName) + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create --table freeze_not_exists.freeze_not_exists "+testBackupName) log.Info(out) if (err != nil && (strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || (err == nil && !strings.Contains(out, "can't freeze")) { @@ -1181,11 +1203,11 @@ func TestSkipNotExistsTable(t *testing.T) { freezeErrorHandled = true log.Info("CODE 60 catched") <-resumeChannel - r.NoError(dockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName)) break } if err == nil { - err = dockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName) + err = env.DockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName) assert.NoError(t, err) } <-resumeChannel @@ -1202,7 +1224,7 @@ func TestSkipNotExistsTable(t *testing.T) { pauseStart := time.Now() time.Sleep(time.Duration(pause) * time.Nanosecond) log.Infof("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) - err = ch.chbackend.DropTable(clickhouse.Table{Database: "freeze_not_exists", Name: "freeze_not_exists"}, ifNotExistsCreateSQL, "", false, 
chVersion, "") + err = env.ch.DropTable(clickhouse.Table{Database: "freeze_not_exists", Name: "freeze_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion, "") r.NoError(err) } resumeChannel <- 1 @@ -1211,165 +1233,165 @@ func TestSkipNotExistsTable(t *testing.T) { wg.Wait() r.True(freezeErrorHandled, "freezeErrorHandled false") dropDbSQL := "DROP DATABASE IF EXISTS freeze_not_exists" - if isAtomic, err := ch.chbackend.IsAtomic("freeze_not_exists"); err == nil && isAtomic { + if isAtomic, err := env.ch.IsAtomic("freeze_not_exists"); err == nil && isAtomic { dropDbSQL += " SYNC" } - // ch.queryWithNoError(r, dropDbSQL) - err = ch.chbackend.Query(dropDbSQL) + // env.queryWithNoError(r, dropDbSQL) + err = env.ch.Query(dropDbSQL) if err != nil { - ch.chbackend.Log.Errorf("%s error: %v", dropDbSQL, err) + env.ch.Log.Errorf("%s error: %v", dropDbSQL, err) } r.NoError(err) t.Log("TestSkipNotExistsTable DONE, ALL OK") } func TestSkipTablesAndSkipTableEngines(t *testing.T) { - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() - version, err := ch.chbackend.GetVersion(context.Background()) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() + version, err := env.ch.GetVersion(context.Background()) r.NoError(err) - ch.queryWithNoError(r, "CREATE DATABASE test_skip_tables") - ch.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_merge_tree (id UInt64, s String) ENGINE=MergeTree() ORDER BY id") - ch.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_memory (id UInt64) ENGINE=Memory") - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW IF NOT EXISTS test_skip_tables.test_mv (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT * FROM test_skip_tables.test_merge_tree") + env.queryWithNoError(r, "CREATE DATABASE test_skip_tables") + 
env.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_merge_tree (id UInt64, s String) ENGINE=MergeTree() ORDER BY id") + env.queryWithNoError(r, "CREATE TABLE IF NOT EXISTS test_skip_tables.test_memory (id UInt64) ENGINE=Memory") + env.queryWithNoError(r, "CREATE MATERIALIZED VIEW IF NOT EXISTS test_skip_tables.test_mv (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT * FROM test_skip_tables.test_merge_tree") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { query := "CREATE LIVE VIEW IF NOT EXISTS test_skip_tables.test_live_view AS SELECT count() FROM test_skip_tables.test_merge_tree" - allowExperimentalAnalyzer, err := ch.chbackend.TurnAnalyzerOffIfNecessary(version, query, "") + allowExperimentalAnalyzer, err := env.ch.TurnAnalyzerOffIfNecessary(version, query, "") r.NoError(err) - ch.queryWithNoError(r, query) - r.NoError(ch.chbackend.TurnAnalyzerOnIfNecessary(version, query, allowExperimentalAnalyzer)) + env.queryWithNoError(r, query) + r.NoError(env.ch.TurnAnalyzerOnIfNecessary(version, query, allowExperimentalAnalyzer)) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { query := "CREATE WINDOW VIEW IF NOT EXISTS test_skip_tables.test_window_view ENGINE=MergeTree() ORDER BY s AS SELECT count(), s, tumbleStart(w_id) as w_start FROM test_skip_tables.test_merge_tree GROUP BY s, tumble(now(), INTERVAL '5' SECOND) AS w_id" - allowExperimentalAnalyzer, err := ch.chbackend.TurnAnalyzerOffIfNecessary(version, query, "") + allowExperimentalAnalyzer, err := env.ch.TurnAnalyzerOffIfNecessary(version, query, "") r.NoError(err) - ch.queryWithNoError(r, query) - r.NoError(ch.chbackend.TurnAnalyzerOnIfNecessary(version, query, allowExperimentalAnalyzer)) + env.queryWithNoError(r, query) + r.NoError(env.ch.TurnAnalyzerOnIfNecessary(version, query, allowExperimentalAnalyzer)) } // create - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c 
/etc/clickhouse-backup/config-s3.yml create skip_table_pattern")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_memory.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_table_pattern")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_merge_tree.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_memory.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_live_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", 
"/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_window_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,windowview,liveview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_engines")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_merge_tree.json")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_memory.json")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,windowview,liveview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_engines")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_merge_tree.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_memory.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.Error(dockerExec("clickhouse-backup", "ls", "-la", 
"/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_live_view.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_window_view.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_table_pattern")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_engines")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_table_pattern")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_engines")) //upload - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.Error(dockerExec("minio", "ls", "-la", 
"/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.NoError(env.DockerExec("minio", 
"ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.Error(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.Error(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.Error(env.DockerExec("minio", "ls", "-la", 
"/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.Error(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.Error(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(dockerExec("minio", "ls", "-la", 
"/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(dockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } //download - r.NoError(dockerExec("clickhouse-backup", "bash", 
"-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) - - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) + 
r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download 
test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - 
r.Error(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", 
"/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(dockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } //restore if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.1") >= 0 { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") } else { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) result := uint64(0) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND name!='test_memory'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM 
system.tables WHERE database='test_skip_tables' AND name!='test_memory'")) expectedTables := uint64(3) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { expectedTables = 4 @@ -1383,35 +1405,35 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } r.Equal(expectedTables, result) result = uint64(1) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND name='test_memory'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND name='test_memory'")) r.Equal(uint64(0), result) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.1") >= 0 { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") } else { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --schema test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --data test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --schema test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --data test_skip_full_backup")) result = uint64(0) expectedTables = uint64(2) if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { expectedTables = 3 } - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND engine='MergeTree'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND engine='MergeTree'")) r.Equal(expectedTables, result) result = uint64(1) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND engine IN ('Memory','MaterializedView','LiveView','WindowView')")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND engine IN ('Memory','MaterializedView','LiveView','WindowView')")) r.Equal(uint64(0), result) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.1") >= 0 { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") } else { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) result = uint64(0) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables'")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables'")) expectedTables = uint64(4) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { expectedTables = 5 @@ -1426,20 +1448,20 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Equal(expectedTables, result) if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.1") >= 0 { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables NO DELAY") } else { - ch.queryWithNoError(r, "DROP DATABASE test_skip_tables") + env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) } func TestTablePatterns(t *testing.T) { - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) + defer env.ch.Close() testBackupName := "test_backup_patterns" databaseList := []string{dbNameOrdinary, dbNameAtomic} @@ -1447,64 +1469,64 @@ func TestTablePatterns(t *testing.T) { var dbNameAtomicTest = dbNameAtomic + "_" + t.Name() for _, createPattern := range []bool{true, false} { for _, restorePattern := range []bool{true, false} { - fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") - generateTestData(t, r, ch, "S3", defaultTestData) + fullCleanup(t, r, env, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") + generateTestData(t, r, env, "S3", defaultTestData) if 
createPattern { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) - out, err := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + out, err := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) r.NoError(err) r.Contains(out, dbNameOrdinaryTest) r.NotContains(out, dbNameAtomicTest) - out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) r.NoError(err) r.Contains(out, dbNameOrdinaryTest) r.NotContains(out, dbNameAtomicTest) } else { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) - out, err := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", testBackupName) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) + out, err := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", testBackupName) r.NoError(err) r.Contains(out, 
dbNameOrdinaryTest) r.Contains(out, dbNameAtomicTest) - out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, testBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--remote-backup", testBackupName, testBackupName) r.NoError(err) r.Contains(out, dbNameOrdinaryTest) r.Contains(out, dbNameAtomicTest) } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName)) - dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName)) + dropDatabasesFromTestDataDataSet(t, r, env, databaseList) if restorePattern { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) } else { - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName)) } restored := uint64(0) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameOrdinaryTest))) + r.NoError(env.ch.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameOrdinaryTest))) r.NotZero(restored) if createPattern || restorePattern { restored = 
0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) + r.NoError(env.ch.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) // todo, old versions of clickhouse will return empty recordset r.Zero(restored) restored = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.databases WHERE name='%s'", dbNameAtomicTest))) + r.NoError(env.ch.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.databases WHERE name='%s'", dbNameAtomicTest))) // todo, old versions of clickhouse will return empty recordset r.Zero(restored) } else { restored = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) + r.NoError(env.ch.SelectSingleRowNoCtx(&restored, fmt.Sprintf("SELECT count() FROM system.tables WHERE database='%s'", dbNameAtomicTest))) r.NotZero(restored) } - fullCleanup(t, r, ch, []string{testBackupName}, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml") + fullCleanup(t, r, env, []string{testBackupName}, []string{"remote", "local"}, databaseList, true, true, "config-s3.yml") } } - checkObjectStorageIsEmpty(t, r, "S3") + env.checkObjectStorageIsEmpty(t, r, "S3") } func TestProjections(t *testing.T) { @@ -1512,43 +1534,43 @@ func TestProjections(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") == -1 { t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() - 
r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - err = ch.chbackend.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt") + r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + err = env.ch.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt") r.NoError(err) - ch.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_full")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) + env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_full")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) - ch.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number WEEK, number FROM numbers(5)") - 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--diff-from-remote", "test_backup_projection_full", "test_backup_projection_increment")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment")) + env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number WEEK, number FROM numbers(5)") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--diff-from-remote", "test_backup_projection_full", "test_backup_projection_increment")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment")) var counts uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) + r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) r.Equal(uint64(10), counts) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") >= 0 { counts = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&counts, "SELECT count() FROM system.parts WHERE database='default' AND table='table_with_projection' AND has(projections,'x')")) + r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM system.parts WHERE database='default' AND table='table_with_projection' AND has(projections,'x')")) r.Equal(uint64(10), counts) } - err = ch.chbackend.Query("DROP TABLE default.table_with_projection NO DELAY") + err = env.ch.Query("DROP TABLE default.table_with_projection NO DELAY") r.NoError(err) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment")) - 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) } func TestCheckSystemPartsColumns(t *testing.T) { @@ -1557,85 +1579,86 @@ func TestCheckSystemPartsColumns(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "23.3") == -1 { t.Skipf("Test skipped, system.parts_columns have inconsistency only in 23.3+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() - version, err = ch.chbackend.GetVersion(context.Background()) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() + version, err = env.ch.GetVersion(context.Background()) r.NoError(err) - r.NoError(dockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+t.Name()) + r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+t.Name()) // test compatible data types createSQL := "CREATE TABLE " + t.Name() + ".test_system_parts_columns(dt DateTime, v UInt64, e Enum('test' = 1)) ENGINE=MergeTree() ORDER BY tuple()" - ch.queryWithNoError(r, createSQL) - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT 
today() - INTERVAL number DAY, number, 'test' FROM numbers(10)") + env.queryWithNoError(r, createSQL) + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number, 'test' FROM numbers(10)") - ch.queryWithNoError(r, "ALTER TABLE "+t.Name()+".test_system_parts_columns MODIFY COLUMN dt Nullable(DateTime('Europe/Moscow')), MODIFY COLUMN v Nullable(UInt64), MODIFY COLUMN e Enum16('test2'=1, 'test'=2)", t.Name()) - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number, 'test2' FROM numbers(10)") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns")) + env.queryWithNoError(r, "ALTER TABLE "+t.Name()+".test_system_parts_columns MODIFY COLUMN dt Nullable(DateTime('Europe/Moscow')), MODIFY COLUMN v Nullable(UInt64), MODIFY COLUMN e Enum16('test2'=1, 'test'=2)", t.Name()) + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number, 'test2' FROM numbers(10)") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns")) - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, createSQL, "", false, version, "")) + r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, createSQL, "", false, version, "")) // test incompatible data types - ch.queryWithNoError(r, "CREATE TABLE "+t.Name()+".test_system_parts_columns(dt Date, v String) ENGINE=MergeTree() PARTITION BY dt ORDER BY tuple()") - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() 
- INTERVAL number DAY, if(number>0,'a',toString(number)) FROM numbers(2)") + env.queryWithNoError(r, "CREATE TABLE "+t.Name()+".test_system_parts_columns(dt Date, v String) ENGINE=MergeTree() PARTITION BY dt ORDER BY tuple()") + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, if(number>0,'a',toString(number)) FROM numbers(2)") mutationSQL := "ALTER TABLE " + t.Name() + ".test_system_parts_columns MODIFY COLUMN v UInt64" - err = ch.chbackend.QueryContext(context.Background(), mutationSQL) + err = env.ch.QueryContext(context.Background(), mutationSQL) if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "code: 524") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number FROM numbers(10)") - r.Error(dockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) - r.Error(dockerExec("clickhouse-backup", "ls", "-lah", "/var/lib/clickhouse/backup/test_system_parts_columns")) - r.Error(dockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns")) + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number FROM numbers(10)") + r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) + r.Error(env.DockerExec("clickhouse-backup", "ls", "-lah", "/var/lib/clickhouse/backup/test_system_parts_columns")) + r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns")) - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, 
createSQL, "", false, version, "")) - r.NoError(ch.dropDatabase(t.Name())) + r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, createSQL, "", false, version, "")) + r.NoError(env.dropDatabase(t.Name())) } + func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { if isTestShouldSkip("RUN_ADVANCED_TESTS") { t.Skip("Skipping Advanced integration tests...") return } - //t.Parallel() + t.Parallel() r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) backupNames := make([]string, 5) for i := 0; i < 5; i++ { backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i) } databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - fullCleanup(t, r, ch, backupNames, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") + fullCleanup(t, r, env, backupNames, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") incrementData := defaultIncrementData - generateTestData(t, r, ch, "S3", defaultTestData) + generateTestData(t, r, env, "S3", defaultTestData) for backupNumber, backupName := range backupNames { if backupNumber == 0 { - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote %s", backupName))) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote %s", backupName))) } else { - incrementData = generateIncrementTestData(t, r, ch, "S3", incrementData, backupNumber) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 
CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[backupNumber-1], backupName))) + incrementData = generateIncrementTestData(t, r, env, "S3", incrementData, backupNumber) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[backupNumber-1], backupName))) } } - out, err := dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") r.NoError(err) // shall not delete any backup, cause all deleted backups have links as required in other backups for _, backupName := range backupNames { r.Contains(out, backupName) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", backupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", backupName)) } latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup)) - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup)) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") r.NoError(err) prevIncrementBackup := 
fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-2) for _, backupName := range backupNames { @@ -1647,67 +1670,67 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { r.NotContains(out, backupName) } } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)) var res uint64 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name()))) + r.NoError(env.ch.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name()))) r.Equal(uint64(100+20*4), res) - fullCleanup(t, r, ch, []string{latestIncrementBackup}, []string{"local"}, nil, true, true, "config-s3.yml") - fullCleanup(t, r, ch, backupNames, []string{"remote"}, databaseList, true, true, "config-s3.yml") - checkObjectStorageIsEmpty(t, r, "S3") + fullCleanup(t, r, env, []string{latestIncrementBackup}, []string{"local"}, nil, true, true, "config-s3.yml") + fullCleanup(t, r, env, backupNames, []string{"remote"}, databaseList, true, true, "config-s3.yml") + env.checkObjectStorageIsEmpty(t, r, "S3") } func TestSyncReplicaTimeout(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.11") == -1 { t.Skipf("Test skipped, SYNC REPLICA ignore receive_timeout for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - //t.Parallel() + t.Parallel() r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 2*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 2*time.Second) + defer env.ch.Close() - ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+t.Name()) + 
env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+t.Name()) dropReplTables := func() { for _, table := range []string{"repl1", "repl2"} { query := "DROP TABLE IF EXISTS " + t.Name() + "." + table if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.3") == 1 { query += " NO DELAY" } - ch.queryWithNoError(r, query) + env.queryWithNoError(r, query) } } dropReplTables() - ch.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl1 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl1') ORDER BY tuple()") - ch.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl2 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl2') ORDER BY tuple()") + env.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl1 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl1') ORDER BY tuple()") + env.queryWithNoError(r, "CREATE TABLE "+t.Name()+".repl2 (v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/"+t.Name()+"/repl','repl2') ORDER BY tuple()") - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(10)") + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(10)") - ch.queryWithNoError(r, "SYSTEM STOP REPLICATED SENDS "+t.Name()+".repl1") - ch.queryWithNoError(r, "SYSTEM STOP FETCHES "+t.Name()+".repl2") + env.queryWithNoError(r, "SYSTEM STOP REPLICATED SENDS "+t.Name()+".repl1") + env.queryWithNoError(r, "SYSTEM STOP FETCHES "+t.Name()+".repl2") - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(100)") + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(100)") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".repl*", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", 
"test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_not_synced_backup")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_not_synced_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".repl*", "test_not_synced_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_not_synced_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_not_synced_backup")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_not_synced_backup")) - ch.queryWithNoError(r, "SYSTEM START REPLICATED SENDS "+t.Name()+".repl1") - ch.queryWithNoError(r, "SYSTEM START FETCHES "+t.Name()+".repl2") + env.queryWithNoError(r, "SYSTEM START REPLICATED SENDS "+t.Name()+".repl1") + env.queryWithNoError(r, "SYSTEM START FETCHES "+t.Name()+".repl2") dropReplTables() - r.NoError(ch.dropDatabase(t.Name())) + r.NoError(env.dropDatabase(t.Name())) } func TestGetPartitionId(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.17") == -1 { t.Skipf("Test skipped, is_in_partition_key not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - //t.Parallel() + t.Parallel() r := require.New(t) - ch := &TestClickHouse{} + ch := NewTestEnvironment(t, r) ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + defer ch.ch.Close() type testData struct { CreateTableSQL string @@ -1759,11 +1782,11 @@ func TestGetPartitionId(t *testing.T) { "", }, } - if isAtomic, _ := 
ch.chbackend.IsAtomic("default"); !isAtomic { + if isAtomic, _ := ch.ch.IsAtomic("default"); !isAtomic { testCases[0].CreateTableSQL = strings.Replace(testCases[0].CreateTableSQL, "UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e'", "", 1) } for _, tc := range testCases { - partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), ch.chbackend, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) + partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), ch.ch, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) assert.NoError(t, err) assert.Equal(t, tc.ExpectedId, partitionId) assert.Equal(t, tc.ExpectedName, partitionName) @@ -1771,11 +1794,11 @@ func TestGetPartitionId(t *testing.T) { } func TestRestoreMutationInProgress(t *testing.T) { - //t.Parallel() + t.Parallel() r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 0*time.Second, 1*time.Second, 5*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 5*time.Second) + defer env.ch.Close() zkPath := "/clickhouse/tables/{shard}/" + t.Name() + "/test_restore_mutation_in_progress" onCluster := "" if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { @@ -1786,16 +1809,16 @@ func TestRestoreMutationInProgress(t *testing.T) { onCluster = " ON CLUSTER '{cluster}'" } createDbSQL := "CREATE DATABASE IF NOT EXISTS " + t.Name() - ch.queryWithNoError(r, createDbSQL) - version, err := ch.chbackend.GetVersion(context.Background()) + env.queryWithNoError(r, createDbSQL) + version, err := env.ch.GetVersion(context.Background()) r.NoError(err) createSQL := fmt.Sprintf("CREATE TABLE %s.test_restore_mutation_in_progress %s (id UInt64, attr String) ENGINE=ReplicatedMergeTree('%s','{replica}') PARTITION BY id ORDER BY id", t.Name(), onCluster, zkPath) - ch.queryWithNoError(r, createSQL) - ch.queryWithNoError(r, "INSERT INTO 
"+t.Name()+".test_restore_mutation_in_progress SELECT number, if(number>0,'a',toString(number)) FROM numbers(2)") + env.queryWithNoError(r, createSQL) + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_restore_mutation_in_progress SELECT number, if(number>0,'a',toString(number)) FROM numbers(2)") mutationSQL := "ALTER TABLE " + t.Name() + ".test_restore_mutation_in_progress MODIFY COLUMN attr UInt64" - err = ch.chbackend.QueryContext(context.Background(), mutationSQL) + err = env.ch.QueryContext(context.Background(), mutationSQL) if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) @@ -1805,7 +1828,7 @@ func TestRestoreMutationInProgress(t *testing.T) { attrs := make([]struct { Attr uint64 `ch:"attr"` }, 0) - err = ch.chbackend.Select(&attrs, "SELECT attr FROM "+t.Name()+".test_restore_mutation_in_progress ORDER BY id") + err = env.ch.Select(&attrs, "SELECT attr FROM "+t.Name()+".test_restore_mutation_in_progress ORDER BY id") r.NotEqual(nil, err) errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 53") || strings.Contains(errStr, "code: 6")) @@ -1813,29 +1836,29 @@ func TestRestoreMutationInProgress(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { mutationSQL = "ALTER TABLE " + t.Name() + ".test_restore_mutation_in_progress RENAME COLUMN attr TO attr_1" - err = ch.chbackend.QueryContext(context.Background(), mutationSQL) + err = env.ch.QueryContext(context.Background(), mutationSQL) r.NotEqual(nil, err) errStr = strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout")) t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } - r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) + 
r.NoError(env.DockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) // backup with check consistency - out, createErr := dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + out, createErr := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") r.NotEqual(createErr, nil) r.Contains(out, "have inconsistent data types") t.Log(out) // backup without check consistency - out, createErr = dockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--skip-check-parts-columns", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + out, createErr = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--skip-check-parts-columns", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") t.Log(out) r.NoError(createErr) r.NotContains(out, "have inconsistent data types") - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) + r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) var restoreErr error - restoreErr = dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") + restoreErr = env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.8") < 0 { r.NotEqual(restoreErr, nil) } else { @@ -1852,7 +1875,7 @@ func TestRestoreMutationInProgress(t *testing.T) { } } selectSQL := fmt.Sprintf("SELECT %s FROM "+t.Name()+".test_restore_mutation_in_progress ORDER BY id", checkRestoredData) - selectErr := ch.chbackend.Select(&attrs, selectSQL) + selectErr := env.ch.Select(&attrs, selectSQL) expectedSelectResults := make([]struct { Attr uint64 `ch:"attr"` }, 1) @@ -1884,132 +1907,132 @@ func TestRestoreMutationInProgress(t *testing.T) { r.NoError(selectErr) } - r.NoError(dockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) + r.NoError(env.DockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) - r.NoError(ch.dropDatabase(t.Name())) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress")) + r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) + r.NoError(env.dropDatabase(t.Name())) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress")) } func TestInnerTablesMaterializedView(t *testing.T) { - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) - defer ch.chbackend.Close() - - ch.queryWithNoError(r, 
"CREATE DATABASE test_mv") - ch.queryWithNoError(r, "CREATE TABLE test_mv.src_table (v UInt64) ENGINE=MergeTree() ORDER BY v") - ch.queryWithNoError(r, "CREATE TABLE test_mv.dst_table (v UInt64) ENGINE=MergeTree() ORDER BY v") - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") - ch.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") - - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) + defer env.ch.Close() + + env.queryWithNoError(r, "CREATE DATABASE test_mv") + env.queryWithNoError(r, "CREATE TABLE test_mv.src_table (v UInt64) ENGINE=MergeTree() ORDER BY v") + env.queryWithNoError(r, "CREATE TABLE test_mv.dst_table (v UInt64) ENGINE=MergeTree() ORDER BY v") + env.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_inner (v UInt64) ENGINE=MergeTree() ORDER BY v AS SELECT v FROM test_mv.src_table") + env.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") + env.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") + + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) dropSQL := "DROP DATABASE test_mv" - isAtomic, err := ch.chbackend.IsAtomic("test_mv") + isAtomic, err := env.ch.IsAtomic("test_mv") r.NoError(err) if isAtomic { dropSQL += " NO DELAY" } - ch.queryWithNoError(r, dropSQL) + env.queryWithNoError(r, dropSQL) var rowCnt uint64 - 
r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) + r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) r.Equal(uint64(100), rowCnt) - r.NoError(ch.dropDatabase("test_mv")) + r.NoError(env.dropDatabase("test_mv")) // https://github.com/Altinity/clickhouse-backup/issues/777 - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--delete-source", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--delete-source", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) + 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) + r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) r.Equal(uint64(100), rowCnt) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv")) } func TestFIPS(t *testing.T) { if os.Getenv("QA_AWS_ACCESS_KEY") == "" { t.Skip("QA_AWS_ACCESS_KEY is empty, TestFIPS will skip") } - //t.Parallel() - ch := &TestClickHouse{} + t.Parallel() r := require.New(t) - ch.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) + defer env.ch.Close() fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) - r.NoError(dockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) - installDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") - r.NoError(dockerExec("clickhouse", "update-ca-certificates")) - r.NoError(dockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) - 
r.NoError(dockerExec("clickhouse", "git", "clone", "--depth", "1", "--branch", "v3.2rc3", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) - r.NoError(dockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) + r.NoError(env.DockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) + env.InstallDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") + r.NoError(env.DockerExec("clickhouse", "update-ca-certificates")) + r.NoError(env.DockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) + r.NoError(env.DockerExec("clickhouse", "git", "clone", "--depth", "1", "--branch", "v3.2rc3", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) + r.NoError(env.DockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) generateCerts := func(certType, keyLength, curveType string) { - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048")) switch certType { case "rsa": - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength))) - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength))) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength))) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength))) case "ecdsa": - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType))) - r.NoError(dockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out 
/etc/clickhouse-backup/server-key.pem", curveType))) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType))) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/server-key.pem", curveType))) } - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem")) - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr")) - r.NoError(dockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey 
/etc/clickhouse-backup/ca-key.pem -CAcreateserial")) } - r.NoError(dockerExec("clickhouse", "bash", "-xec", "cat /etc/clickhouse-backup/config-s3-fips.yml.template | envsubst > /etc/clickhouse-backup/config-s3-fips.yml")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "cat /etc/clickhouse-backup/config-s3-fips.yml.template | envsubst > /etc/clickhouse-backup/config-s3-fips.yml")) generateCerts("rsa", "4096", "") - ch.queryWithNoError(r, "CREATE DATABASE "+t.Name()) + env.queryWithNoError(r, "CREATE DATABASE "+t.Name()) createSQL := "CREATE TABLE " + t.Name() + ".fips_table (v UInt64) ENGINE=MergeTree() ORDER BY tuple()" - ch.queryWithNoError(r, createSQL) - ch.queryWithNoError(r, "INSERT INTO "+t.Name()+".fips_table SELECT number FROM numbers(1000)") - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml create_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml restore_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName)) + env.queryWithNoError(r, createSQL) + env.queryWithNoError(r, "INSERT INTO "+t.Name()+".fips_table SELECT number FROM numbers(1000)") + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml create_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", 
"clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml restore_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName)) log.Info("Run `clickhouse-backup-fips server` in background") - r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + r.NoError(env.DockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) time.Sleep(1 * time.Second) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("create_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("restore_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) - runClickHouseClientInsertSystemBackupActions(r, ch, []string{fmt.Sprintf("delete remote %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("create_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) + runClickHouseClientInsertSystemBackupActions(r, env, 
[]string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("restore_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) + runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("delete local %s", fipsBackupName)}, false) + runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("delete remote %s", fipsBackupName)}, false) inProgressActions := make([]struct { Command string `ch:"command"` Status string `ch:"status"` }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, + r.NoError(env.ch.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE ? AND status IN (?,?)", fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, )) r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) + r.NoError(env.DockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { generateCerts(certType, keyLength, curveName) log.Infof("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) - r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + r.NoError(env.DockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) time.Sleep(1 * time.Second) - r.NoError(dockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca 
/etc/clickhouse-backup/ca-cert.pem localhost:7172")) - r.NoError(dockerExec("clickhouse", "cat", "/tmp/testssl.csv")) - out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -o -E '%s' /tmp/testssl.csv | uniq | wc -l", strings.Join(cipherList, "|"))) + r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172")) + r.NoError(env.DockerExec("clickhouse", "cat", "/tmp/testssl.csv")) + out, err := env.DockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -o -E '%s' /tmp/testssl.csv | uniq | wc -l", strings.Join(cipherList, "|"))) r.NoError(err) r.Equal(strconv.Itoa(len(cipherList)), strings.Trim(out, " \t\r\n")) @@ -2017,18 +2040,18 @@ func TestFIPS(t *testing.T) { Command string `ch:"command"` Status string `ch:"status"` }, 0) - r.NoError(ch.chbackend.StructSelect(&inProgressActions, + r.NoError(env.ch.StructSelect(&inProgressActions, "SELECT command, status FROM system.backup_actions WHERE command LIKE ? 
AND status IN (?,?)", fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, )) r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(dockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) + r.NoError(env.DockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) } // https://www.perplexity.ai/search/0920f1e8-59ec-4e14-b779-ba7b2e037196 testTLSCerts("rsa", "4096", "", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384", "AES_128_GCM_SHA256", "AES_256_GCM_SHA384") testTLSCerts("ecdsa", "", "prime256v1", "ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384") - r.NoError(ch.chbackend.DropTable(clickhouse.Table{Database: t.Name(), Name: "fips_table"}, createSQL, "", false, 0, "")) - r.NoError(ch.dropDatabase(t.Name())) + r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "fips_table"}, createSQL, "", false, 0, "")) + r.NoError(env.dropDatabase(t.Name())) } @@ -2037,12 +2060,14 @@ func TestIntegrationS3Glacier(t *testing.T) { t.Skip("Skipping GLACIER integration tests...") return } + t.Parallel() r := require.New(t) - r.NoError(dockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) - installDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml")) + env := NewTestEnvironment(t, r) + r.NoError(env.DockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "cat 
/etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml")) dockerExecTimeout = 60 * time.Minute - runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") + env.runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") dockerExecTimeout = 3 * time.Minute } @@ -2051,14 +2076,18 @@ func TestIntegrationAzure(t *testing.T) { t.Skip("Skipping Azure integration tests...") return } - //t.Parallel() - runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") } func TestIntegrationS3(t *testing.T) { - //t.Parallel() - checkObjectStorageIsEmpty(t, require.New(t), "S3") - runMainIntegrationScenario(t, "S3", "config-s3.yml") + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + env.checkObjectStorageIsEmpty(t, r, "S3") + env.runMainIntegrationScenario(t, "S3", "config-s3.yml") } func TestIntegrationGCS(t *testing.T) { @@ -2066,8 +2095,10 @@ func TestIntegrationGCS(t *testing.T) { t.Skip("Skipping GCS integration tests...") return } - //t.Parallel() - runMainIntegrationScenario(t, "GCS", "config-gcs.yml") + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") } func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { @@ -2075,70 +2106,83 @@ func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { t.Skip("Skipping GCS_EMULATOR integration tests...") return } - //t.Parallel() - runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") } func TestIntegrationSFTPAuthPassword(t *testing.T) { - //t.Parallel() - runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") + t.Parallel() + r := 
require.New(t) + env := NewTestEnvironment(t, r) + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") } func TestIntegrationFTP(t *testing.T) { - //t.Parallel() + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { - runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") + env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") } else { - runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") + env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") } } func TestIntegrationSFTPAuthKey(t *testing.T) { - uploadSSHKeys(require.New(t), "clickhouse-backup") - //t.Parallel() - runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") + t.Parallel() + r := require.New(t) + env := NewTestEnvironment(t, r) + env.uploadSSHKeys(r, "clickhouse-backup") + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") } func TestIntegrationCustomKopia(t *testing.T) { - //t.Parallel() + t.Parallel() r := require.New(t) - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) - installDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") + env := NewTestEnvironment(t, r) + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", 
"bzip2", "pgp", "git") - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) - installDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") - runIntegrationCustom(t, r, "kopia") + env.runIntegrationCustom(t, r, "kopia") } + func TestIntegrationCustomRestic(t *testing.T) { - //t.Parallel() + t.Parallel() r := require.New(t) - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) - installDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x 
/bin/restic")) - runIntegrationCustom(t, r, "restic") + env := NewTestEnvironment(t, r) + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) + env.runIntegrationCustom(t, r, "restic") } func TestIntegrationCustomRsync(t *testing.T) { + t.Parallel() r := require.New(t) - uploadSSHKeys(r, "clickhouse-backup") - installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(dockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(dockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) - installDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") - //t.Parallel() - runIntegrationCustom(t, r, "rsync") + env := NewTestEnvironment(t, r) + env.uploadSSHKeys(r, "clickhouse-backup") + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL 
\"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") + env.runIntegrationCustom(t, r, "rsync") } -func runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { - r.NoError(dockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) - r.NoError(dockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) - runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") +func (env *TestEnvironment) runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { + r.NoError(env.DockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) + r.NoError(env.DockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) + env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") } func TestIntegrationEmbedded(t *testing.T) { @@ -2148,92 +2192,94 @@ func TestIntegrationEmbedded(t *testing.T) { if compareVersion(version, "23.3") < 0 { t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) } - //t.Parallel() + t.Parallel() r := require.New(t) + env := NewTestEnvironment(t, r) + //CUSTOM backup creates folder in each disk, need to clear - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") + r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) + env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") //@TODO think about how to implements embedded backup for s3_plain disks - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) + //r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) 
//runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") //@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053 //CUSTOM backup create folder in each disk - //r.NoError(dockerExec("azure", "apk", "add", "tcpdump")) - //r.NoError(dockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) - //r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - //if compareVersion(version, "24.2") >= 0 { - // runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - //} - //runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - //r.NoError(dockerExec("azure", "pkill", "tcpdump")) - //r.NoError(dockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + r.NoError(env.DockerExec("azure", "apk", "add", "tcpdump")) + r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) + if compareVersion(version, "24.2") >= 0 { + env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + } + env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + r.NoError(env.DockerExec("azure", "pkill", "tcpdump")) + r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) if compareVersion(version, "24.3") >= 0 { //CUSTOM backup creates folder in each disk, need to clear - r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/")) - runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") + r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", 
"/var/lib/clickhouse/disks/backups_local/backup/")) + env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") } if compareVersion(version, "23.8") >= 0 { //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 - //installDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") - //r.NoError(dockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml")) - //runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") - runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") + r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml")) + env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } } func TestRestoreMapping(t *testing.T) { - //t.Parallel() + t.Parallel() r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) + defer env.ch.Close() checkRecordset := func(expectedRows int, expectedCount uint64, query string) { result := make([]struct { Count uint64 `ch:"count()"` }, 0) - r.NoError(ch.chbackend.Select(&result, query)) + r.NoError(env.ch.Select(&result, query)) r.Equal(expectedRows, len(result), "expect %d row", expectedRows) r.Equal(expectedCount, result[0].Count, "expect count=%d", 
expectedCount) } testBackupName := "test_restore_database_mapping" databaseList := []string{"database1", "database-2"} - fullCleanup(t, r, ch, []string{testBackupName}, []string{"local"}, databaseList, false, false, "config-database-mapping.yml") + fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, false, false, "config-database-mapping.yml") - ch.queryWithNoError(r, "CREATE DATABASE database1") - ch.queryWithNoError(r, "CREATE TABLE database1.t1 (dt DateTime, v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t1','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") - ch.queryWithNoError(r, "CREATE TABLE database1.d1 AS database1.t1 ENGINE=Distributed('{cluster}', 'database1', 't1')") + env.queryWithNoError(r, "CREATE DATABASE database1") + env.queryWithNoError(r, "CREATE TABLE database1.t1 (dt DateTime, v UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t1','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "CREATE TABLE database1.d1 AS database1.t1 ENGINE=Distributed('{cluster}', 'database1', 't1')") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.3") < 0 { - ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t2','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/database1/t2','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") } else { - ch.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") + env.queryWithNoError(r, "CREATE TABLE database1.t2 AS database1.t1 ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/{table}','{replica}') PARTITION BY toYYYYMM(dt) ORDER BY dt") } - ch.queryWithNoError(r, "CREATE MATERIALIZED VIEW database1.mv1 TO 
database1.t2 AS SELECT * FROM database1.t1") - ch.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") - ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") + env.queryWithNoError(r, "CREATE MATERIALIZED VIEW database1.mv1 TO database1.t2 AS SELECT * FROM database1.t1") + env.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") + env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") log.Info("Create backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName)) log.Info("Restore schema") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) log.Info("Check result database1") - ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") + env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") checkRecordset(1, 20, "SELECT count() FROM database1.t1") checkRecordset(1, 20, "SELECT count() FROM database1.d1") checkRecordset(1, 20, "SELECT count() FROM database1.mv1") checkRecordset(1, 20, "SELECT 
count() FROM database1.v1") log.Info("Drop database1") - r.NoError(ch.dropDatabase("database1")) + r.NoError(env.dropDatabase("database1")) log.Info("Restore data") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) log.Info("Check result database-2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.t3") @@ -2244,7 +2290,7 @@ func TestRestoreMapping(t *testing.T) { log.Info("Check database1 not exists") checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") - fullCleanup(t, r, ch, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") + fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") } func TestMySQLMaterialized(t *testing.T) { @@ -2252,30 +2298,30 @@ func TestMySQLMaterialized(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.12") == -1 { t.Skipf("MaterializedMySQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) } - //t.Parallel() + t.Parallel() r := require.New(t) - r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl")) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() + env := NewTestEnvironment(t, r) + r.NoError(env.DockerExec("mysql", "mysql", "-u", "root", 
"--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl")) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) + defer env.ch.Close() engine := "MaterializedMySQL" if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") == -1 { engine = "MaterializeMySQL" } - ch.queryWithNoError(r, fmt.Sprintf("CREATE DATABASE ch_mysql_repl ENGINE=%s('mysql:3306','ch_mysql_repl','root','root')", engine)) - r.NoError(dockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) + env.queryWithNoError(r, fmt.Sprintf("CREATE DATABASE ch_mysql_repl ENGINE=%s('mysql:3306','ch_mysql_repl','root','root')", engine)) + r.NoError(env.DockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) time.Sleep(1 * time.Second) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mysql_materialized")) - ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mysql_materialized")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mysql_materialized")) + env.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mysql_materialized")) result := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_mysql_repl.t1")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM 
ch_mysql_repl.t1")) r.Equal(3, result, "expect count=3") - ch.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized")) + env.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized")) } func TestPostgreSQLMaterialized(t *testing.T) { @@ -2287,21 +2333,21 @@ func TestPostgreSQLMaterialized(t *testing.T) { } t.Skip("FREEZE don't support for MaterializedPostgreSQL, https://github.com/ClickHouse/ClickHouse/issues/32902") - //t.Parallel() + t.Parallel() r := require.New(t) - r.NoError(dockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) - r.NoError(dockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl")) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer ch.chbackend.Close() - ch.queryWithNoError(r, + env := NewTestEnvironment(t, r) + r.NoError(env.DockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) + r.NoError(env.DockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl")) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) + defer env.ch.Close() + env.queryWithNoError(r, "CREATE DATABASE ch_pgsql_repl ENGINE=MaterializedPostgreSQL('pgsql:5432','ch_pgsql_repl','root','root') "+ "SETTINGS 
materialized_postgresql_schema = 'public'", ) // time to initial snapshot count := uint64(0) for { - err := ch.chbackend.SelectSingleRowNoCtx(&count, "SELECT count() FROM system.tables WHERE database='ch_pgsql_repl'") + err := env.ch.SelectSingleRowNoCtx(&count, "SELECT count() FROM system.tables WHERE database='ch_pgsql_repl'") r.NoError(err) if count > 0 { break @@ -2310,37 +2356,36 @@ func TestPostgreSQLMaterialized(t *testing.T) { time.Sleep(5 * time.Second) } - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_pgsql_materialized")) - ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_pgsql_materialized")) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_pgsql_materialized")) + env.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_pgsql_materialized")) result := 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) r.Equal(3, result, "expect count=3") - ch.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized")) + env.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized")) } -func uploadSSHKeys(r *require.Assertions, container string) { - r.NoError(dockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) - 
r.NoError(dockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) - r.NoError(dockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) +func (env *TestEnvironment) uploadSSHKeys(r *require.Assertions, container string) { + r.NoError(env.DockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) + r.NoError(env.DockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) + r.NoError(env.DockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) - r.NoError(dockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) - r.NoError(dockerExec("sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys")) - r.NoError(dockerExec("sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys")) + r.NoError(env.DockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) + r.NoError(env.DockerExec("sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys")) + r.NoError(env.DockerExec("sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys")) } -func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig string) { +func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig string) { var out string var err error r := require.New(t) - ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1500*time.Millisecond, 1*time.Minute) - defer ch.chbackend.Close() + env.connectWithWait(r, 500*time.Millisecond, 1500*time.Millisecond, 1*time.Minute) + defer env.ch.Close() // test for specified partitions backup - testBackupSpecifiedPartitions(t, r, ch, remoteStorageType, backupConfig) + testBackupSpecifiedPartitions(t, r, env, remoteStorageType, backupConfig) // main test scenario testBackupName := fmt.Sprintf("%s_full_%d", t.Name(), rand.Int()) @@ -2349,94 +2394,94 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, 
Issue331Ordinary} tablesPattern := fmt.Sprintf("*_%s.*", t.Name()) log.Info("Clean before start") - fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) + fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) - r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) - testData := generateTestData(t, r, ch, remoteStorageType, defaultTestData) + r.NoError(env.DockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + testData := generateTestData(t, r, env, remoteStorageType, defaultTestData) - r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + r.NoError(env.DockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) log.Info("Create backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName)) - incrementData := generateIncrementTestData(t, r, ch, remoteStorageType, defaultIncrementData, 1) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName)) + incrementData := generateIncrementTestData(t, r, env, remoteStorageType, defaultIncrementData, 1) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName)) log.Info("Upload full") uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, testBackupName) - checkResumeAlreadyProcessed(uploadCmd, 
testBackupName, "upload", r, remoteStorageType) + env.checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) // https://github.com/Altinity/clickhouse-backup/pull/900 if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 { log.Info("create --diff-from-remote backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", testBackupName, "--tables", tablesPattern, incrementBackupName2)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", testBackupName, "--tables", tablesPattern, incrementBackupName2)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2)) } log.Info("Upload increment") uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s --diff-from-remote %s --resume", backupConfig, incrementBackupName, testBackupName) - checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) + env.checkResumeAlreadyProcessed(uploadCmd, 
incrementBackupName, "upload", r, remoteStorageType) backupDir := "/var/lib/clickhouse/backup" if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) } - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NoError(err) r.Equal(2, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") log.Info("Delete backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName)) - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName)) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NotNil(err) r.Equal("", strings.Trim(out, " \t\r\n"), "expect '0' backup exists in backup directory") - dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) + dropDatabasesFromTestDataDataSet(t, r, env, databaseList) log.Info("Download") - replaceStorageDiskNameForReBalance(r, ch, remoteStorageType, false) + replaceStorageDiskNameForReBalance(r, env, remoteStorageType, false) downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", 
backupConfig, testBackupName) - checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) + env.checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) log.Info("Restore schema") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName)) log.Info("Restore data") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName)) log.Info("Full restore with rm") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName)) log.Info("Check data") for i := range testData { if testData[i].CheckDatabaseOnly { - r.NoError(ch.checkDatabaseEngine(t, testData[i])) + r.NoError(env.checkDatabaseEngine(t, testData[i])) } else { - if isTableSkip(ch, testData[i], true) { + if isTableSkip(env, testData[i], true) { continue } - r.NoError(ch.checkData(t, r, testData[i])) + r.NoError(env.checkData(t, r, testData[i])) } } // test increment - dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) + dropDatabasesFromTestDataDataSet(t, r, env, databaseList) log.Info("Delete backup") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) log.Info("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, incrementBackupName) - checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) + env.checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) log.Info("Restore") - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName)) log.Info("Check increment data") for i := range testData { testDataItem := testData[i] - if isTableSkip(ch, testDataItem, true) || testDataItem.IsDictionary { + if isTableSkip(env, testDataItem, true) || testDataItem.IsDictionary { continue } for _, incrementDataItem := range incrementData { @@ -2445,34 +2490,34 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig st } } if testDataItem.CheckDatabaseOnly { - r.NoError(ch.checkDatabaseEngine(t, testDataItem)) + r.NoError(env.checkDatabaseEngine(t, testDataItem)) } else { - r.NoError(ch.checkData(t, r, testDataItem)) + r.NoError(env.checkData(t, r, testDataItem)) } } // test end log.Info("Clean after finish") // during download increment, partially downloaded full will clean - fullCleanup(t, r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) - fullCleanup(t, r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) - replaceStorageDiskNameForReBalance(r, ch, remoteStorageType, true) - checkObjectStorageIsEmpty(t, r, remoteStorageType) + fullCleanup(t, r, env, []string{incrementBackupName}, []string{"local"}, nil, 
true, false, backupConfig) + fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) + replaceStorageDiskNameForReBalance(r, env, remoteStorageType, true) + env.checkObjectStorageIsEmpty(t, r, remoteStorageType) } -func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorageType string) { +func (env *TestEnvironment) checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorageType string) { if remoteStorageType == "AZBLOB" || remoteStorageType == "AZBLOB_EMBEDDED_URL" { - t.Log("wait when resolve https://github.com/Azure/Azurite/issues/2362") + t.Log("wait when resolve https://github.com/Azure/Azurite/issues/2362, todo try to use mysql as azurite storage") /* - r.NoError(dockerExec("azure", "apk", "add", "jq")) + r.NoError(env.DockerExec("azure", "apk", "add", "jq")) checkBlobCollection := func(containerName string, expected string) { - out, err := dockerExecOut("azure", "sh", "-c", "jq '.collections[] | select(.name == \"$BLOBS_COLLECTION$\") | .data[] | select(.containerName == \""+containerName+"\") | .name' /data/__azurite_db_blob__.json") + out, err := env.DockerExecOut("azure", "sh", "-c", "jq '.collections[] | select(.name == \"$BLOBS_COLLECTION$\") | .data[] | select(.containerName == \""+containerName+"\") | .name' /data/__azurite_db_blob__.json") r.NoError(err) actual := strings.Trim(out, "\n\r\t ") if expected != actual { - r.NoError(dockerExec("azure", "sh", "-c", "cat /data/__azurite_db_blob__.json | jq")) - r.NoError(dockerExec("azure", "sh", "-c", "stat -c '%y' /data/__azurite_db_blob__.json")) - r.NoError(dockerExec("azure", "sh", "-c", "cat /data/debug.log")) + r.NoError(env.DockerExec("azure", "sh", "-c", "cat /data/__azurite_db_blob__.json | jq")) + r.NoError(env.DockerExec("azure", "sh", "-c", "stat -c '%y' /data/__azurite_db_blob__.json")) + r.NoError(env.DockerExec("azure", "sh", "-c", "cat /data/debug.log")) } r.Equal(expected, 
actual) } @@ -2488,7 +2533,7 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag */ } checkRemoteDir := func(expected string, container string, cmd ...string) { - out, err := dockerExecOut(container, cmd...) + out, err := env.DockerExecOut(container, cmd...) r.NoError(err) r.Equal(expected, strings.Trim(out, "\r\n\t ")) } @@ -2510,7 +2555,7 @@ func checkObjectStorageIsEmpty(t *testing.T, r *require.Assertions, remoteStorag } } -func replaceStorageDiskNameForReBalance(r *require.Assertions, ch *TestClickHouse, remoteStorageType string, isRebalanced bool) { +func replaceStorageDiskNameForReBalance(r *require.Assertions, env *TestEnvironment, remoteStorageType string, isRebalanced bool) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "23.3") < 0 { return } @@ -2531,19 +2576,19 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, ch *TestClickHous origFile := "/etc/clickhouse-server/config.d/" + fileName dstFile := "/var/lib/clickhouse/" + fileName sedCmd := fmt.Sprintf("s/<%s>/<%s>/g; s/<\\/%s>/<\\/%s>/g; s/%s<\\/disk>/%s<\\/disk>/g", oldDisk, newDisk, oldDisk, newDisk, oldDisk, newDisk) - r.NoError(dockerExec("clickhouse", "sed", "-i", sedCmd, origFile)) - r.NoError(dockerExec("clickhouse", "cp", "-vf", origFile, dstFile)) + r.NoError(env.DockerExec("clickhouse", "sed", "-i", sedCmd, origFile)) + r.NoError(env.DockerExec("clickhouse", "cp", "-vf", origFile, dstFile)) } if isRebalanced { - r.NoError(dockerExec("clickhouse", "bash", "-xc", "cp -aflv -t /var/lib/clickhouse/disks/"+newDisk+"/ /var/lib/clickhouse/disks/"+oldDisk+"/*")) - r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/"+oldDisk+"")) + r.NoError(env.DockerExec("clickhouse", "bash", "-xc", "cp -aflv -t /var/lib/clickhouse/disks/"+newDisk+"/ /var/lib/clickhouse/disks/"+oldDisk+"/*")) + r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/"+oldDisk+"")) } - ch.chbackend.Close() + env.ch.Close() 
r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) - ch.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 3*time.Minute) + env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 3*time.Minute) } -func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *TestClickHouse, remoteStorageType string, backupConfig string) { +func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, backupConfig string) { log.Info("testBackupSpecifiedPartitions started") var err error var out string @@ -2553,20 +2598,20 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test fullBackupName := fmt.Sprintf("full_backup_%d", rand.Int()) dbName := "test_partitions_" + t.Name() // Create and fill tables - ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+dbName) - ch.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t1") - ch.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t2") - ch.queryWithNoError(r, "CREATE TABLE "+dbName+".t1 (dt Date, category Int64, v UInt64) ENGINE=MergeTree() PARTITION BY (category, toYYYYMMDD(dt)) ORDER BY dt") - ch.queryWithNoError(r, "CREATE TABLE "+dbName+".t2 (dt String, category Int64, v UInt64) ENGINE=MergeTree() PARTITION BY (category, dt) ORDER BY dt") + env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+dbName) + env.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t1") + env.queryWithNoError(r, "DROP TABLE IF EXISTS "+dbName+".t2") + env.queryWithNoError(r, "CREATE TABLE "+dbName+".t1 (dt Date, category Int64, v UInt64) ENGINE=MergeTree() PARTITION BY (category, toYYYYMMDD(dt)) ORDER BY dt") + env.queryWithNoError(r, "CREATE TABLE "+dbName+".t2 (dt String, category Int64, v UInt64) ENGINE=MergeTree() PARTITION BY (category, dt) ORDER BY dt") for _, dt := range []string{"2022-01-01", "2022-01-02", "2022-01-03", "2022-01-04"} { - 
ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t1(dt, v) SELECT '%s', number FROM numbers(10)", dt)) - ch.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t2(dt, v) SELECT '%s', number FROM numbers(10)", dt)) + env.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t1(dt, v) SELECT '%s', number FROM numbers(10)", dt)) + env.queryWithNoError(r, fmt.Sprintf("INSERT INTO "+dbName+".t2(dt, v) SELECT '%s', number FROM numbers(10)", dt)) } // check create_remote full > download + partitions > restore --data --partitions > delete local > download > restore --partitions > restore - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions="+dbName+".t?:(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions="+dbName+".t?:(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" // embedded storage with embedded disks contains object disk files and will download additional data parts if strings.HasPrefix(remoteStorageType, "EMBEDDED") { @@ -2576,7 +2621,7 @@ func testBackupSpecifiedPartitions(t *testing.T, r 
*require.Assertions, ch *Test if strings.HasPrefix(remoteStorageType, "EMBEDDED") && strings.HasSuffix(remoteStorageType, "_URL") { fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/metadata/" + dbName + "/t?.json" } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l") r.NoError(err) expectedLines := "13" // custom storage doesn't support --partitions for upload / download now @@ -2591,17 +2636,17 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) checkRestoredDataWithPartitions := func(expectedCount uint64) { result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT sum(c) FROM (SELECT count() AS c FROM "+dbName+".t1 UNION ALL SELECT count() AS c FROM "+dbName+".t2)")) r.Equal(expectedCount, result, "expect count=%d", expectedCount) } if remoteStorageType == "FTP" && !strings.Contains(backupConfig, "old") { // during DROP PARTITION, we create empty covered part, and cant restore via ATTACH TABLE properly, https://github.com/Altinity/clickhouse-backup/issues/756 - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) r.Error(err) - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "CLICKHOUSE_RESTORE_AS_ATTACH=0 clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" 
restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "CLICKHOUSE_RESTORE_AS_ATTACH=0 clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) } else { - out, err = dockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) } t.Log(out) r.NoError(err) @@ -2609,8 +2654,8 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test // we just replace data in exists table checkRestoredDataWithPartitions(80) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) expectedLines = "17" fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" @@ -2623,23 +2668,23 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/metadata/" + dbName + "/t?.json" expectedLines = "2" } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") + 
out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - out, err = dockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) r.NoError(err) r.NotContains(out, "DROP PARTITION") checkRestoredDataWithPartitions(40) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) checkRestoredDataWithPartitions(80) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) // check create + partitions - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", 
"--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) expectedLines = "5" partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { @@ -2650,13 +2695,13 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/metadata/" + dbName + "/t?.json" expectedLines = "1" } - out, err = dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) // check create > upload + partitions - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName)) partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" expectedLines = "7" if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { @@ -2667,18 +2712,18 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/metadata/" + dbName + "/t?.json" expectedLines = "1" } - out, err = 
dockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") + out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=0-20220102,0-20220103", partitionBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=0-20220102,0-20220103", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) // restore partial uploaded - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore_remote", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore_remote", partitionBackupName)) // Check partial restored t1 result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM "+dbName+".t1")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM "+dbName+".t1")) expectedCount = 20 // custom and embedded doesn't support --partitions in upload and download @@ -2689,7 +2734,7 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test // Check only selected partitions restored result = 0 - r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM "+dbName+".t1 WHERE dt NOT IN ('2022-01-02','2022-01-03')")) + r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() 
FROM "+dbName+".t1 WHERE dt NOT IN ('2022-01-02','2022-01-03')")) expectedCount = 0 // custom and embedded doesn't support --partitions in upload and download if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { @@ -2698,23 +2743,23 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, ch *Test r.Equal(expectedCount, result, "expect count=0") // DELETE backup. - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", partitionBackupName)) - r.NoError(dockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", partitionBackupName)) + r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) - if err = ch.dropDatabase(dbName); err != nil { + if err = env.dropDatabase(dbName); err != nil { t.Fatal(err) } log.Info("testBackupSpecifiedPartitions finish") } -func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { +func (env *TestEnvironment) checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { // backupCmd = fmt.Sprintf("%s & PID=$!; sleep 0.7; kill -9 $PID; cat /var/lib/clickhouse/backup/%s/upload.state; sleep 0.3; %s", backupCmd, testBackupName, backupCmd) if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") { backupCmd = strings.Replace(backupCmd, "--resume", "", 1) } else { backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) } - out, err := dockerExecOut("clickhouse-backup", "bash", 
"-xce", backupCmd) + out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) log.Info(out) r.NoError(err) if strings.Contains(backupCmd, "--resume") { @@ -2722,20 +2767,20 @@ func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r } } -func fullCleanup(t *testing.T, r *require.Assertions, ch *TestClickHouse, backupNames, backupTypes, databaseList []string, checkDeleteErr, checkDropErr bool, backupConfig string) { +func fullCleanup(t *testing.T, r *require.Assertions, env *TestEnvironment, backupNames, backupTypes, databaseList []string, checkDeleteErr, checkDropErr bool, backupConfig string) { for _, backupName := range backupNames { for _, backupType := range backupTypes { - err := dockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete "+backupType+" "+backupName) + err := env.DockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete "+backupType+" "+backupName) if checkDeleteErr { r.NoError(err) } } } - otherBackupList, err := dockerExecOut("clickhouse", "ls", "-1", "/var/lib/clickhouse/backup/*"+t.Name()+"*") + otherBackupList, err := env.DockerExecOut("clickhouse", "ls", "-1", "/var/lib/clickhouse/backup/*"+t.Name()+"*") if err == nil { for _, backupName := range strings.Split(otherBackupList, "\n") { if backupName != "" { - err := dockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete local "+backupName) + err := env.DockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete local "+backupName) if checkDropErr { r.NoError(err) } @@ -2743,23 +2788,23 @@ func fullCleanup(t *testing.T, r *require.Assertions, ch *TestClickHouse, backup } } - dropDatabasesFromTestDataDataSet(t, r, ch, databaseList) + dropDatabasesFromTestDataDataSet(t, r, env, databaseList) } -func 
generateTestData(t *testing.T, r *require.Assertions, ch *TestClickHouse, remoteStorageType string, testData []TestDataStruct) []TestDataStruct { +func generateTestData(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, testData []TestDataStruct) []TestDataStruct { log.Infof("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) testData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 0, 5, testData) for _, data := range testData { - if isTableSkip(ch, data, false) { + if isTableSkip(env, data, false) { continue } - r.NoError(ch.createTestSchema(t, data, remoteStorageType)) + r.NoError(env.createTestSchema(t, data, remoteStorageType)) } for _, data := range testData { - if isTableSkip(ch, data, false) { + if isTableSkip(env, data, false) { continue } - r.NoError(ch.createTestData(t, data)) + r.NoError(env.createTestData(t, data)) } return testData } @@ -2830,7 +2875,7 @@ func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, offset return testData } -func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestClickHouse, remoteStorageType string, incrementData []TestDataStruct, incrementNumber int) []TestDataStruct { +func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestEnvironment, remoteStorageType string, incrementData []TestDataStruct, incrementNumber int) []TestDataStruct { log.Infof("Generate increment test data for %s", remoteStorageType) incrementData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 5*incrementNumber, 5, incrementData) for _, data := range incrementData { @@ -2842,7 +2887,7 @@ func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestClic return incrementData } -func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestClickHouse, databaseList []string) { +func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestEnvironment, databaseList 
[]string) { log.Info("Drop all databases") for _, db := range databaseList { db = db + "_" + t.Name() @@ -2850,26 +2895,21 @@ func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *T } } -const apiBackupNumber = 5 - -type TestClickHouse struct { - chbackend *clickhouse.ClickHouse -} -func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, pollInterval, timeOut time.Duration) { +func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, pollInterval, timeOut time.Duration) { time.Sleep(sleepBefore) for i := 1; i < 11; i++ { - err := ch.connect(timeOut.String()) + err := env.connect(timeOut.String()) if i == 10 { - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "logs", "clickhouse")) - out, dockerErr := dockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "'SELECT version()'") + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(),"logs", "clickhouse")...)) + out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "'SELECT version()'") r.NoError(dockerErr) - ch.chbackend.Log.Debug(out) + env.ch.Log.Debug(out) r.NoError(err) } if err != nil { r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "ps", "-a")) - if out, dockerErr := dockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "SELECT version()"); dockerErr == nil { + if out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "SELECT version()"); dockerErr == nil { log.Info(out) } else { log.Warn(out) @@ -2877,9 +2917,9 @@ func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, po log.Warnf("clickhouse not ready %v, wait %v seconds", err, (pollInterval).Seconds()) time.Sleep(pollInterval) } else { - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") == 1 { + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") > 0 
{ var count uint64 - err = ch.chbackend.SelectSingleRowNoCtx(&count, "SELECT count() FROM mysql('mysql:3306','mysql','user','root','root')") + err = env.ch.SelectSingleRowNoCtx(&count, "SELECT count() FROM mysql('mysql:3306','mysql','user','root','root')") if err == nil { break } else { @@ -2893,18 +2933,31 @@ func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, po } } -func (ch *TestClickHouse) connect(timeOut string) error { - ch.chbackend = &clickhouse.ClickHouse{ +func (env *TestEnvironment) connect(timeOut string) error { + portOut, err := utils.ExecCmdOut(context.Background(), 10*time.Second, "docker", append(env.GetDefaultComposeCommand(), "port", "clickhouse","9000")...) + if err != nil { + log.Error(portOut) + log.Fatalf("can't get port for clickhouse: %v", err) + } + hostAndPort := strings.Split(strings.Trim(portOut," \r\n\t"),":") + if len(hostAndPort) < 1 { + log.Error(portOut) + log.Fatalf("invalid port for clickhouse: %v", err) + } + port, err := strconv.Atoi(hostAndPort[1]) + if err != nil { + return err + } + env.ch = &clickhouse.ClickHouse{ Config: &config.ClickHouseConfig{ - Host: "127.0.0.1", - Port: 9000, + Host: hostAndPort[0], + Port: uint(port), Timeout: timeOut, }, Log: log.WithField("logger", "integration-test"), } - var err error for i := 0; i < 3; i++ { - err = ch.chbackend.Connect() + err = env.ch.Connect() if err == nil { return nil } else { @@ -2916,7 +2969,7 @@ func (ch *TestClickHouse) connect(timeOut string) error { var mergeTreeOldSyntax = regexp.MustCompile(`(?m)MergeTree\(([^,]+),([\w\s,)(]+),(\s*\d+\s*)\)`) -func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, remoteStorageType string) error { +func (env *TestEnvironment) createTestSchema(t *testing.T, data TestDataStruct, remoteStorageType string) error { origDatabase := data.Database origName := data.Name if !data.IsFunction { @@ -2924,11 +2977,11 @@ func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, 
re data.Name = data.Name + "_" + t.Name() // 20.8 doesn't respect DROP TABLE ... NO DELAY, so Atomic works but --rm is not applicable if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") > 0 { - if err := ch.chbackend.CreateDatabaseWithEngine(data.Database, data.DatabaseEngine, "cluster"); err != nil { + if err := env.ch.CreateDatabaseWithEngine(data.Database, data.DatabaseEngine, "cluster"); err != nil { return err } } else { - if err := ch.chbackend.CreateDatabase(data.Database, "cluster"); err != nil { + if err := env.ch.CreateDatabase(data.Database, "cluster"); err != nil { return err } } @@ -2962,7 +3015,7 @@ func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, re // old 1.x clickhouse versions doesn't contains {table} and {database} macros if strings.Contains(createSQL, "{table}") || strings.Contains(createSQL, "{database}") { var isMacrosExists uint64 - if err := ch.chbackend.SelectSingleRowNoCtx(&isMacrosExists, "SELECT count() FROM system.functions WHERE name='getMacro'"); err != nil { + if err := env.ch.SelectSingleRowNoCtx(&isMacrosExists, "SELECT count() FROM system.functions WHERE name='getMacro'"); err != nil { return err } if isMacrosExists == 0 { @@ -2993,7 +3046,7 @@ func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, re createSQL = strings.NewReplacer("."+origName, "."+data.Name, "`"+origName+"`", "`"+data.Name+"`", "'"+origName+"'", "'"+data.Name+"'").Replace(createSQL) } createSQL = strings.Replace(createSQL, "{test}", t.Name(), -1) - err := ch.chbackend.CreateTable( + err := env.ch.CreateTable( clickhouse.Table{ Database: data.Database, Name: data.Name, @@ -3004,7 +3057,7 @@ func (ch *TestClickHouse) createTestSchema(t *testing.T, data TestDataStruct, re return err } -func (ch *TestClickHouse) createTestData(t *testing.T, data TestDataStruct) error { +func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) error { data.Database = data.Database + "_" + t.Name() 
data.Name = data.Name + "_" + t.Name() if data.SkipInsert || data.CheckDatabaseOnly { @@ -3012,7 +3065,7 @@ func (ch *TestClickHouse) createTestData(t *testing.T, data TestDataStruct) erro } insertSQL := fmt.Sprintf("INSERT INTO `%s`.`%s`", data.Database, data.Name) log.Debug(insertSQL) - batch, err := ch.chbackend.GetConn().PrepareBatch(context.Background(), insertSQL) + batch, err := env.ch.GetConn().PrepareBatch(context.Background(), insertSQL) if err != nil { return err @@ -3031,18 +3084,18 @@ func (ch *TestClickHouse) createTestData(t *testing.T, data TestDataStruct) erro return batch.Send() } -func (ch *TestClickHouse) dropDatabase(database string) (err error) { +func (env *TestEnvironment) dropDatabase(database string) (err error) { var isAtomic bool dropDatabaseSQL := fmt.Sprintf("DROP DATABASE IF EXISTS `%s`", database) - if isAtomic, err = ch.chbackend.IsAtomic(database); isAtomic { + if isAtomic, err = env.ch.IsAtomic(database); isAtomic { dropDatabaseSQL += " SYNC" } else if err != nil { return err } - return ch.chbackend.Query(dropDatabaseSQL) + return env.ch.Query(dropDatabaseSQL) } -func (ch *TestClickHouse) checkData(t *testing.T, r *require.Assertions, data TestDataStruct) error { +func (env *TestEnvironment) checkData(t *testing.T, r *require.Assertions, data TestDataStruct) error { assert.NotNil(t, data.Rows) data.Database += "_" + t.Name() data.Name += "_" + t.Name() @@ -3056,7 +3109,7 @@ func (ch *TestClickHouse) checkData(t *testing.T, r *require.Assertions, data Te selectSQL = fmt.Sprintf("SELECT %s(number, number+1) AS test_result FROM numbers(%d)", data.Name, len(data.Rows)) } log.Debug(selectSQL) - rows, err := ch.chbackend.GetConn().Query(context.Background(), selectSQL) + rows, err := env.ch.GetConn().Query(context.Background(), selectSQL) if err != nil { return err } @@ -3104,14 +3157,14 @@ func (ch *TestClickHouse) checkData(t *testing.T, r *require.Assertions, data Te return nil } -func (ch *TestClickHouse) checkDatabaseEngine(t 
*testing.T, data TestDataStruct) error { +func (env *TestEnvironment) checkDatabaseEngine(t *testing.T, data TestDataStruct) error { data.Database += "_" + t.Name() if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") <= 0 { return nil } selectSQL := fmt.Sprintf("SELECT engine FROM system.databases WHERE name='%s'", data.Database) var engine string - if err := ch.chbackend.SelectSingleRowNoCtx(&engine, selectSQL); err != nil { + if err := env.ch.SelectSingleRowNoCtx(&engine, selectSQL); err != nil { return err } assert.True( @@ -3121,43 +3174,48 @@ func (ch *TestClickHouse) checkDatabaseEngine(t *testing.T, data TestDataStruct) return nil } -func (ch *TestClickHouse) queryWithNoError(r *require.Assertions, query string, args ...interface{}) { - err := ch.chbackend.Query(query, args...) +func (env *TestEnvironment) queryWithNoError(r *require.Assertions, query string, args ...interface{}) { + err := env.ch.Query(query, args...) if err != nil { - ch.chbackend.Log.Errorf("queryWithNoError error: %v", err) + env.ch.Log.Errorf("queryWithNoError error: %v", err) } r.NoError(err) } var dockerExecTimeout = 180 * time.Second -//func dockerExecBackground(container string, cmd ...string) error { -// out, err := dockerExecBackgroundOut(container, cmd...) -// log.Info(out) -// return err -//} +func (env *TestEnvironment) DockerExecBackground(container string, cmd ...string) error { + out, err := env.DockerExecBackgroundOut(container, cmd...) + log.Info(out) + return err +} + +func (env *TestEnvironment) DockerExecBackgroundOut(container string, cmd ...string) (string, error) { + dcmd := append(env.GetDefaultComposeCommand(), "exec", "-d", container) + dcmd = append(dcmd, cmd...) + return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...) +} -//func dockerExecBackgroundOut(container string, cmd ...string) (string, error) { -// dcmd := []string{"exec", "-d", container} -// dcmd = append(dcmd, cmd...) 
-// return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...) -//} +func (env *TestEnvironment) GetDefaultComposeCommand() []string { + return []string{"compose", "-f", path.Join(os.Getenv("CUR_DIR"),os.Getenv("COMPOSE_FILE")), "--progress", "plain", "--project-name", env.ProjectName} +} -func dockerExec(container string, cmd ...string) error { - out, err := dockerExecOut(container, cmd...) +func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { + out, err := env.DockerExecOut(container, cmd...) log.Info(out) return err } -func dockerExecOut(container string, cmd ...string) (string, error) { - dcmd := []string{"exec", container} +func (env *TestEnvironment) DockerExecOut(container string, cmd ...string) (string, error) { + dcmd := append(env.GetDefaultComposeCommand(), "exec", container) dcmd = append(dcmd, cmd...) return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...) } -func dockerCP(src, dst string) error { +func (env *TestEnvironment) DockerCP(src, dst string) error { ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second) - dcmd := []string{"cp", src, dst} + dcmd := append(env.GetDefaultComposeCommand(), "cp", src, dst) + log.Infof("docker %s", strings.Join(dcmd, " ")) out, err := exec.CommandContext(ctx, "docker", dcmd...).CombinedOutput() log.Info(string(out)) @@ -3165,6 +3223,19 @@ func dockerCP(src, dst string) error { return err } +func (env *TestEnvironment) InstallDebIfNotExists(r *require.Assertions, container string, pkgs ...string) { + err := env.DockerExec( + container, + "bash", "-xec", + fmt.Sprintf( + "export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install --no-install-recommends -y %s; fi", + len(pkgs), 
"^ii\\s+"+strings.Join(pkgs, "|^ii\\s+"), strings.Join(pkgs, " "), + ), + ) + r.NoError(err) +} + + func toDate(s string) time.Time { result, _ := time.Parse("2006-01-02", s) return result @@ -3175,7 +3246,7 @@ func toTS(s string) time.Time { return result } -func isTableSkip(ch *TestClickHouse, data TestDataStruct, dataExists bool) bool { +func isTableSkip(ch *TestEnvironment, data TestDataStruct, dataExists bool) bool { if strings.Contains(data.DatabaseEngine, "PostgreSQL") && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") <= 0 { return true } @@ -3187,7 +3258,7 @@ func isTableSkip(ch *TestClickHouse, data TestDataStruct, dataExists bool) bool "SELECT engine FROM system.tables WHERE name='%s' AND database='%s'", data.Name, data.Database, ) - _ = ch.chbackend.Select(&dictEngines, dictSQL) + _ = ch.ch.Select(&dictEngines, dictSQL) return len(dictEngines) == 0 } return os.Getenv("COMPOSE_FILE") == "docker-compose.yml" && (strings.Contains(data.Name, "jbod#$_table") || data.IsDictionary) @@ -3216,14 +3287,3 @@ func isTestShouldSkip(envName string) bool { return isSkip } -func installDebIfNotExists(r *require.Assertions, container string, pkgs ...string) { - err := dockerExec( - container, - "bash", "-xec", - fmt.Sprintf( - "export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} +; apt-get -y update; apt-get install --no-install-recommends -y %s; fi", - len(pkgs), "^ii\\s+"+strings.Join(pkgs, "|^ii\\s+"), strings.Join(pkgs, " "), - ), - ) - r.NoError(err) -} diff --git a/test/integration/run.sh b/test/integration/run.sh index bf1a468c..3d091565 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -2,7 +2,7 @@ set -x set -e -CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" +export CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 
2>&1 && pwd)" mkdir -p "${CUR_DIR}/_coverage_/" rm -rf "${CUR_DIR}/_coverage_/*" @@ -44,8 +44,6 @@ fi docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} down --remove-orphans docker volume prune -f make clean build-race-docker build-race-fips-docker -docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} up -d -docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} exec minio mc alias list go test -parallel ${RUN_PARALLEL:-$(nproc)} -timeout ${TESTS_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" From ef5e757b44e142499b362e42c28cd339c186eff5 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 24 Jul 2024 20:32:27 +0400 Subject: [PATCH 02/54] debug https://github.com/Altinity/clickhouse-backup/issues/888 --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 572621d8..283307b6 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,7 +7,7 @@ on: push: branches: - - * + - "*" jobs: build: From fd22115388d56d4303673a827b64cc189dc7cc66 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 10:38:09 +0400 Subject: [PATCH 03/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, refactoring and try to execute RUN_PARELLEL: 4 --- .github/workflows/build.yaml | 7 +- pkg/utils/utils.go | 4 +- test/integration/docker-compose.yml | 2 +- test/integration/docker-compose_advanced.yml | 2 +- test/integration/integration_test.go | 837 +++++++++---------- test/integration/run.sh | 14 +- 6 files changed, 417 insertions(+), 449 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 283307b6..d085cdde 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,8 +269,10 @@ jobs: - name: Running integration tests env: + RUN_PARALLEL: 4 GOROOT: ${{ 
env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} + # TEST_LOG_LEVEL: "debug" # options for advanced debug CI/CD # RUN_TESTS: "TestIntegrationS3" # LOG_LEVEL: "debug" @@ -311,9 +313,10 @@ jobs: export COMPOSE_FILE=docker-compose.yml fi - export CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" + export CUR_DIR="${CUR_DIR}/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" - go test -parallel 4 -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go + go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: GOROOT: ${{ env.GOROOT_1_22_X64 }} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e41eb5ca..3028df6a 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -61,13 +61,13 @@ func HumanizeDuration(d time.Duration) string { func ExecCmd(ctx context.Context, timeout time.Duration, cmd string, args ...string) error { out, err := ExecCmdOut(ctx, timeout, cmd, args...) 
- log.Info(out) + log.Debug(out) return err } func ExecCmdOut(ctx context.Context, timeout time.Duration, cmd string, args ...string) (string, error) { ctx, cancel := context.WithTimeout(ctx, timeout) - log.Infof("%s %s", cmd, strings.Join(args, " ")) + log.Debugf("%s %s", cmd, strings.Join(args, " ")) out, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput() cancel() return string(out), err diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 54c4abcd..5a3267b9 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -30,7 +30,7 @@ services: MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: test: curl -sL http://localhost:9000/ - interval: 10s + interval: 3s retries: 30 volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 869872bf..84e8bcf9 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -141,7 +141,7 @@ services: - sleep infinity healthcheck: test: bash -c "exit 0" - interval: 30s + interval: 3s timeout: 1s retries: 5 start_period: 1s diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 20afdd23..ab8cd5e0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -41,6 +41,9 @@ func init() { if os.Getenv("LOG_LEVEL") != "" { logLevel = os.Getenv("LOG_LEVEL") } + if os.Getenv("TEST_LOG_LEVEL") != "" { + logLevel = os.Getenv("TEST_LOG_LEVEL") + } log.SetLevelFromString(logLevel) } @@ -411,27 +414,35 @@ var defaultIncrementData = []TestDataStruct{ }, } -func NewTestEnvironment(t *testing.T, r *require.Assertions) *TestEnvironment { +func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { t.Helper() + r := require.New(t) if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { 
t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } env := TestEnvironment{ - ProjectName: strings.ToLower(t.Name()), + ProjectName: "all", + } + if os.Getenv("RUN_PARALLEL") != "1" { + t.Logf("[%s] executing in parallel mode", t.Name()) + t.Parallel() + env.ProjectName = strings.ToLower(t.Name()) + upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") + r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", upCmd...)) + t.Cleanup(func() { + downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") + r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", downCmd...)) + }) + } else { + t.Logf("[%s] executing in sequence mode", t.Name()) } - upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") - r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", upCmd...)) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54394") <= 0 { r := require.New(&testing.T{}) env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") } - t.Cleanup(func() { - downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") - r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", downCmd...)) - }) - return &env + return &env, r } // TestS3NoDeletePermission - no parallel @@ -440,26 +451,24 @@ func TestS3NoDeletePermission(t *testing.T) { t.Skip("Skipping Advanced integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 2*time.Second) defer env.ch.Close() - r.NoError(env.DockerExec("minio", "/bin/minio_nodelete.sh")) + env.DockerExecNoError(r, "minio", "/bin/minio_nodelete.sh") 
r.NoError(env.DockerCP("config-s3-nodelete.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) generateTestData(t, r, env, "S3", defaultTestData) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create_remote", "no_delete_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup") r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} dropDatabasesFromTestDataDataSet(t, r, env, databaseList) r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "list", "remote") env.checkObjectStorageIsEmpty(t, r, "S3") } @@ -469,9 +478,7 @@ func TestRBAC(t *testing.T) { if compareVersion(chVersion, "20.4") < 0 { t.Skipf("Test skipped, RBAC not available for %s version", 
os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) testRBACScenario := func(config string) { env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) @@ -485,14 +492,14 @@ func TestRBAC(t *testing.T) { createRBACObjects := func(drop bool) { if drop { - log.Info("drop all RBAC related objects") + log.Debug("drop all RBAC related objects") env.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") env.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") env.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") env.queryWithNoError(r, "DROP ROLE `test.rbac-name`") env.queryWithNoError(r, "DROP USER `test.rbac-name`") } - log.Info("create RBAC related objects") + log.Debug("create RBAC related objects") env.queryWithNoError(r, "CREATE SETTINGS PROFILE `test.rbac-name` SETTINGS max_execution_time=60") env.queryWithNoError(r, "CREATE ROLE `test.rbac-name` SETTINGS PROFILE `test.rbac-name`") env.queryWithNoError(r, "CREATE USER `test.rbac-name` IDENTIFIED BY 'test_rbac_password' DEFAULT ROLE `test.rbac-name`") @@ -501,18 +508,18 @@ func TestRBAC(t *testing.T) { } createRBACObjects(false) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "--env", "S3_COMPRESSION_FORMAT=zstd", "test_rbac_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) - r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", config, "create", "--rbac", "--rbac-only", "--env", "S3_COMPRESSION_FORMAT=zstd", "test_rbac_backup") + env.DockerExecNoError(r, 
"clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup upload test_rbac_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup") + env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") - log.Info("create conflicted RBAC objects") + log.Debug("create conflicted RBAC objects") createRBACObjects(true) - r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") - log.Info("download+restore RBAC") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup")) + log.Debug("download+restore RBAC") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") log.Debug(out) @@ -523,13 +530,13 @@ func TestRBAC(t *testing.T) { log.Debug(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) - r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") env.ch.Close() - // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) + // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, append(env.GetDefaultComposeCommand(), "restart", "clickhouse"))) env.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) - r.NoError(env.DockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) + env.DockerExecNoError(r, 
"clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") rbacTypes := map[string]string{ "PROFILES": "test.rbac-name", @@ -546,19 +553,19 @@ func TestRBAC(t *testing.T) { r.NoError(err) found := false for _, row := range rbacRows { - log.Infof("rbacType=%s expectedValue=%s row.Name=%s", rbacType, expectedValue, row.Name) + log.Debugf("rbacType=%s expectedValue=%s row.Name=%s", rbacType, expectedValue, row.Name) if expectedValue == row.Name { found = true break } } if !found { - //r.NoError(env.DockerExec("clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log")) + //env.DockerExecNoError(r, "clickhouse", "cat", "/var/log/clickhouse-server/clickhouse-server.log") r.Failf("wrong RBAC", "SHOW %s, %#v doesn't contain %#v", rbacType, rbacRows, expectedValue) } } - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "remote", "test_rbac_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "remote", "test_rbac_backup") env.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") env.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") @@ -582,24 +589,22 @@ func TestRBAC(t *testing.T) { // TestConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container // need clickhouse-server restart, no parallel func TestConfigs(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) testConfigsScenario := func(config string) { env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") env.queryWithNoError(r, "CREATE TABLE default.test_configs (v 
UInt64) ENGINE=MergeTree() ORDER BY tuple()") - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml")) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "echo '1' > /etc/clickhouse-server/users.d/test_config.xml") - r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup")) + env.DockerExecNoError(r, "clickhouse", "clickhouse-backup", "-c", config, "create", "--configs", "--configs-only", "test_configs_backup") env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") compression := "" if !strings.Contains(config, "embedded") { compression = "--env AZBLOB_COMPRESSION_FORMAT=zstd --env S3_COMPRESSION_FORMAT=zstd" } - r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "clickhouse-backup upload "+compression+" --env CLICKHOUSE_BACKUP_CONFIG="+config+" --env S3_COMPRESSION_FORMAT=none --env ALLOW_EMPTY_BACKUPS=1 test_configs_backup")) - r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) + env.DockerExecNoError(r, "clickhouse", "bash", "-xec", "clickhouse-backup upload "+compression+" --env CLICKHOUSE_BACKUP_CONFIG="+config+" --env S3_COMPRESSION_FORMAT=none --env ALLOW_EMPTY_BACKUPS=1 test_configs_backup") + env.DockerExecNoError(r, "clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup") env.queryWithNoError(r, "SYSTEM RELOAD CONFIG") env.ch.Close() @@ -608,12 +613,12 @@ func TestConfigs(t *testing.T) { var settings string r.NoError(env.ch.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) if settings != "1" { - r.NoError(env.DockerExec("clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/")) + env.DockerExecNoError(r, "clickhouse", "grep", "empty_result_for_aggregation_by_empty_set", "-r", "/var/lib/clickhouse/preprocessed_configs/") } 
r.Equal("1", settings, "expect empty_result_for_aggregation_by_empty_set=1") - r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) - r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup")) + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml") + env.DockerExecNoError(r, "clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" ALLOW_EMPTY_BACKUPS=1 clickhouse-backup download test_configs_backup") r.NoError(env.ch.Query("SYSTEM RELOAD CONFIG")) env.ch.Close() @@ -623,7 +628,7 @@ func TestConfigs(t *testing.T) { r.NoError(env.ch.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) r.Equal("0", settings, "expect empty_result_for_aggregation_by_empty_set=0") - r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup")) + env.DockerExecNoError(r, "clickhouse", "bash", "-xec", "CLICKHOUSE_BACKUP_CONFIG="+config+" CLICKHOUSE_RESTART_COMMAND='sql:SYSTEM RELOAD CONFIG' clickhouse-backup restore --rm --configs --configs-only test_configs_backup") env.ch.Close() env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) @@ -636,9 +641,9 @@ func TestConfigs(t *testing.T) { r.NoError(env.ch.SelectSingleRowNoCtx(&isTestConfigsTablePresent, "SELECT count() FROM system.tables WHERE database='default' AND name='test_configs' SETTINGS empty_result_for_aggregation_by_empty_set=1")) r.Equal(0, isTestConfigsTablePresent, "expect default.test_configs is not present") - r.NoError(env.DockerExec("clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup")) - r.NoError(env.DockerExec("clickhouse", 
"clickhouse-backup", "-c", config, "delete", "remote", "test_configs_backup")) - r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml")) + env.DockerExecNoError(r, "clickhouse", "clickhouse-backup", "-c", config, "delete", "local", "test_configs_backup") + env.DockerExecNoError(r, "clickhouse", "clickhouse-backup", "-c", config, "delete", "remote", "test_configs_backup") + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/etc/clickhouse-server/users.d/test_config.xml") env.ch.Close() } @@ -656,44 +661,42 @@ func TestConfigs(t *testing.T) { // TestLongListRemote - no parallel, cause need to restart minio func TestLongListRemote(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() totalCacheCount := 20 testBackupName := "test_list_remote" for i := 0; i < totalCacheCount; i++ { - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i))) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i)) } - r.NoError(env.DockerExec("clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) startFirst := time.Now() - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") noCacheDuration := time.Since(startFirst) - r.NoError(env.DockerExec("clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")) + env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3") startCashed := time.Now() - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cashedDuration := time.Since(startCashed) r.Greater(noCacheDuration, cashedDuration) - r.NoError(env.DockerExec("clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")) - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "minio")) + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) startCacheClear := time.Now() - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cacheClearDuration := time.Since(startCacheClear) r.Greater(cacheClearDuration, cashedDuration) - log.Infof("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) + 
log.Debugf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) testListRemoteAllBackups := make([]string, totalCacheCount) for i := 0; i < totalCacheCount; i++ { @@ -705,9 +708,7 @@ func TestLongListRemote(t *testing.T) { const apiBackupNumber = 5 func TestServerAPI(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 10*time.Second) defer func() { env.ch.Close() @@ -720,8 +721,8 @@ func TestServerAPI(t *testing.T) { randFields := 10 fillDatabaseForAPIServer(maxTables, minFields, randFields, env, r, fieldTypes) - log.Info("Run `clickhouse-backup server --watch` in background") - r.NoError(env.DockerExec("-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) + log.Debug("Run `clickhouse-backup server --watch` in background") + env.DockerExecNoError(r, "-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log") time.Sleep(1 * time.Second) testAPIBackupVersion(r, env) @@ -734,7 +735,7 @@ func TestServerAPI(t *testing.T) { testAPIBackupTablesRemote(r, env) - log.Info("Check /backup/actions") + log.Debug("Check /backup/actions") env.queryWithNoError(r, "SELECT count() FROM system.backup_actions") testAPIBackupList(t, r, env) @@ -753,7 +754,7 @@ func TestServerAPI(t *testing.T) { testAPIBackupClean(r, env) - r.NoError(env.DockerExec("clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup")) + env.DockerExecNoError(r, "clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup") r.NoError(env.dropDatabase("long_schema")) } @@ -824,7 +825,7 @@ func testAPIBackupActions(r *require.Assertions, env *TestEnvironment) { } func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/watch + /backup/kill") + 
log.Debug("Check /backup/watch + /backup/kill") runKillCommand := func(command string) { out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) log.Debug(out) @@ -864,15 +865,15 @@ func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/delete/{where}/{name}") + log.Debug("Check /backup/delete/{where}/{name}") for i := 1; i <= apiBackupNumber; i++ { out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) - log.Infof(out) + log.Debugf(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) - log.Infof(out) + log.Debugf(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -882,7 +883,7 @@ func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { r.Contains(out, "clickhouse_backup_last_delete_status 1") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XGET 'http://localhost:7171/backup/list'")) - log.Infof(out) + log.Debugf(out) r.NoError(err) scanner := bufio.NewScanner(strings.NewReader(out)) for scanner.Scan() { @@ -897,7 +898,7 @@ func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { listItem := backupJSON{} r.NoError(json.Unmarshal(scanner.Bytes(), &listItem)) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name)) - log.Infof(out) + log.Debugf(out) r.NoError(err) } @@ 
-906,15 +907,15 @@ func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupClean(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/clean/ /backup/clean_remote_broken/ and /backup/actions fot these two commands") + log.Debug("Check /backup/clean/ /backup/clean_remote_broken/ and /backup/actions fot these two commands") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean'")) - log.Infof(out) + log.Debugf(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean/remote_broken'")) - log.Infof(out) + log.Debugf(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -923,7 +924,7 @@ func testAPIBackupClean(r *require.Assertions, env *TestEnvironment) { } func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /metrics clickhouse_backup_last_backup_size_remote") + log.Debug("Check /metrics clickhouse_backup_last_backup_size_remote") var lastRemoteSize int64 r.NoError(env.ch.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) @@ -941,17 +942,17 @@ func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { r.NoError(err) r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) - log.Info("Check /metrics clickhouse_backup_number_backups_*") + log.Debug("Check /metrics clickhouse_backup_number_backups_*") r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) // +1 watch backup - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "list", "remote")) + env.DockerExecNoError(r, 
"clickhouse-backup", "clickhouse-backup", "list", "remote") r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_remote %d", apiBackupNumber+1)) r.Contains(out, "clickhouse_backup_number_backups_local_expected 0") r.Contains(out, "clickhouse_backup_number_backups_remote_expected 0") } func testAPIDeleteLocalDownloadRestore(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") + log.Debug("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", @@ -974,7 +975,7 @@ func testAPIDeleteLocalDownloadRestore(r *require.Assertions, env *TestEnvironme } func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/list") + log.Debug("Check /backup/list") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") log.Debug(out) r.NoError(err) @@ -983,7 +984,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Info("Check /backup/list/local") + log.Debug("Check /backup/list/local") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") log.Debug(out) r.NoError(err) @@ -992,7 +993,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Info("Check 
/backup/list/remote") + log.Debug("Check /backup/list/remote") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") log.Debug(out) r.NoError(err) @@ -1003,7 +1004,7 @@ func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment } func testAPIBackupUpload(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/upload") + log.Debug("Check /backup/upload") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", @@ -1024,7 +1025,7 @@ func testAPIBackupUpload(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/tables") + log.Debug("Check /backup/tables") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"", @@ -1039,7 +1040,7 @@ func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { r.NotContains(out, "INFORMATION_SCHEMA") r.NotContains(out, "information_schema") - log.Info("Check /backup/tables/all") + log.Debug("Check /backup/tables/all") out, err = env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", @@ -1059,7 +1060,7 @@ func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { func testAPIBackupTablesRemote(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/tables?remote_backup=z_backup_1") + log.Debug("Check /backup/tables?remote_backup=z_backup_1") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables?remote_backup=z_backup_1\"", @@ -1077,7 +1078,7 @@ func testAPIBackupTablesRemote(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupVersion(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/version") + log.Debug("Check /backup/version") cliVersion, err := 
env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup --version 2>/dev/null --version | grep 'Version' | cut -d ':' -f 2 | xargs") r.NoError(err) apiVersion, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sL http://localhost:7171/backup/version | jq -r .version") @@ -1089,7 +1090,7 @@ func testAPIBackupVersion(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupCreate(r *require.Assertions, env *TestEnvironment) { - log.Info("Check /backup/create") + log.Debug("Check /backup/create") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", @@ -1106,7 +1107,7 @@ func testAPIBackupCreate(r *require.Assertions, env *TestEnvironment) { } func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestEnvironment, r *require.Assertions, fieldTypes []string) { - log.Infof("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) + log.Debugf("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") for i := 0; i < maxTables; i++ { sql := fmt.Sprintf("CREATE TABLE long_schema.t%d (id UInt64", i) @@ -1120,20 +1121,18 @@ func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch * sql = fmt.Sprintf("INSERT INTO long_schema.t%d(id) SELECT number FROM numbers(100)", i) ch.queryWithNoError(r, sql) } - log.Info("...DONE") + log.Debug("...DONE") } func TestSkipNotExistsTable(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.1") < 0 { t.Skip("TestSkipNotExistsTable too small time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... 
FREEZE PARTITION`") } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() - log.Info("Check skip not exist errors") + log.Debug("Check skip not exist errors") env.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS freeze_not_exists.freeze_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" ifNotExistsInsertSQL := "INSERT INTO freeze_not_exists.freeze_not_exists SELECT number FROM numbers(1000)" @@ -1143,7 +1142,9 @@ func TestSkipNotExistsTable(t *testing.T) { freezeErrorHandled := false pauseChannel := make(chan int64) resumeChannel := make(chan int64) - env.ch.Config.LogSQLQueries = true + if os.Getenv("TEST_LOG_LEVEL") == "debug" { + env.ch.Config.LogSQLQueries = true + } wg := sync.WaitGroup{} wg.Add(2) go func() { @@ -1160,15 +1161,15 @@ func TestSkipNotExistsTable(t *testing.T) { err = env.ch.Query(ifNotExistsInsertSQL) r.NoError(err) if i < 5 { - log.Infof("pauseChannel <- %d", 0) + log.Debugf("pauseChannel <- %d", 0) pauseChannel <- 0 } else { - log.Infof("pauseChannel <- %d", pause/i) + log.Debugf("pauseChannel <- %d", pause/i) pauseChannel <- pause / i } startTime := time.Now() out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create --table freeze_not_exists.freeze_not_exists "+testBackupName) - log.Info(out) + log.Debug(out) if (err != nil && (strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || (err == nil && !strings.Contains(out, "can't freeze")) { parseTime := func(line string) time.Time { @@ -1201,9 +1202,9 @@ func TestSkipNotExistsTable(t *testing.T) { if strings.Contains(out, "code: 60") && err == nil { freezeErrorHandled = true - log.Info("CODE 60 catched") + log.Debug("CODE 60 catched") 
<-resumeChannel - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName) break } if err == nil { @@ -1219,11 +1220,11 @@ func TestSkipNotExistsTable(t *testing.T) { wg.Done() }() for pause := range pauseChannel { - log.Infof("%d <- pauseChannel", pause) + log.Debugf("%d <- pauseChannel", pause) if pause > 0 { pauseStart := time.Now() time.Sleep(time.Duration(pause) * time.Nanosecond) - log.Infof("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) + log.Debugf("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) err = env.ch.DropTable(clickhouse.Table{Database: "freeze_not_exists", Name: "freeze_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion, "") r.NoError(err) } @@ -1246,9 +1247,7 @@ func TestSkipNotExistsTable(t *testing.T) { } func TestSkipTablesAndSkipTableEngines(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() version, err := env.ch.GetVersion(context.Background()) @@ -1272,23 +1271,23 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.NoError(env.ch.TurnAnalyzerOnIfNecessary(version, query, allowExperimentalAnalyzer)) } // create - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_table_pattern")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_table_pattern") 
r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_memory.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_memory.json") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_mv.json") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_live_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_window_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_table_pattern/metadata/test_skip_tables/test_window_view.json") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,windowview,liveview 
clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_engines")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_merge_tree.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,windowview,liveview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create skip_engines") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_merge_tree.json") r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_live_view.json")) } @@ -1296,91 +1295,91 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/skip_engines/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_table_pattern")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_engines")) + env.DockerExecNoError(r, "clickhouse-backup", 
"bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_table_pattern") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local skip_engines") //upload - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml create test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup") + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json") r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") 
+ env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup") + 
env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json") r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { r.Error(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - 
r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup") + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json") + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") + env.DockerExecNoError(r, "minio", "bash", "-ce", "ls -la /bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(env.DockerExec("minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + 
env.DockerExecNoError(r, "minio", "ls", "-la", "/bitnami/minio/data/clickhouse/backup/cluster/0/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json") } //download - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml upload test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_merge_tree clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup") r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", 
"/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 
CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json") r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { r.Error(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) + 
env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json")) - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "USE_RESUMABLE_STATE=0 clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_merge_tree.json") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_memory.json") + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_mv.json") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "ls -la /var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/*inner*.json") if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", 
"/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_live_view.json") } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { - r.NoError(env.DockerExec("clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json")) + env.DockerExecNoError(r, "clickhouse-backup", "ls", "-la", "/var/lib/clickhouse/backup/test_skip_full_backup/metadata/test_skip_tables/test_window_view.json") } //restore @@ -1389,7 +1388,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } else { env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLES=*.test_memory clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup") result := uint64(0) r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables' AND name!='test_memory'")) expectedTables := uint64(3) @@ -1413,8 +1412,8 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } else { env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --schema test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --data 
test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --schema test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "CLICKHOUSE_SKIP_TABLE_ENGINES=memory,materializedview,liveview,windowview clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore --data test_skip_full_backup") result = uint64(0) expectedTables = uint64(2) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") >= 0 { @@ -1431,7 +1430,7 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } else { env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore test_skip_full_backup") result = uint64(0) r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM system.tables WHERE database='test_skip_tables'")) expectedTables = uint64(4) @@ -1452,14 +1451,12 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } else { env.queryWithNoError(r, "DROP DATABASE test_skip_tables") } - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete 
remote test_skip_full_backup") } func TestTablePatterns(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) defer env.ch.Close() @@ -1472,7 +1469,7 @@ func TestTablePatterns(t *testing.T) { fullCleanup(t, r, env, []string{testBackupName}, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml") generateTestData(t, r, env, "S3", defaultTestData) if createPattern { - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) out, err := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) r.NoError(err) r.Contains(out, dbNameOrdinaryTest) @@ -1482,7 +1479,7 @@ func TestTablePatterns(t *testing.T) { r.Contains(out, dbNameOrdinaryTest) r.NotContains(out, dbNameAtomicTest) } else { - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create_remote", testBackupName) out, err := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "tables", testBackupName) r.NoError(err) r.Contains(out, dbNameOrdinaryTest) @@ -1493,13 +1490,13 @@ func TestTablePatterns(t *testing.T) { r.Contains(out, dbNameAtomicTest) } - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", 
"delete", "local", testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", testBackupName) dropDatabasesFromTestDataDataSet(t, r, env, databaseList) if restorePattern { - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", "--tables", " "+dbNameOrdinaryTest+".*", testBackupName) } else { - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore_remote", testBackupName) } restored := uint64(0) @@ -1534,9 +1531,7 @@ func TestProjections(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") == -1 { t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() @@ -1545,15 +1540,15 @@ func TestProjections(t *testing.T) { r.NoError(err) env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number DAY, number FROM numbers(5)") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_full")) - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create_remote", "test_backup_projection_full") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_full") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_full") env.queryWithNoError(r, "INSERT INTO default.table_with_projection SELECT today() - INTERVAL number WEEK, number FROM numbers(5)") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create_remote", "--diff-from-remote", "test_backup_projection_full", "test_backup_projection_increment")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create_remote", "--diff-from-remote", "test_backup_projection_full", "test_backup_projection_increment") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore_remote", "--rm", "test_backup_projection_increment") var counts uint64 r.NoError(env.ch.SelectSingleRowNoCtx(&counts, "SELECT count() FROM default.table_with_projection")) @@ -1567,10 +1562,10 @@ func TestProjections(t *testing.T) { err = env.ch.Query("DROP TABLE default.table_with_projection NO DELAY") r.NoError(err) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment")) - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_increment") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment") } func TestCheckSystemPartsColumns(t *testing.T) { @@ -1579,9 +1574,7 @@ func TestCheckSystemPartsColumns(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "23.3") == -1 { t.Skipf("Test skipped, system.parts_columns have inconsistency only in 23.3+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() version, err = env.ch.GetVersion(context.Background()) @@ -1597,8 +1590,8 @@ func TestCheckSystemPartsColumns(t *testing.T) { env.queryWithNoError(r, "ALTER TABLE "+t.Name()+".test_system_parts_columns MODIFY COLUMN dt Nullable(DateTime('Europe/Moscow')), MODIFY COLUMN v Nullable(UInt64), MODIFY COLUMN e Enum16('test2'=1, 'test'=2)", t.Name()) env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number, 'test2' FROM numbers(10)") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns") + env.DockerExecNoError(r, 
"clickhouse-backup", "clickhouse-backup", "delete", "local", "test_system_parts_columns") r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, createSQL, "", false, version, "")) @@ -1611,7 +1604,7 @@ func TestCheckSystemPartsColumns(t *testing.T) { if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "code: 524") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) - t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number FROM numbers(10)") r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) @@ -1628,9 +1621,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { t.Skip("Skipping Advanced integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) backupNames := make([]string, 5) @@ -1643,10 +1634,10 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { generateTestData(t, r, env, "S3", defaultTestData) for backupNumber, backupName := range backupNames { if backupNumber == 0 { - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote %s", backupName))) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote %s", backupName)) } else { incrementData = generateIncrementTestData(t, r, env, "S3", incrementData, 
backupNumber) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[backupNumber-1], backupName))) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", fmt.Sprintf("BACKUPS_TO_KEEP_REMOTE=3 CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create_remote --diff-from-remote=%s %s", backupNames[backupNumber-1], backupName)) } } out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") @@ -1654,10 +1645,10 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { // shall not delete any backup, cause all deleted backups have links as required in other backups for _, backupName := range backupNames { r.Contains(out, backupName) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", backupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", backupName) } latestIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-1) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", latestIncrementBackup) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml list local") r.NoError(err) prevIncrementBackup := fmt.Sprintf("keep_remote_backup_%d", len(backupNames)-2) @@ -1670,7 +1661,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { r.NotContains(out, backupName) } } - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup) var res uint64 r.NoError(env.ch.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name()))) r.Equal(uint64(100+20*4), res) @@ -1683,9 +1674,7 @@ func TestSyncReplicaTimeout(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.11") == -1 { t.Skipf("Test skipped, SYNC REPLICA ignore receive_timeout for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 2*time.Second) defer env.ch.Close() @@ -1710,10 +1699,10 @@ func TestSyncReplicaTimeout(t *testing.T) { env.queryWithNoError(r, "INSERT INTO "+t.Name()+".repl1 SELECT number FROM numbers(100)") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".repl*", "test_not_synced_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_not_synced_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_not_synced_backup")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_not_synced_backup")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".repl*", "test_not_synced_backup") + env.DockerExecNoError(r, "clickhouse-backup", 
"clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_not_synced_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_not_synced_backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_not_synced_backup") env.queryWithNoError(r, "SYSTEM START REPLICATED SENDS "+t.Name()+".repl1") env.queryWithNoError(r, "SYSTEM START FETCHES "+t.Name()+".repl2") @@ -1726,11 +1715,9 @@ func TestGetPartitionId(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "19.17") == -1 { t.Skipf("Test skipped, is_in_partition_key not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - ch := NewTestEnvironment(t, r) - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer ch.ch.Close() + env, r := NewTestEnvironment(t) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) + defer env.ch.Close() type testData struct { CreateTableSQL string @@ -1782,11 +1769,11 @@ func TestGetPartitionId(t *testing.T) { "", }, } - if isAtomic, _ := ch.ch.IsAtomic("default"); !isAtomic { + if isAtomic, _ := env.ch.IsAtomic("default"); !isAtomic { testCases[0].CreateTableSQL = strings.Replace(testCases[0].CreateTableSQL, "UUID 'b45e751f-6c06-42a3-ab4a-f5bb9ac3716e'", "", 1) } for _, tc := range testCases { - partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), ch.ch, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) + partitionId, partitionName, err := partition.GetPartitionIdAndName(context.Background(), env.ch, tc.Database, tc.Table, tc.CreateTableSQL, tc.Partition) assert.NoError(t, err) assert.Equal(t, tc.ExpectedId, partitionId) assert.Equal(t, tc.ExpectedName, partitionName) @@ -1794,9 +1781,7 @@ func TestGetPartitionId(t *testing.T) { } 
func TestRestoreMutationInProgress(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 5*time.Second) defer env.ch.Close() zkPath := "/clickhouse/tables/{shard}/" + t.Name() + "/test_restore_mutation_in_progress" @@ -1822,7 +1807,7 @@ func TestRestoreMutationInProgress(t *testing.T) { if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) - t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } attrs := make([]struct { @@ -1840,19 +1825,19 @@ func TestRestoreMutationInProgress(t *testing.T) { r.NotEqual(nil, err) errStr = strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout")) - t.Logf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } - r.NoError(env.DockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical")) + env.DockerExecNoError(r, "clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical") // backup with check consistency out, createErr := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") r.NotEqual(createErr, nil) r.Contains(out, "have inconsistent data types") - t.Log(out) + log.Debug(out) // backup without check consistency out, createErr = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--skip-check-parts-columns", 
"--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") - t.Log(out) + log.Debug(out) r.NoError(createErr) r.NotContains(out, "have inconsistent data types") @@ -1902,22 +1887,20 @@ func TestRestoreMutationInProgress(t *testing.T) { if expectedSelectError != "" { r.Error(selectErr) r.Contains(strings.ToLower(selectErr.Error()), expectedSelectError) - t.Logf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) + log.Debugf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) } else { r.NoError(selectErr) } - r.NoError(env.DockerExec("clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical")) + env.DockerExecNoError(r, "clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations FORMAT Vertical") r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) r.NoError(env.dropDatabase(t.Name())) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress") } func TestInnerTablesMaterializedView(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) defer env.ch.Close() @@ -1928,7 +1911,7 @@ func TestInnerTablesMaterializedView(t *testing.T) { env.queryWithNoError(r, "CREATE MATERIALIZED VIEW test_mv.mv_with_dst TO test_mv.dst_table AS SELECT v FROM test_mv.src_table") env.queryWithNoError(r, "INSERT INTO test_mv.src_table SELECT number FROM numbers(100)") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mv", 
"--tables=test_mv.mv_with*,test_mv.dst*")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*") dropSQL := "DROP DATABASE test_mv" isAtomic, err := env.ch.IsAtomic("test_mv") r.NoError(err) @@ -1938,7 +1921,7 @@ func TestInnerTablesMaterializedView(t *testing.T) { env.queryWithNoError(r, dropSQL) var rowCnt uint64 - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*") r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) @@ -1946,65 +1929,63 @@ func TestInnerTablesMaterializedView(t *testing.T) { r.NoError(env.dropDatabase("test_mv")) // https://github.com/Altinity/clickhouse-backup/issues/777 - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--delete-source", "--tables=test_mv.mv_with*,test_mv.dst*")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "upload", "test_mv", "--delete-source", "--tables=test_mv.mv_with*,test_mv.dst*") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "download", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*") - r.NoError(env.DockerExec("clickhouse-backup", 
"clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mv", "--tables=test_mv.mv_with*,test_mv.dst*") r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_inner")) r.Equal(uint64(100), rowCnt) r.NoError(env.ch.SelectSingleRowNoCtx(&rowCnt, "SELECT count() FROM test_mv.mv_with_dst")) r.Equal(uint64(100), rowCnt) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv") } func TestFIPS(t *testing.T) { if os.Getenv("QA_AWS_ACCESS_KEY") == "" { t.Skip("QA_AWS_ACCESS_KEY is empty, TestFIPS will skip") } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) defer env.ch.Close() fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) - r.NoError(env.DockerExec("clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list")) + env.DockerExecNoError(r, "clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list") env.InstallDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") - r.NoError(env.DockerExec("clickhouse", "update-ca-certificates")) + env.DockerExecNoError(r, "clickhouse", "update-ca-certificates") 
r.NoError(env.DockerCP("config-s3-fips.yml", "clickhouse:/etc/clickhouse-backup/config.yml.fips-template")) - r.NoError(env.DockerExec("clickhouse", "git", "clone", "--depth", "1", "--branch", "v3.2rc3", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl")) - r.NoError(env.DockerExec("clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh")) + env.DockerExecNoError(r, "clickhouse", "git", "clone", "--depth", "1", "--branch", "v3.2rc3", "https://github.com/drwetter/testssl.sh.git", "/opt/testssl") + env.DockerExecNoError(r, "clickhouse", "chmod", "+x", "/opt/testssl/testssl.sh") generateCerts := func(certType, keyLength, curveType string) { - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048")) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", "openssl rand -out /root/.rnd 2048") switch certType { case "rsa": - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength))) - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength))) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/ca-key.pem %s", keyLength)) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", fmt.Sprintf("openssl genrsa -out /etc/clickhouse-backup/server-key.pem %s", keyLength)) case "ecdsa": - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType))) - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/server-key.pem", curveType))) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/ca-key.pem", curveType)) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", 
fmt.Sprintf("openssl ecparam -name %s -genkey -out /etc/clickhouse-backup/server-key.pem", curveType)) } - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem")) - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr")) - r.NoError(env.DockerExec("clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial")) + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", "openssl req -subj \"/O=altinity\" -x509 -new -nodes -key /etc/clickhouse-backup/ca-key.pem -sha256 -days 365000 -out /etc/clickhouse-backup/ca-cert.pem") + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", "openssl req -subj \"/CN=localhost\" -addext \"subjectAltName = DNS:localhost,DNS:*.cluster.local\" -new -key /etc/clickhouse-backup/server-key.pem -out /etc/clickhouse-backup/server-req.csr") + env.DockerExecNoError(r, "clickhouse", "bash", "-xce", "openssl x509 -req -days 365000 -extensions SAN -extfile <(printf \"\\n[SAN]\\nsubjectAltName=DNS:localhost,DNS:*.cluster.local\") -in /etc/clickhouse-backup/server-req.csr -out /etc/clickhouse-backup/server-cert.pem -CA /etc/clickhouse-backup/ca-cert.pem -CAkey /etc/clickhouse-backup/ca-key.pem -CAcreateserial") } - r.NoError(env.DockerExec("clickhouse", "bash", "-xec", "cat /etc/clickhouse-backup/config-s3-fips.yml.template | envsubst > /etc/clickhouse-backup/config-s3-fips.yml")) + env.DockerExecNoError(r, "clickhouse", "bash", 
"-xec", "cat /etc/clickhouse-backup/config-s3-fips.yml.template | envsubst > /etc/clickhouse-backup/config-s3-fips.yml") generateCerts("rsa", "4096", "") env.queryWithNoError(r, "CREATE DATABASE "+t.Name()) createSQL := "CREATE TABLE " + t.Name() + ".fips_table (v UInt64) ENGINE=MergeTree() ORDER BY tuple()" env.queryWithNoError(r, createSQL) env.queryWithNoError(r, "INSERT INTO "+t.Name()+".fips_table SELECT number FROM numbers(1000)") - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml create_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml restore_remote --tables="+t.Name()+".fips_table "+fipsBackupName)) - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName)) - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName)) - - log.Info("Run `clickhouse-backup-fips server` in background") - r.NoError(env.DockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml create_remote --tables="+t.Name()+".fips_table "+fipsBackupName) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips 
-c /etc/clickhouse-backup/config-s3-fips.yml restore_remote --tables="+t.Name()+".fips_table "+fipsBackupName) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName) + + log.Debug("Run `clickhouse-backup-fips server` in background") + env.DockerExecNoError(r, "-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("create_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) @@ -2022,16 +2003,16 @@ func TestFIPS(t *testing.T) { fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, )) r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(env.DockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) + env.DockerExecNoError(r, "clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips") testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { generateCerts(certType, keyLength, curveName) - log.Infof("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) - r.NoError(env.DockerExec("-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log")) + log.Debugf("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) + env.DockerExecNoError(r, "-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml 
server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) - r.NoError(env.DockerExec("clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172")) - r.NoError(env.DockerExec("clickhouse", "cat", "/tmp/testssl.csv")) + env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172") + env.DockerExecNoError(r, "clickhouse", "cat", "/tmp/testssl.csv") out, err := env.DockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("grep -o -E '%s' /tmp/testssl.csv | uniq | wc -l", strings.Join(cipherList, "|"))) r.NoError(err) r.Equal(strconv.Itoa(len(cipherList)), strings.Trim(out, " \t\r\n")) @@ -2045,7 +2026,7 @@ func TestFIPS(t *testing.T) { fmt.Sprintf("%%%s%%", fipsBackupName), status.InProgressStatus, status.ErrorStatus, )) r.Equal(0, len(inProgressActions), "inProgressActions=%+v", inProgressActions) - r.NoError(env.DockerExec("clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips")) + env.DockerExecNoError(r, "clickhouse", "pkill", "-n", "-f", "clickhouse-backup-fips") } // https://www.perplexity.ai/search/0920f1e8-59ec-4e14-b779-ba7b2e037196 testTLSCerts("rsa", "4096", "", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-GCM-SHA384", "AES_128_GCM_SHA256", "AES_256_GCM_SHA384") @@ -2060,12 +2041,10 @@ func TestIntegrationS3Glacier(t *testing.T) { t.Skip("Skipping GLACIER integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) r.NoError(env.DockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", 
"bsdmainutils", "dnsutils", "git", "ca-certificates") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml") dockerExecTimeout = 60 * time.Minute env.runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") dockerExecTimeout = 3 * time.Minute @@ -2076,16 +2055,12 @@ func TestIntegrationAzure(t *testing.T) { t.Skip("Skipping Azure integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, _ := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") } func TestIntegrationS3(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.checkObjectStorageIsEmpty(t, r, "S3") env.runMainIntegrationScenario(t, "S3", "config-s3.yml") } @@ -2095,9 +2070,7 @@ func TestIntegrationGCS(t *testing.T) { t.Skip("Skipping GCS integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, _ := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") } @@ -2106,24 +2079,17 @@ func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { t.Skip("Skipping GCS_EMULATOR integration tests...") return } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, _ := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") } func TestIntegrationSFTPAuthPassword(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, _ := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") } func TestIntegrationFTP(t *testing.T) { - 
t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) - + env, _ := NewTestEnvironment(t) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") } else { @@ -2132,55 +2098,47 @@ func TestIntegrationFTP(t *testing.T) { } func TestIntegrationSFTPAuthKey(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.uploadSSHKeys(r, "clickhouse-backup") env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") } func TestIntegrationCustomKopia(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o 
/usr/share/keyrings/kopia-keyring.gpg") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list") env.InstallDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") env.runIntegrationCustom(t, r, "kopia") } func TestIntegrationCustomRestic(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t,r) + env, r := NewTestEnvironment(t) env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); 
curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic") env.runIntegrationCustom(t, r, "restic") } func TestIntegrationCustomRsync(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t,r) + env, r := NewTestEnvironment(t) env.uploadSSHKeys(r, "clickhouse-backup") env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - r.NoError(env.DockerExec("clickhouse-backup", "update-ca-certificates")) - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq")) + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") env.runIntegrationCustom(t, r, "rsync") } func (env *TestEnvironment) runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { - r.NoError(env.DockerExec("clickhouse-backup", "mkdir", "-pv", "/custom/"+customType)) + env.DockerExecNoError(r, "clickhouse-backup", "mkdir", "-pv", "/custom/"+customType) r.NoError(env.DockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") } @@ -2192,48 +2150,44 @@ func TestIntegrationEmbedded(t *testing.T) { if compareVersion(version, "23.3") < 0 { t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := NewTestEnvironment(t) //CUSTOM backup creates folder in each disk, 
need to clear - r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/")) + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/") env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") //@TODO think about how to implements embedded backup for s3_plain disks - //r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/")) + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") - //@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053 - //CUSTOM backup create folder in each disk - r.NoError(env.DockerExec("azure", "apk", "add", "tcpdump")) - r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) - r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/")) - if compareVersion(version, "24.2") >= 0 { - env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - } - env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - r.NoError(env.DockerExec("azure", "pkill", "tcpdump")) - r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") + //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") + //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + ////CUSTOM backup create 
folder in each disk + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") + //if compareVersion(version, "24.2") >= 0 { + // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + //} + //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") + //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) - if compareVersion(version, "24.3") >= 0 { + if compareVersion(version, "23.8") >= 0 { //CUSTOM backup creates folder in each disk, need to clear - r.NoError(env.DockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/")) + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") } - if compareVersion(version, "23.8") >= 0 { + if compareVersion(version, "24.3") >= 0 { //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") - r.NoError(env.DockerExec("clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml")) + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } } func TestRestoreMapping(t *testing.T) { - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) + env, r := 
NewTestEnvironment(t) env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) defer env.ch.Close() @@ -2262,32 +2216,32 @@ func TestRestoreMapping(t *testing.T) { env.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") - log.Info("Create backup") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName)) + log.Debug("Create backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName) - log.Info("Restore schema") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) + log.Debug("Restore schema") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName) - log.Info("Check result database1") + log.Debug("Check result database1") env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") checkRecordset(1, 20, "SELECT count() FROM database1.t1") checkRecordset(1, 20, "SELECT count() FROM database1.d1") checkRecordset(1, 20, "SELECT count() FROM database1.mv1") checkRecordset(1, 20, "SELECT count() FROM database1.v1") - log.Info("Drop database1") + log.Debug("Drop database1") r.NoError(env.dropDatabase("database1")) - log.Info("Restore data") - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName)) + log.Debug("Restore data") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName) - log.Info("Check result database-2") + log.Debug("Check result database-2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.t3") checkRecordset(1, 10, "SELECT count() FROM `database-2`.d2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.mv2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.v2") - log.Info("Check database1 not exists") + log.Debug("Check database1 not exists") checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") @@ -2298,10 +2252,8 @@ func TestMySQLMaterialized(t *testing.T) { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "22.12") == -1 { t.Skipf("MaterializedMySQL doens't support for clickhouse version %s", os.Getenv("CLICKHOUSE_VERSION")) } - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) - r.NoError(env.DockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl")) + env, r := NewTestEnvironment(t) + env.DockerExecNoError(r, "mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl") env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) defer env.ch.Close() engine := "MaterializedMySQL" @@ -2309,19 +2261,19 
@@ func TestMySQLMaterialized(t *testing.T) { engine = "MaterializeMySQL" } env.queryWithNoError(r, fmt.Sprintf("CREATE DATABASE ch_mysql_repl ENGINE=%s('mysql:3306','ch_mysql_repl','root','root')", engine)) - r.NoError(env.DockerExec("mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')")) + env.DockerExecNoError(r, "mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE TABLE ch_mysql_repl.t1 (id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY, s VARCHAR(255)); INSERT INTO ch_mysql_repl.t1(s) VALUES('s1'),('s2'),('s3')") time.Sleep(1 * time.Second) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mysql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_mysql_materialized") env.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mysql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_mysql_materialized") result := 0 r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_mysql_repl.t1")) r.Equal(3, result, "expect count=3") env.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized") } func TestPostgreSQLMaterialized(t *testing.T) { @@ -2333,11 
+2285,9 @@ func TestPostgreSQLMaterialized(t *testing.T) { } t.Skip("FREEZE don't support for MaterializedPostgreSQL, https://github.com/ClickHouse/ClickHouse/issues/32902") - t.Parallel() - r := require.New(t) - env := NewTestEnvironment(t, r) - r.NoError(env.DockerExec("pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root")) - r.NoError(env.DockerExec("pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl")) + env, r := NewTestEnvironment(t) + env.DockerExecNoError(r, "pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root") + env.DockerExecNoError(r, "pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl") env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) defer env.ch.Close() env.queryWithNoError(r, @@ -2352,30 +2302,30 @@ func TestPostgreSQLMaterialized(t *testing.T) { if count > 0 { break } - t.Logf("ch_pgsql_repl contains %d tables, wait 5 seconds", count) + log.Debugf("ch_pgsql_repl contains %d tables, wait 5 seconds", count) time.Sleep(5 * time.Second) } - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_pgsql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "test_pgsql_materialized") env.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_pgsql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", 
"clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "test_pgsql_materialized") result := 0 r.NoError(env.ch.SelectSingleRowNoCtx(&result, "SELECT count() FROM ch_pgsql_repl.t1")) r.Equal(3, result, "expect count=3") env.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized")) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized") } func (env *TestEnvironment) uploadSSHKeys(r *require.Assertions, container string) { r.NoError(env.DockerCP("sftp/clickhouse-backup_rsa", container+":/id_rsa")) - r.NoError(env.DockerExec(container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa")) - r.NoError(env.DockerExec(container, "chmod", "-v", "0600", "/tmp/id_rsa")) + env.DockerExecNoError(r, container, "cp", "-vf", "/id_rsa", "/tmp/id_rsa") + env.DockerExecNoError(r, container, "chmod", "-v", "0600", "/tmp/id_rsa") r.NoError(env.DockerCP("sftp/clickhouse-backup_rsa.pub", "sshd:/root/.ssh/authorized_keys")) - r.NoError(env.DockerExec("sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys")) - r.NoError(env.DockerExec("sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys")) + env.DockerExecNoError(r, "sshd", "chown", "-v", "root:root", "/root/.ssh/authorized_keys") + env.DockerExecNoError(r, "sshd", "chmod", "-v", "0600", "/root/.ssh/authorized_keys") } func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStorageType, backupConfig string) { @@ -2393,34 +2343,34 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora incrementBackupName2 := fmt.Sprintf("%s_increment2_%d", t.Name(), rand.Int()) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} tablesPattern := 
fmt.Sprintf("*_%s.*", t.Name()) - log.Info("Clean before start") + log.Debug("Clean before start") fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) - r.NoError(env.DockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") testData := generateTestData(t, r, env, remoteStorageType, defaultTestData) - r.NoError(env.DockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) + env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") - log.Info("Create backup") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName)) + log.Debug("Create backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName) incrementData := generateIncrementTestData(t, r, env, remoteStorageType, defaultIncrementData, 1) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName) - log.Info("Upload full") + log.Debug("Upload full") uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, testBackupName) env.checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) // https://github.com/Altinity/clickhouse-backup/pull/900 if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 { - log.Info("create --diff-from-remote backup") - 
r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", testBackupName, "--tables", tablesPattern, incrementBackupName2)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2)) + log.Debug("create --diff-from-remote backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", testBackupName, "--tables", tablesPattern, incrementBackupName2) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2) } - log.Info("Upload increment") + log.Debug("Upload increment") uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s --diff-from-remote %s --resume", backupConfig, incrementBackupName, testBackupName) env.checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) @@ -2431,30 +2381,30 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NoError(err) r.Equal(2, len(strings.Split(strings.Trim(out, " 
\t\r\n"), "\n")), "expect '2' backups exists in backup directory") - log.Info("Delete backup") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName)) + log.Debug("Delete backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NotNil(err) r.Equal("", strings.Trim(out, " \t\r\n"), "expect '0' backup exists in backup directory") dropDatabasesFromTestDataDataSet(t, r, env, databaseList) - log.Info("Download") + log.Debug("Download") replaceStorageDiskNameForReBalance(r, env, remoteStorageType, false) downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, testBackupName) env.checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) - log.Info("Restore schema") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName)) + log.Debug("Restore schema") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName) - log.Info("Restore data") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName)) + log.Debug("Restore data") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName) - log.Info("Full restore with rm") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName)) + log.Debug("Full restore with rm") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName) - log.Info("Check data") + log.Debug("Check data") for i := range testData { if testData[i].CheckDatabaseOnly { r.NoError(env.checkDatabaseEngine(t, testData[i])) @@ -2468,17 +2418,17 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora // test increment dropDatabasesFromTestDataDataSet(t, r, env, databaseList) - log.Info("Delete backup") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName)) + log.Debug("Delete backup") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName) - log.Info("Download increment") + log.Debug("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, incrementBackupName) env.checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) - log.Info("Restore") - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName)) + log.Debug("Restore") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName) - log.Info("Check increment data") + log.Debug("Check increment data") for i := range testData { testDataItem := testData[i] if isTableSkip(env, testDataItem, true) 
|| testDataItem.IsDictionary { @@ -2497,7 +2447,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora } // test end - log.Info("Clean after finish") + log.Debug("Clean after finish") // during download increment, partially downloaded full will clean fullCleanup(t, r, env, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) @@ -2509,15 +2459,15 @@ func (env *TestEnvironment) checkObjectStorageIsEmpty(t *testing.T, r *require.A if remoteStorageType == "AZBLOB" || remoteStorageType == "AZBLOB_EMBEDDED_URL" { t.Log("wait when resolve https://github.com/Azure/Azurite/issues/2362, todo try to use mysql as azurite storage") /* - r.NoError(env.DockerExec("azure", "apk", "add", "jq")) + env.DockerExecNoError(r, "azure", "apk", "add", "jq") checkBlobCollection := func(containerName string, expected string) { out, err := env.DockerExecOut("azure", "sh", "-c", "jq '.collections[] | select(.name == \"$BLOBS_COLLECTION$\") | .data[] | select(.containerName == \""+containerName+"\") | .name' /data/__azurite_db_blob__.json") r.NoError(err) actual := strings.Trim(out, "\n\r\t ") if expected != actual { - r.NoError(env.DockerExec("azure", "sh", "-c", "cat /data/__azurite_db_blob__.json | jq")) - r.NoError(env.DockerExec("azure", "sh", "-c", "stat -c '%y' /data/__azurite_db_blob__.json")) - r.NoError(env.DockerExec("azure", "sh", "-c", "cat /data/debug.log")) + env.DockerExecNoError(r, "azure", "sh", "-c", "cat /data/__azurite_db_blob__.json | jq") + env.DockerExecNoError(r, "azure", "sh", "-c", "stat -c '%y' /data/__azurite_db_blob__.json") + env.DockerExecNoError(r, "azure", "sh", "-c", "cat /data/debug.log") } r.Equal(expected, actual) } @@ -2576,20 +2526,20 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, env *TestEnvironm origFile := "/etc/clickhouse-server/config.d/" + 
fileName dstFile := "/var/lib/clickhouse/" + fileName sedCmd := fmt.Sprintf("s/<%s>/<%s>/g; s/<\\/%s>/<\\/%s>/g; s/%s<\\/disk>/%s<\\/disk>/g", oldDisk, newDisk, oldDisk, newDisk, oldDisk, newDisk) - r.NoError(env.DockerExec("clickhouse", "sed", "-i", sedCmd, origFile)) - r.NoError(env.DockerExec("clickhouse", "cp", "-vf", origFile, dstFile)) + env.DockerExecNoError(r, "clickhouse", "sed", "-i", sedCmd, origFile) + env.DockerExecNoError(r, "clickhouse", "cp", "-vf", origFile, dstFile) } if isRebalanced { - r.NoError(env.DockerExec("clickhouse", "bash", "-xc", "cp -aflv -t /var/lib/clickhouse/disks/"+newDisk+"/ /var/lib/clickhouse/disks/"+oldDisk+"/*")) - r.NoError(env.DockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/"+oldDisk+"")) + env.DockerExecNoError(r, "clickhouse", "bash", "-xc", "cp -aflv -t /var/lib/clickhouse/disks/"+newDisk+"/ /var/lib/clickhouse/disks/"+oldDisk+"/*") + env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/"+oldDisk+"") } env.ch.Close() - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "clickhouse")...)) env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 3*time.Minute) } func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, backupConfig string) { - log.Info("testBackupSpecifiedPartitions started") + log.Debug("testBackupSpecifiedPartitions started") var err error var out string var result, expectedCount uint64 @@ -2609,9 +2559,9 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes } // check create_remote full > download + partitions > restore --data --partitions > delete local > download > restore --partitions > restore - r.NoError(env.DockerExec("clickhouse-backup", 
"clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions="+dbName+".t?:(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create_remote", "--tables="+dbName+".t*", fullBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", "--partitions="+dbName+".t?:(0,'2022-01-02'),(0,'2022-01-03')", fullBackupName) fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" // embedded storage with embedded disks contains object disk files and will download additional data parts if strings.HasPrefix(remoteStorageType, "EMBEDDED") { @@ -2648,14 +2598,14 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes } else { out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) } - t.Log(out) + log.Debug(out) r.NoError(err) r.Contains(out, "DROP PARTITION") // we just replace data in exists table checkRestoredDataWithPartitions(80) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "download", fullBackupName) expectedLines = "17" fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/" + dbName + "/t?/default/" @@ -2677,14 +2627,14 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes r.NotContains(out, "DROP PARTITION") checkRestoredDataWithPartitions(40) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", fullBackupName) checkRestoredDataWithPartitions(80) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", fullBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) // check create + partitions - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", "--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", 
"--partitions=(0,'2022-01-02'),(0,'2022-01-03')", partitionBackupName) expectedLines = "5" partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { @@ -2698,10 +2648,10 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName) // check create > upload + partitions - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables="+dbName+".t1", partitionBackupName) partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/" + dbName + "/t1/default/" expectedLines = "7" if strings.HasPrefix(remoteStorageType, "EMBEDDED") && !strings.HasSuffix(remoteStorageType, "_URL") { @@ -2715,11 +2665,11 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes out, err = env.DockerExecOut("clickhouse-backup", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l") r.NoError(err) r.Equal(expectedLines, strings.Trim(out, "\r\n\t ")) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=0-20220102,0-20220103", 
partitionBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", "--tables="+dbName+".t1", "--partitions=0-20220102,0-20220103", partitionBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName) // restore partial uploaded - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore_remote", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore_remote", partitionBackupName) // Check partial restored t1 result = 0 @@ -2743,13 +2693,13 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes r.Equal(expectedCount, result, "expect count=0") // DELETE backup. 
- r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", partitionBackupName)) - r.NoError(env.DockerExec("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName)) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", partitionBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", partitionBackupName) if err = env.dropDatabase(dbName); err != nil { t.Fatal(err) } - log.Info("testBackupSpecifiedPartitions finish") + log.Debug("testBackupSpecifiedPartitions finish") } func (env *TestEnvironment) checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { @@ -2760,7 +2710,7 @@ func (env *TestEnvironment) checkResumeAlreadyProcessed(backupCmd, testBackupNam backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) } out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) - log.Info(out) + log.Debug(out) r.NoError(err) if strings.Contains(backupCmd, "--resume") { r.Contains(out, "already processed") @@ -2772,7 +2722,7 @@ func fullCleanup(t *testing.T, r *require.Assertions, env *TestEnvironment, back for _, backupType := range backupTypes { err := env.DockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete "+backupType+" "+backupName) if checkDeleteErr { - r.NoError(err) + r.NoError(err,"checkDeleteErr delete %s %s error: %v", err, backupType, backupName) } } } @@ -2792,7 +2742,7 @@ func fullCleanup(t *testing.T, r *require.Assertions, env *TestEnvironment, back } func generateTestData(t *testing.T, r *require.Assertions, env 
*TestEnvironment, remoteStorageType string, testData []TestDataStruct) []TestDataStruct { - log.Infof("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) + log.Debugf("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) testData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 0, 5, testData) for _, data := range testData { if isTableSkip(env, data, false) { @@ -2810,7 +2760,7 @@ func generateTestData(t *testing.T, r *require.Assertions, env *TestEnvironment, } func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, offset, rowsCount int, testData []TestDataStruct) []TestDataStruct { - log.Infof("generateTestDataWithDifferentStoragePolicy remoteStorageType=%s", remoteStorageType) + log.Debugf("generateTestDataWithDifferentStoragePolicy remoteStorageType=%s", remoteStorageType) for databaseName, databaseEngine := range map[string]string{dbNameOrdinary: "Ordinary", dbNameAtomic: "Atomic"} { testDataWithStoragePolicy := TestDataStruct{ Database: databaseName, DatabaseEngine: databaseEngine, @@ -2876,7 +2826,7 @@ func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, offset } func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestEnvironment, remoteStorageType string, incrementData []TestDataStruct, incrementNumber int) []TestDataStruct { - log.Infof("Generate increment test data for %s", remoteStorageType) + log.Debugf("Generate increment test data for %s", remoteStorageType) incrementData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 5*incrementNumber, 5, incrementData) for _, data := range incrementData { if isTableSkip(ch, data, false) { @@ -2888,7 +2838,7 @@ func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestEnvi } func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestEnvironment, databaseList []string) { - log.Info("Drop all databases") + log.Debug("Drop all databases") for _, db := 
range databaseList { db = db + "_" + t.Name() r.NoError(ch.dropDatabase(db)) @@ -2910,7 +2860,7 @@ func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, if err != nil { r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "ps", "-a")) if out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "SELECT version()"); dockerErr == nil { - log.Info(out) + log.Debug(out) } else { log.Warn(out) } @@ -3099,7 +3049,7 @@ func (env *TestEnvironment) checkData(t *testing.T, r *require.Assertions, data assert.NotNil(t, data.Rows) data.Database += "_" + t.Name() data.Name += "_" + t.Name() - log.Infof("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) + log.Debugf("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) selectSQL := fmt.Sprintf("SELECT * FROM `%s`.`%s` ORDER BY `%s`", data.Database, data.Name, strings.Replace(data.OrderBy, "{test}", t.Name(), -1)) if data.IsFunction && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") == -1 { @@ -3182,11 +3132,11 @@ func (env *TestEnvironment) queryWithNoError(r *require.Assertions, query string r.NoError(err) } -var dockerExecTimeout = 180 * time.Second +var dockerExecTimeout = 600 * time.Second func (env *TestEnvironment) DockerExecBackground(container string, cmd ...string) error { out, err := env.DockerExecBackgroundOut(container, cmd...) - log.Info(out) + log.Debug(out) return err } @@ -3200,9 +3150,14 @@ func (env *TestEnvironment) GetDefaultComposeCommand() []string { return []string{"compose", "-f", path.Join(os.Getenv("CUR_DIR"),os.Getenv("COMPOSE_FILE")), "--progress", "plain", "--project-name", env.ProjectName} } +func (env *TestEnvironment) DockerExecNoError(r *require.Assertions, container string, cmd ...string) { + out, err := env.DockerExecOut(container, cmd...) 
+ r.NoError(err,"%s\n[ERROR]\n%v", out, err) +} + func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { out, err := env.DockerExecOut(container, cmd...) - log.Info(out) + log.Debug(out) return err } @@ -3216,9 +3171,9 @@ func (env *TestEnvironment) DockerCP(src, dst string) error { ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second) dcmd := append(env.GetDefaultComposeCommand(), "cp", src, dst) - log.Infof("docker %s", strings.Join(dcmd, " ")) + log.Debugf("docker %s", strings.Join(dcmd, " ")) out, err := exec.CommandContext(ctx, "docker", dcmd...).CombinedOutput() - log.Info(string(out)) + log.Debug(string(out)) cancel() return err } diff --git a/test/integration/run.sh b/test/integration/run.sh index 3d091565..932b5f01 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -16,6 +16,7 @@ else fi export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" export LOG_LEVEL=${LOG_LEVEL:-info} +export TEST_LOG_LEVEL=${TEST_LOG_LEVEL:-info} if [[ -f "${CUR_DIR}/credentials.json" ]]; then export GCS_TESTS=${GCS_TESTS:-1} @@ -41,9 +42,18 @@ else export COMPOSE_FILE=docker-compose.yml fi -docker-compose -f ${CUR_DIR}/${COMPOSE_FILE} down --remove-orphans + +for project in $(docker compose -f ${CUR_DIR}/${COMPOSE_FILE} ls --all -q); do + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes +done + docker volume prune -f make clean build-race-docker build-race-fips-docker -go test -parallel ${RUN_PARALLEL:-$(nproc)} -timeout ${TESTS_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go +export RUN_PARALLEL=${RUN_PARALLEL:-1} +if [[ "1" == "${RUN_PARALLEL}" ]]; then + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name all --progress plain up -d +fi + +go test -parallel ${RUN_PARALLEL} -race -timeout ${TEST_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v 
${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" From fd760d3ddc0fb51d897cd0314a496cbfdb723480 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 11:17:50 +0400 Subject: [PATCH 04/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, reformat code and add more detailed error message for failures --- test/integration/integration_test.go | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index ab8cd5e0..b21da1bc 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -34,7 +34,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/utils" ) -//setup log level +// setup log level func init() { log.SetHandler(logcli.New(os.Stdout)) logLevel := "info" @@ -54,7 +54,6 @@ const dbNamePostgreSQL = "pgsql_db" const Issue331Atomic = "_issue331._atomic_" const Issue331Ordinary = "_issue331.ordinary_" - type TestDataStruct struct { Database string DatabaseEngine string @@ -428,10 +427,12 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { t.Parallel() env.ProjectName = strings.ToLower(t.Name()) upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") - r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", upCmd...)) + out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", upCmd...) + r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) t.Cleanup(func() { downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") - r.NoError(utils.ExecCmd(context.Background(), dockerExecTimeout, "docker", downCmd...)) + out, err = utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) 
+ r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) }) } else { t.Logf("[%s] executing in sequence mode", t.Name()) @@ -2722,7 +2723,7 @@ func fullCleanup(t *testing.T, r *require.Assertions, env *TestEnvironment, back for _, backupType := range backupTypes { err := env.DockerExec("clickhouse-backup", "bash", "-xce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" delete "+backupType+" "+backupName) if checkDeleteErr { - r.NoError(err,"checkDeleteErr delete %s %s error: %v", err, backupType, backupName) + r.NoError(err, "checkDeleteErr delete %s %s error: %v", err, backupType, backupName) } } } @@ -2845,13 +2846,12 @@ func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *T } } - func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, pollInterval, timeOut time.Duration) { time.Sleep(sleepBefore) for i := 1; i < 11; i++ { err := env.connect(timeOut.String()) if i == 10 { - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(),"logs", "clickhouse")...)) + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "logs", "clickhouse")...)) out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "'SELECT version()'") r.NoError(dockerErr) env.ch.Log.Debug(out) @@ -2884,12 +2884,12 @@ func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, } func (env *TestEnvironment) connect(timeOut string) error { - portOut, err := utils.ExecCmdOut(context.Background(), 10*time.Second, "docker", append(env.GetDefaultComposeCommand(), "port", "clickhouse","9000")...) + portOut, err := utils.ExecCmdOut(context.Background(), 10*time.Second, "docker", append(env.GetDefaultComposeCommand(), "port", "clickhouse", "9000")...) 
if err != nil { log.Error(portOut) log.Fatalf("can't get port for clickhouse: %v", err) } - hostAndPort := strings.Split(strings.Trim(portOut," \r\n\t"),":") + hostAndPort := strings.Split(strings.Trim(portOut, " \r\n\t"), ":") if len(hostAndPort) < 1 { log.Error(portOut) log.Fatalf("invalid port for clickhouse: %v", err) @@ -3147,12 +3147,12 @@ func (env *TestEnvironment) DockerExecBackgroundOut(container string, cmd ...str } func (env *TestEnvironment) GetDefaultComposeCommand() []string { - return []string{"compose", "-f", path.Join(os.Getenv("CUR_DIR"),os.Getenv("COMPOSE_FILE")), "--progress", "plain", "--project-name", env.ProjectName} + return []string{"compose", "-f", path.Join(os.Getenv("CUR_DIR"), os.Getenv("COMPOSE_FILE")), "--progress", "plain", "--project-name", env.ProjectName} } func (env *TestEnvironment) DockerExecNoError(r *require.Assertions, container string, cmd ...string) { out, err := env.DockerExecOut(container, cmd...) - r.NoError(err,"%s\n[ERROR]\n%v", out, err) + r.NoError(err, "%s\n[ERROR]\n%v", out, err) } func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { @@ -3190,7 +3190,6 @@ func (env *TestEnvironment) InstallDebIfNotExists(r *require.Assertions, contain r.NoError(err) } - func toDate(s string) time.Time { result, _ := time.Parse("2006-01-02", s) return result @@ -3241,4 +3240,3 @@ func isTestShouldSkip(envName string) bool { isSkip, _ := map[string]bool{"": true, "0": true, "false": true, "False": true, "1": false, "True": false, "true": false}[os.Getenv(envName)] return isSkip } - From dd505631a8b86ae50b0846e6cfa5485d9a2dfcb3 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 11:42:15 +0400 Subject: [PATCH 05/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix CUR_DIR definition --- .github/workflows/build.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index d085cdde..979c1d10 100644 --- 
a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -313,8 +313,7 @@ jobs: export COMPOSE_FILE=docker-compose.yml fi - CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" - export CUR_DIR="${CUR_DIR}/test/integration" + export CUR_DIR="$(pwd)/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage From cfceb6cd6f57f3f5355e9c91a5725958aa224f4e Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 12:07:16 +0400 Subject: [PATCH 06/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix gcs container healthcheck --- test/integration/docker-compose.yml | 4 ++++ test/integration/docker-compose_advanced.yml | 8 +++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 5a3267b9..7a295a29 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -46,6 +46,10 @@ services: - "mkdir -p /data/altinity-qa-test && mkdir -p /data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080" environment: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" + healthcheck: + test: nc 127.0.0.1 8080 -z + interval: 1s + retries: 30 azure: image: mcr.microsoft.com/azure-storage/azurite:latest diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 84e8bcf9..26c569d5 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -46,7 +46,6 @@ services: ports: - "9001" -# todo need to reproduce download after upload gcs: image: fsouza/fake-gcs-server:latest hostname: gcs @@ -57,6 +56,11 @@ services: - "mkdir -p /data/altinity-qa-test && mkdir -p 
/data/${QA_GCS_OVER_S3_BUCKET} && fake-gcs-server -data /data -scheme http -port 8080 -public-host gcs:8080" environment: QA_GCS_OVER_S3_BUCKET: "${QA_GCS_OVER_S3_BUCKET}" + healthcheck: + test: nc 127.0.0.1 8080 -z + interval: 1s + retries: 30 + azure: image: mcr.microsoft.com/azure-storage/azurite:latest @@ -292,6 +296,8 @@ services: condition: service_healthy azure: condition: service_healthy + gcs: + condition: service_healthy # azure_init: # condition: service_completed_successfully From cd85b129e9a05fd00b08ccc0fc3bf36daf842f3c Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 13:53:31 +0400 Subject: [PATCH 07/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestLongListRemote --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index b21da1bc..bceead70 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -665,7 +665,7 @@ func TestLongListRemote(t *testing.T) { env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() - totalCacheCount := 20 + totalCacheCount := 25 testBackupName := "test_list_remote" for i := 0; i < totalCacheCount; i++ { @@ -3152,7 +3152,7 @@ func (env *TestEnvironment) GetDefaultComposeCommand() []string { func (env *TestEnvironment) DockerExecNoError(r *require.Assertions, container string, cmd ...string) { out, err := env.DockerExecOut(container, cmd...) 
- r.NoError(err, "%s\n[ERROR]\n%v", out, err) + r.NoError(err, "%s\n\n%s\n[ERROR]\n%v", strings.Join(append(append(env.GetDefaultComposeCommand(), "exec", container), cmd...), " "), out, err) } func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { From cb7ee61488c32e9e61c719d9721d16fd3818f20e Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 14:44:53 +0400 Subject: [PATCH 08/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestIntegrationCustomRestic --- test/integration/restic/delete.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/restic/delete.sh b/test/integration/restic/delete.sh index 04449094..cece8080 100755 --- a/test/integration/restic/delete.sh +++ b/test/integration/restic/delete.sh @@ -3,4 +3,4 @@ set -xeuo pipefail CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" source "${CUR_DIR}/init.sh" BACKUP_NAME=$1 -restic forget --tag "${BACKUP_NAME}" --prune +restic forget --tag "${BACKUP_NAME}" --prune --unsafe-allow-remove-all From 200270487ea4433df56b586d0432f821d28b9854 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 15:00:33 +0400 Subject: [PATCH 09/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, measure docker compose up / down time --- test/integration/integration_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index bceead70..f9165740 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -427,12 +427,16 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { t.Parallel() env.ProjectName = strings.ToLower(t.Name()) upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") + upStart := time.Now() out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", upCmd...) 
r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) + t.Logf("docker compose up time = %s", time.Since(upStart)) t.Cleanup(func() { + downStart := time.Now() downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") out, err = utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) + t.Logf("docker compose down time = %s", time.Since(downStart)) }) } else { t.Logf("[%s] executing in sequence mode", t.Name()) From 57d879ad2c7436d9f2b12f1324f343226b5eb66c Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 16:11:51 +0400 Subject: [PATCH 10/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix Cannot attach table with UUID 59dbfd98-137f-4f5d-ad99-dee160565ed6, because it was detached but still used by some query --- pkg/clickhouse/clickhouse.go | 2 +- test/integration/integration_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 0a373de5..4aee1b4e 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -802,7 +802,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad if ch.version <= 21003000 { return fmt.Errorf("your clickhouse-server version doesn't support SYSTEM RESTORE REPLICA statement, use `restore_as_attach: false` in config") } - query := fmt.Sprintf("DETACH TABLE `%s`.`%s`", table.Database, table.Table) + query := fmt.Sprintf("DETACH TABLE `%s`.`%s` SYNC", table.Database, table.Table) if err := ch.Query(query); err != nil { return err } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index f9165740..dd52662f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -433,7 +433,7 @@ func NewTestEnvironment(t *testing.T) 
(*TestEnvironment, *require.Assertions) { t.Logf("docker compose up time = %s", time.Since(upStart)) t.Cleanup(func() { downStart := time.Now() - downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes") + downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes","--timeout","1") out, err = utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) t.Logf("docker compose down time = %s", time.Since(downStart)) From 8e60834dd541681a0de425299ce36f435cebb96b Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 17:17:10 +0400 Subject: [PATCH 11/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote --- test/integration/integration_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index dd52662f..a00bead2 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -669,7 +669,7 @@ func TestLongListRemote(t *testing.T) { env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) defer env.ch.Close() - totalCacheCount := 25 + totalCacheCount := 20 testBackupName := "test_list_remote" for i := 0; i < totalCacheCount; i++ { @@ -688,9 +688,9 @@ func TestLongListRemote(t *testing.T) { startCashed := time.Now() env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - cashedDuration := time.Since(startCashed) + cachedDuration := time.Since(startCashed) - r.Greater(noCacheDuration, cashedDuration) + r.Greater(noCacheDuration, cachedDuration,"noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", 
"/tmp/.clickhouse-backup-metadata.cache.S3") r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) @@ -700,8 +700,8 @@ func TestLongListRemote(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cacheClearDuration := time.Since(startCacheClear) - r.Greater(cacheClearDuration, cashedDuration) - log.Debugf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) + r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String()) + log.Debugf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) testListRemoteAllBackups := make([]string, totalCacheCount) for i := 0; i < totalCacheCount; i++ { From 603da07258f5ffffed92929b785e5bb4a9108a81 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 18:08:21 +0400 Subject: [PATCH 12/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, trick for TestLongListRemote and build.yaml --- .github/workflows/build.yaml | 1 + test/integration/integration_test.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 979c1d10..3578eae7 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -315,6 +315,7 @@ jobs: export CUR_DIR="$(pwd)/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" + docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" pull go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: diff --git 
a/test/integration/integration_test.go b/test/integration/integration_test.go index a00bead2..68face4f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -676,11 +676,11 @@ func TestLongListRemote(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i)) } - env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) startFirst := time.Now() + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") noCacheDuration := time.Since(startFirst) @@ -692,11 +692,11 @@ func TestLongListRemote(t *testing.T) { r.Greater(noCacheDuration, cachedDuration,"noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) - env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) startCacheClear := time.Now() + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cacheClearDuration := time.Since(startCacheClear) From 5631af648e5edcd5cb830cdfe1f242e9495761cc Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 18:27:52 +0400 Subject: [PATCH 13/54] debug 
https://github.com/Altinity/clickhouse-backup/issues/888, tricks for build.yaml --- .github/workflows/build.yaml | 2 +- test/integration/run.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3578eae7..ee464782 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -315,7 +315,7 @@ jobs: export CUR_DIR="$(pwd)/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" - docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" pull + docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --progress=plain pull go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: diff --git a/test/integration/run.sh b/test/integration/run.sh index 932b5f01..408d17a9 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -55,5 +55,6 @@ if [[ "1" == "${RUN_PARALLEL}" ]]; then docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name all --progress plain up -d fi +docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --progress=plain pull go test -parallel ${RUN_PARALLEL} -race -timeout ${TEST_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" From dfb1bc47c8b898a4fb821a368710190dc800ddd9 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 18:36:39 +0400 Subject: [PATCH 14/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, improve startup time for docker-compose_advanced.yml --- test/integration/docker-compose_advanced.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 26c569d5..a45bac05 100644 --- 
a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -98,8 +98,9 @@ services: - "3306" healthcheck: test: mysqladmin -p=root ping -h localhost - timeout: 20s - retries: 10 + timeout: 10s + interval: 1s + retries: 100 pgsql: image: docker.io/postgres:${PGSQL_VERSION:-latest} @@ -114,7 +115,8 @@ services: command: [ "postgres", "-c", "wal_level=logical" ] healthcheck: test: pg_isready - timeout: 20s + timeout: 10s + interval: 1s retries: 10 zookeeper: From b79a0fbfd2cb293fb4e92d4d15f2c9bd0d17cf4d Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 19:52:28 +0400 Subject: [PATCH 15/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote --- .github/workflows/build.yaml | 2 +- Dockerfile | 2 +- test/integration/integration_test.go | 3 +++ test/integration/run.sh | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ee464782..5f5c9528 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -315,7 +315,7 @@ jobs: export CUR_DIR="$(pwd)/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" - docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --progress=plain pull + docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --progress=quiet pull go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: diff --git a/Dockerfile b/Dockerfile index 37e98721..39a951c3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,7 +9,7 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \ find /etc/apt/ -type f -name *.list -exec sed -i 's/ru.archive.ubuntu.com/archive.ubuntu.com/g' {} + && \ ( apt-get update || true ) && \ apt-get install -y --no-install-recommends gnupg ca-certificates wget && update-ca-certificates && \ - for srv in 
"keyserver.ubuntu.com" "pool.sks-keyservers.net" "keys.gnupg.net"; do apt-key adv --keyserver $srv --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E; if [ $? -eq 0 ]; then break; fi; done && \ + for srv in "keyserver.ubuntu.com" "pool.sks-keyservers.net" "keys.gnupg.net"; do host $srv; apt-key adv --keyserver $srv --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E; if [ $? -eq 0 ]; then break; fi; done && \ DISTRIB_CODENAME=$(cat /etc/lsb-release | grep DISTRIB_CODENAME | cut -d "=" -f 2) && \ echo ${DISTRIB_CODENAME} && \ echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \ diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 68face4f..c47508cf 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -686,6 +686,9 @@ func TestLongListRemote(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3") + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) + time.Sleep(2 * time.Second) + startCashed := time.Now() env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cachedDuration := time.Since(startCashed) diff --git a/test/integration/run.sh b/test/integration/run.sh index 408d17a9..ea6a725f 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -55,6 +55,6 @@ if [[ "1" == "${RUN_PARALLEL}" ]]; then docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name all --progress plain up -d fi -docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --progress=plain pull +docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --progress=quiet pull go test -parallel ${RUN_PARALLEL} -race -timeout ${TEST_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v 
${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" From 0644b5e85b165de7af70f1a88771a2f91a0f76fa Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 21:29:51 +0400 Subject: [PATCH 16/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote --- .github/workflows/build.yaml | 6 +++--- pkg/storage/general.go | 8 ++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 5f5c9528..9f0fd3a9 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -272,10 +272,10 @@ jobs: RUN_PARALLEL: 4 GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} - # TEST_LOG_LEVEL: "debug" # options for advanced debug CI/CD - # RUN_TESTS: "TestIntegrationS3" - # LOG_LEVEL: "debug" + RUN_TESTS: "TestLongListRemote" + LOG_LEVEL: "debug" + # TEST_LOG_LEVEL: "debug" # GCS_DEBUG: "true" # SFTP_DEBUG: "true" # AZBLOB_DEBUG: "true" diff --git a/pkg/storage/general.go b/pkg/storage/general.go index 7f3db10b..b802e3ae 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -160,6 +160,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, if err != nil { return nil, err } + cacheMiss := false err = bd.Walk(ctx, "/", false, func(ctx context.Context, o RemoteFile) error { backupName := strings.Trim(o.Name(), "/") if !parseMetadata || (parseMetadataOnly != "" && parseMetadataOnly != backupName) { @@ -231,6 +232,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, } goodBackup := Backup{m, "", mf.LastModified()} listCache[backupName] = goodBackup + cacheMiss = true result = append(result, goodBackup) return nil }) @@ -244,8 +246,10 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, sort.SliceStable(result, func(i, j int) bool { return 
result[i].UploadDate.Before(result[j].UploadDate) }) - if err = bd.saveMetadataCache(ctx, listCache, result); err != nil { - return nil, fmt.Errorf("bd.saveMetadataCache return error: %v", err) + if cacheMiss { + if err = bd.saveMetadataCache(ctx, listCache, result); err != nil { + return nil, fmt.Errorf("bd.saveMetadataCache return error: %v", err) + } } return result, nil } From 021bca35060adc2c0697fe7e0721917c9845d22f Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 21:46:51 +0400 Subject: [PATCH 17/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote --- .github/workflows/build.yaml | 4 ++-- pkg/storage/general.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9f0fd3a9..973974a0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -273,8 +273,8 @@ jobs: GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # options for advanced debug CI/CD - RUN_TESTS: "TestLongListRemote" - LOG_LEVEL: "debug" + # RUN_TESTS: "TestLongListRemote" + # LOG_LEVEL: "debug" # TEST_LOG_LEVEL: "debug" # GCS_DEBUG: "true" # SFTP_DEBUG: "true" diff --git a/pkg/storage/general.go b/pkg/storage/general.go index b802e3ae..adea2154 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -246,7 +246,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, sort.SliceStable(result, func(i, j int) bool { return result[i].UploadDate.Before(result[j].UploadDate) }) - if cacheMiss { + if cacheMiss || len(result) < len(listCache) { if err = bd.saveMetadataCache(ctx, listCache, result); err != nil { return nil, fmt.Errorf("bd.saveMetadataCache return error: %v", err) } From 18c6773cbb89563d724329f1cc1f4bd60c9b5516 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 26 Jul 2024 22:11:13 +0400 Subject: [PATCH 18/54] debug 
https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote --- test/integration/integration_test.go | 98 ++++++++++++++-------------- 1 file changed, 49 insertions(+), 49 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c47508cf..c0799ff8 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -450,6 +450,55 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { return &env, r } +// TestLongListRemote - no parallel, cause need to restart minio +func TestLongListRemote(t *testing.T) { + env, r := NewTestEnvironment(t) + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) + defer env.ch.Close() + totalCacheCount := 20 + testBackupName := "test_list_remote" + + for i := 0; i < totalCacheCount; i++ { + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i)) + } + + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) + time.Sleep(2 * time.Second) + + startFirst := time.Now() + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") + noCacheDuration := time.Since(startFirst) + + env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3") + + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) + time.Sleep(2 * time.Second) + + startCashed := time.Now() + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", 
"/etc/clickhouse-backup/config-s3.yml", "list", "remote") + cachedDuration := time.Since(startCashed) + + r.Greater(noCacheDuration, cachedDuration,"noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) + + r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) + time.Sleep(2 * time.Second) + + startCacheClear := time.Now() + env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") + cacheClearDuration := time.Since(startCacheClear) + + r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String()) + log.Debugf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) + + testListRemoteAllBackups := make([]string, totalCacheCount) + for i := 0; i < totalCacheCount; i++ { + testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", testBackupName, i) + } + fullCleanup(t, r, env, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") +} + // TestS3NoDeletePermission - no parallel func TestS3NoDeletePermission(t *testing.T) { if isTestShouldSkip("RUN_ADVANCED_TESTS") { @@ -664,55 +713,6 @@ func TestConfigs(t *testing.T) { } } -// TestLongListRemote - no parallel, cause need to restart minio -func TestLongListRemote(t *testing.T) { - env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() - totalCacheCount := 20 - testBackupName := "test_list_remote" - - for i := 0; i < totalCacheCount; i++ { - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", 
fmt.Sprintf("CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml ALLOW_EMPTY_BACKUPS=true clickhouse-backup create_remote %s_%d", testBackupName, i)) - } - - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - time.Sleep(2 * time.Second) - - startFirst := time.Now() - env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - noCacheDuration := time.Since(startFirst) - - env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3") - - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - time.Sleep(2 * time.Second) - - startCashed := time.Now() - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - cachedDuration := time.Since(startCashed) - - r.Greater(noCacheDuration, cachedDuration,"noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) - - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - time.Sleep(2 * time.Second) - - startCacheClear := time.Now() - env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - cacheClearDuration := time.Since(startCacheClear) - - r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String()) - log.Debugf("noCacheDuration=%s cachedDuration=%s 
cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) - - testListRemoteAllBackups := make([]string, totalCacheCount) - for i := 0; i < totalCacheCount; i++ { - testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", testBackupName, i) - } - fullCleanup(t, r, env, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") -} - const apiBackupNumber = 5 func TestServerAPI(t *testing.T) { From 2d0b6ba65ec11c81577b24b6455f92a55ebfa907 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 05:54:17 +0400 Subject: [PATCH 19/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, try set MaxIdleConns 0 --- pkg/clickhouse/clickhouse.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 4aee1b4e..b934ebfc 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -65,7 +65,7 @@ func (ch *ClickHouse) Connect() error { }, MaxOpenConns: ch.Config.MaxConnections, ConnMaxLifetime: 0, // don't change it, it related to SYSTEM SHUTDOWN behavior for properly rebuild RBAC lists on 20.4-22.3 - MaxIdleConns: 1, + MaxIdleConns: 0, DialTimeout: timeout, ReadTimeout: timeout, } From 5bfa04602d29c7949d4c9050ab7c197d0a948ce7 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 06:39:25 +0400 Subject: [PATCH 20/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestLongListRemote ;( --- test/integration/integration_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c0799ff8..cd92bc8a 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -423,8 +423,12 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { ProjectName: "all", } if os.Getenv("RUN_PARALLEL") != "1" { - t.Logf("[%s] 
executing in parallel mode", t.Name()) - t.Parallel() + if t.Name() != "TestLongListRemote" { + t.Logf("[%s] executing in parallel mode", t.Name()) + t.Parallel() + } else { + t.Logf("[%s] executing in sequence mode", t.Name()) + } env.ProjectName = strings.ToLower(t.Name()) upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") upStart := time.Now() From 6c0db8a56cf04876cd83aeca60c1b79edbc3b32d Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 08:22:52 +0400 Subject: [PATCH 21/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestIntegrationAzure --- test/integration/integration_test.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index cd92bc8a..fd285001 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -434,13 +434,13 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { upStart := time.Now() out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", upCmd...) r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) - t.Logf("docker compose up time = %s", time.Since(upStart)) + t.Logf("%s docker compose up time = %s", t.Name(), time.Since(upStart)) t.Cleanup(func() { downStart := time.Now() downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes","--timeout","1") out, err = utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) 
r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) - t.Logf("docker compose down time = %s", time.Since(downStart)) + t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) }) } else { t.Logf("[%s] executing in sequence mode", t.Name()) @@ -3029,7 +3029,7 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er batch, err := env.ch.GetConn().PrepareBatch(context.Background(), insertSQL) if err != nil { - return err + return fmt.Errorf("createTestData PrepareBatch error: %v", err) } for _, row := range data.Rows { @@ -3039,10 +3039,14 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er insertData[idx] = row[field] } if err = batch.Append(insertData...); err != nil { - return err + return fmt.Errorf("createTestData batch.Append(%#v) error: %v", insertData, err) } } - return batch.Send() + err = batch.Send() + if err != nil { + return fmt.Errorf("createTestData batch.Send() error: %v", err) + } + return err } func (env *TestEnvironment) dropDatabase(database string) (err error) { From 0cbc506641a68d83dfa2734dfa6898f4919722a5 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 08:55:20 +0400 Subject: [PATCH 22/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, improve speed up for docker compose up --- test/integration/docker-compose.yml | 20 ++++++++++--------- test/integration/docker-compose_advanced.yml | 21 +++++++++----------- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 7a295a29..6df31b9b 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -35,7 +35,6 @@ services: volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh -# todo need to reproduce download after upload gcs: image: fsouza/fake-gcs-server:latest hostname: gcs @@ -59,6 +58,8 @@ services: interval: 1s retries: 30 command: 
["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"] +# environment: +# - AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" # azure_init: # image: mcr.microsoft.com/azure-cli:latest @@ -83,10 +84,10 @@ services: ZOO_4LW_COMMANDS_WHITELIST: "*" healthcheck: test: bash -c 'if [[ "$$(echo 'ruok' | nc 127.0.0.1 2181)" == "imok" ]]; then exit 0; else exit 1; fi' - interval: 3s + interval: 1s timeout: 2s - retries: 5 - start_period: 2s + retries: 10 + start_period: 1s clickhouse-backup: @@ -99,7 +100,7 @@ services: - sleep infinity healthcheck: test: bash -c "exit 0" - interval: 3s + interval: 1s timeout: 1s retries: 5 start_period: 1s @@ -177,6 +178,7 @@ services: - ./config-ftp.yaml:/etc/clickhouse-backup/config-ftp.yaml - ./config-ftp-old.yaml:/etc/clickhouse-backup/config-ftp-old.yaml - ./config-gcs.yml:/etc/clickhouse-backup/config-gcs.yml + - ./config-gcs-embedded-url.yml:/etc/clickhouse-backup/config-gcs-embedded-url.yml.template - ./config-gcs-custom-endpoint.yml:/etc/clickhouse-backup/config-gcs-custom-endpoint.yml - ./config-s3.yml:/etc/clickhouse-backup/config-s3.yml - ./config-s3-embedded.yml:/etc/clickhouse-backup/config-s3-embedded.yml @@ -208,7 +210,7 @@ services: - "8123" - "9000" # for delve debugger - - "40001" + - "40002" links: - zookeeper - minio @@ -218,10 +220,10 @@ services: - gcs healthcheck: test: clickhouse client -q "SELECT 1" - interval: 3s + interval: 1s timeout: 2s - retries: 30 - start_period: 5s + retries: 60 + start_period: 1s depends_on: zookeeper: condition: service_healthy diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index a45bac05..f00e78bd 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -43,8 +43,6 @@ services: retries: 30 volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh - ports: - - "9001" gcs: image: 
fsouza/fake-gcs-server:latest @@ -61,7 +59,6 @@ services: interval: 1s retries: 30 - azure: image: mcr.microsoft.com/azure-storage/azurite:latest hostname: devstoreaccount1.blob.azure @@ -97,7 +94,7 @@ services: ports: - "3306" healthcheck: - test: mysqladmin -p=root ping -h localhost + test: bash -ce "exit 0" timeout: 10s interval: 1s retries: 100 @@ -114,7 +111,7 @@ services: - "5432" command: [ "postgres", "-c", "wal_level=logical" ] healthcheck: - test: pg_isready + test: bash -ce "exit 0" timeout: 10s interval: 1s retries: 10 @@ -131,10 +128,10 @@ services: - CLICKHOUSE_GID=0 healthcheck: test: bash -c 'if [[ "$$(echo 'ruok' | nc 127.0.0.1 2181)" == "imok" ]]; then exit 0; else exit 1; fi' - interval: 3s + interval: 1s timeout: 2s - retries: 5 - start_period: 2s + retries: 10 + start_period: 1s clickhouse-backup: @@ -147,7 +144,7 @@ services: - sleep infinity healthcheck: test: bash -c "exit 0" - interval: 3s + interval: 1s timeout: 1s retries: 5 start_period: 1s @@ -283,10 +280,10 @@ services: - gcs healthcheck: test: clickhouse client -q "SELECT 1" - interval: 3s + interval: 1s timeout: 2s - retries: 30 - start_period: 3s + retries: 60 + start_period: 1s depends_on: mysql: condition: service_healthy From c022b41ed69356cff253b6ced53f2a2e5726c090 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 10:18:55 +0400 Subject: [PATCH 23/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug docker compose up --- test/integration/integration_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index fd285001..479d62cc 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -433,6 +433,10 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") upStart := time.Now() out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, 
"docker", upCmd...) + if err != nil { + logs, _ := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", append(env.GetDefaultComposeCommand(),"logs")...) + t.Log(logs) + } r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) t.Logf("%s docker compose up time = %s", t.Name(), time.Since(upStart)) t.Cleanup(func() { From 454baed87be647604da258ce3a60585042fd07da Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 27 Jul 2024 23:28:37 +0400 Subject: [PATCH 24/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug docker compose up --- test/integration/docker-compose.yml | 4 +-- test/integration/docker-compose_advanced.yml | 28 ++++++++++++++++---- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 6df31b9b..e7f4b0ca 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -29,8 +29,8 @@ services: MINIO_ROOT_USER: access_key MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: - test: curl -sL http://localhost:9000/ - interval: 3s + test: curl -sL http://localhost:9000/ && ls -la /bitnami/minio/data/clickhouse/ + interval: 1s retries: 30 volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index f00e78bd..2d23ea5b 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -38,8 +38,8 @@ services: MINIO_ROOT_USER: access_key MINIO_ROOT_PASSWORD: it_is_my_super_secret_key healthcheck: - test: curl -sL http://localhost:9000/ - interval: 3s + test: curl -sL http://localhost:9000/ && ls -la /bitnami/minio/data/clickhouse/ + interval: 1s retries: 30 volumes: - ./minio_nodelete.sh:/bin/minio_nodelete.sh @@ -93,8 +93,10 @@ services: MYSQL_ROOT_PASSWORD: "root" ports: - "3306" + volumes: + - mysql:/var/lib/mysql healthcheck: 
- test: bash -ce "exit 0" + test: mysqladmin -p=root ping -h localhost timeout: 10s interval: 1s retries: 100 @@ -111,10 +113,12 @@ services: - "5432" command: [ "postgres", "-c", "wal_level=logical" ] healthcheck: - test: bash -ce "exit 0" + test: pg_isready timeout: 10s interval: 1s retries: 10 + volumes: + - pgsql:/var/lib/postgresql zookeeper: image: docker.io/clickhouse/clickhouse-keeper:${CLICKHOUSE_KEEPER_VERSION:-latest-alpine} @@ -304,4 +308,18 @@ services: image: hello-world depends_on: clickhouse-backup: - condition: service_healthy \ No newline at end of file + condition: service_healthy + +volumes: + mysql: + driver: local + driver_opts: + device: tmpfs + type: tmpfs + o: size=250m + pgsql: + driver: local + driver_opts: + device: tmpfs + type: tmpfs + o: size=60m \ No newline at end of file From c73bc8dda2cd369b5c8d675ea216e6bf3496432c Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 11:05:08 +0400 Subject: [PATCH 25/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug LOG_LEVEL, TEST_LOG_LEVEL --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 479d62cc..891a4dae 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -38,10 +38,10 @@ import ( func init() { log.SetHandler(logcli.New(os.Stdout)) logLevel := "info" - if os.Getenv("LOG_LEVEL") != "" { + if os.Getenv("LOG_LEVEL") != "" && os.Getenv("LOG_LEVEL") != "info" { logLevel = os.Getenv("LOG_LEVEL") } - if os.Getenv("TEST_LOG_LEVEL") != "" { + if os.Getenv("TEST_LOG_LEVEL") != "" && os.Getenv("TEST_LOG_LEVEL") != "info" { logLevel = os.Getenv("TEST_LOG_LEVEL") } log.SetLevelFromString(logLevel) From 16b72ee4e5084196e86adc2f5f149839409a7e78 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 12:43:28 +0400 Subject: [PATCH 26/54] debug 
https://github.com/Altinity/clickhouse-backup/issues/888, replacte t.Cleanup defer to explicit call of env.Cleanup and increase clickhouse client timeout --- test/integration/integration_test.go | 125 +++++++++++++++------------ 1 file changed, 69 insertions(+), 56 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 891a4dae..3b0b7da9 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -439,13 +439,6 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { } r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) t.Logf("%s docker compose up time = %s", t.Name(), time.Since(upStart)) - t.Cleanup(func() { - downStart := time.Now() - downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes","--timeout","1") - out, err = utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) - r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) - t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) - }) } else { t.Logf("[%s] executing in sequence mode", t.Name()) } @@ -458,11 +451,20 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { return &env, r } +func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { + downStart := time.Now() + env.ch.Close() + downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes","--timeout","1") + out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) 
+ r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) + t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) +} + + // TestLongListRemote - no parallel, cause need to restart minio func TestLongListRemote(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) totalCacheCount := 20 testBackupName := "test_list_remote" @@ -505,6 +507,7 @@ func TestLongListRemote(t *testing.T) { testListRemoteAllBackups[i] = fmt.Sprintf("%s_%d", testBackupName, i) } fullCleanup(t, r, env, testListRemoteAllBackups, []string{"remote", "local"}, []string{}, true, true, "config-s3.yml") + env.Cleanup(t, r) } // TestS3NoDeletePermission - no parallel @@ -514,8 +517,7 @@ func TestS3NoDeletePermission(t *testing.T) { return } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 2*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) env.DockerExecNoError(r, "minio", "/bin/minio_nodelete.sh") r.NoError(env.DockerCP("config-s3-nodelete.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) @@ -532,6 +534,7 @@ func TestS3NoDeletePermission(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "list", "remote") env.checkObjectStorageIsEmpty(t, r, "S3") + env.Cleanup(t, r) } // TestRBAC need clickhouse-server restart, no parallel @@ -542,7 +545,7 @@ func TestRBAC(t *testing.T) { } env, r := NewTestEnvironment(t) testRBACScenario := func(config string) { - env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Minute) env.queryWithNoError(r, "DROP TABLE IF EXISTS 
default.test_rbac") env.queryWithNoError(r, "CREATE TABLE default.test_rbac (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") @@ -596,7 +599,7 @@ func TestRBAC(t *testing.T) { env.ch.Close() // r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, append(env.GetDefaultComposeCommand(), "restart", "clickhouse"))) - env.connectWithWait(r, 2*time.Second, 2*time.Second, 8*time.Second) + env.connectWithWait(r, 2*time.Second, 2*time.Second, 1*time.Minute) env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") @@ -646,6 +649,7 @@ func TestRBAC(t *testing.T) { if compareVersion(chVersion, "24.2") >= 0 { testRBACScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") } + env.Cleanup(t, r) } // TestConfigs - require direct access to `/etc/clickhouse-backup/`, so executed inside `clickhouse` container @@ -653,7 +657,7 @@ func TestRBAC(t *testing.T) { func TestConfigs(t *testing.T) { env, r := NewTestEnvironment(t) testConfigsScenario := func(config string) { - env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Second) + env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Minute) env.queryWithNoError(r, "DROP TABLE IF EXISTS default.test_configs") env.queryWithNoError(r, "CREATE TABLE default.test_configs (v UInt64) ENGINE=MergeTree() ORDER BY tuple()") @@ -670,7 +674,7 @@ func TestConfigs(t *testing.T) { env.queryWithNoError(r, "SYSTEM RELOAD CONFIG") env.ch.Close() - env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Minute) selectEmptyResultForAggQuery := "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'" var settings string r.NoError(env.ch.SelectSingleRowNoCtx(&settings, selectEmptyResultForAggQuery)) @@ -684,7 +688,7 @@ func TestConfigs(t *testing.T) { r.NoError(env.ch.Query("SYSTEM RELOAD CONFIG")) env.ch.Close() - env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Second) 
+ env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Minute) settings = "" r.NoError(env.ch.SelectSingleRowNoCtx(&settings, "SELECT value FROM system.settings WHERE name='empty_result_for_aggregation_by_empty_set'")) @@ -719,16 +723,14 @@ func TestConfigs(t *testing.T) { if compareVersion(chVersion, "24.2") >= 0 { testConfigsScenario("/etc/clickhouse-backup/config-azblob-embedded-url.yml") } + env.Cleanup(t, r) } const apiBackupNumber = 5 func TestServerAPI(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 10*time.Second) - defer func() { - env.ch.Close() - }() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) fieldTypes := []string{"UInt64", "String", "Int"} env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "jq") @@ -772,6 +774,7 @@ func TestServerAPI(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "pkill", "-n", "-f", "clickhouse-backup") r.NoError(env.dropDatabase("long_schema")) + env.Cleanup(t, r) } func testAPIRestart(r *require.Assertions, env *TestEnvironment) { @@ -1145,8 +1148,7 @@ func TestSkipNotExistsTable(t *testing.T) { t.Skip("TestSkipNotExistsTable too small time between `SELECT DISTINCT partition_id` and `ALTER TABLE ... 
FREEZE PARTITION`") } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) log.Debug("Check skip not exist errors") env.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") @@ -1260,12 +1262,12 @@ func TestSkipNotExistsTable(t *testing.T) { } r.NoError(err) t.Log("TestSkipNotExistsTable DONE, ALL OK") + env.Cleanup(t, r) } func TestSkipTablesAndSkipTableEngines(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) version, err := env.ch.GetVersion(context.Background()) r.NoError(err) env.queryWithNoError(r, "CREATE DATABASE test_skip_tables") @@ -1469,12 +1471,12 @@ func TestSkipTablesAndSkipTableEngines(t *testing.T) { } env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete local test_skip_full_backup") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml delete remote test_skip_full_backup") + env.Cleanup(t, r) } func TestTablePatterns(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) testBackupName := "test_backup_patterns" databaseList := []string{dbNameOrdinary, dbNameAtomic} @@ -1540,6 +1542,7 @@ func TestTablePatterns(t *testing.T) { } } env.checkObjectStorageIsEmpty(t, r, "S3") + env.Cleanup(t, r) } func TestProjections(t *testing.T) { @@ -1548,8 +1551,7 @@ func TestProjections(t *testing.T) { t.Skipf("Test skipped, PROJECTION available only 21.8+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } env, r := NewTestEnvironment(t) - 
env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml")) err = env.ch.Query("CREATE TABLE default.table_with_projection(dt DateTime, v UInt64, PROJECTION x (SELECT toStartOfMonth(dt) m, sum(v) GROUP BY m)) ENGINE=MergeTree() PARTITION BY toYYYYMMDD(dt) ORDER BY dt") @@ -1582,6 +1584,7 @@ func TestProjections(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "test_backup_projection_full") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "test_backup_projection_increment") + env.Cleanup(t, r) } func TestCheckSystemPartsColumns(t *testing.T) { @@ -1591,8 +1594,7 @@ func TestCheckSystemPartsColumns(t *testing.T) { t.Skipf("Test skipped, system.parts_columns have inconsistency only in 23.3+, current version %s", os.Getenv("CLICKHOUSE_VERSION")) } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) version, err = env.ch.GetVersion(context.Background()) r.NoError(err) @@ -1629,7 +1631,7 @@ func TestCheckSystemPartsColumns(t *testing.T) { r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_system_parts_columns"}, createSQL, "", false, version, "")) r.NoError(env.dropDatabase(t.Name())) - + env.Cleanup(t, r) } func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { @@ -1638,7 +1640,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { return } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 5*time.Second) + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) backupNames := make([]string, 5) for i := 0; i < 5; i++ { @@ -1684,6 +1686,7 @@ func 
TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { fullCleanup(t, r, env, []string{latestIncrementBackup}, []string{"local"}, nil, true, true, "config-s3.yml") fullCleanup(t, r, env, backupNames, []string{"remote"}, databaseList, true, true, "config-s3.yml") env.checkObjectStorageIsEmpty(t, r, "S3") + env.Cleanup(t, r) } func TestSyncReplicaTimeout(t *testing.T) { @@ -1691,8 +1694,7 @@ func TestSyncReplicaTimeout(t *testing.T) { t.Skipf("Test skipped, SYNC REPLICA ignore receive_timeout for %s version", os.Getenv("CLICKHOUSE_VERSION")) } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 2*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Millisecond, 1*time.Second, 1*time.Minute) env.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS "+t.Name()) dropReplTables := func() { @@ -1725,6 +1727,7 @@ func TestSyncReplicaTimeout(t *testing.T) { dropReplTables() r.NoError(env.dropDatabase(t.Name())) + env.Cleanup(t, r) } func TestGetPartitionId(t *testing.T) { @@ -1732,8 +1735,7 @@ func TestGetPartitionId(t *testing.T) { t.Skipf("Test skipped, is_in_partition_key not available for %s version", os.Getenv("CLICKHOUSE_VERSION")) } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) type testData struct { CreateTableSQL string @@ -1794,12 +1796,12 @@ func TestGetPartitionId(t *testing.T) { assert.Equal(t, tc.ExpectedId, partitionId) assert.Equal(t, tc.ExpectedName, partitionName) } + env.Cleanup(t, r) } func TestRestoreMutationInProgress(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 0*time.Second, 1*time.Second, 5*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) zkPath := "/clickhouse/tables/{shard}/" + t.Name() + "/test_restore_mutation_in_progress" onCluster := "" if 
compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") >= 0 { @@ -1913,12 +1915,12 @@ func TestRestoreMutationInProgress(t *testing.T) { r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "test_restore_mutation_in_progress"}, "", "", false, version, "")) r.NoError(env.dropDatabase(t.Name())) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_restore_mutation_in_progress") + env.Cleanup(t, r) } func TestInnerTablesMaterializedView(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Minute) env.queryWithNoError(r, "CREATE DATABASE test_mv") env.queryWithNoError(r, "CREATE TABLE test_mv.src_table (v UInt64) ENGINE=MergeTree() ORDER BY v") @@ -1956,6 +1958,7 @@ func TestInnerTablesMaterializedView(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mv") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "remote", "test_mv") + env.Cleanup(t, r) } func TestFIPS(t *testing.T) { @@ -1963,8 +1966,7 @@ func TestFIPS(t *testing.T) { t.Skip("QA_AWS_ACCESS_KEY is empty, TestFIPS will skip") } env, r := NewTestEnvironment(t) - env.connectWithWait(r, 1*time.Second, 1*time.Second, 10*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 1*time.Second, 1*time.Second, 1*time.Minute) fipsBackupName := fmt.Sprintf("fips_backup_%d", rand.Int()) env.DockerExecNoError(r, "clickhouse", "rm", "-fv", "/etc/apt/sources.list.d/clickhouse.list") env.InstallDebIfNotExists(r, "clickhouse", "ca-certificates", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git") @@ -2049,7 +2051,7 @@ func TestFIPS(t *testing.T) { testTLSCerts("ecdsa", "", "prime256v1", 
"ECDHE-ECDSA-AES128-GCM-SHA256", "ECDHE-ECDSA-AES256-GCM-SHA384") r.NoError(env.ch.DropTable(clickhouse.Table{Database: t.Name(), Name: "fips_table"}, createSQL, "", false, 0, "")) r.NoError(env.dropDatabase(t.Name())) - + env.Cleanup(t, r) } func TestIntegrationS3Glacier(t *testing.T) { @@ -2064,6 +2066,7 @@ func TestIntegrationS3Glacier(t *testing.T) { dockerExecTimeout = 60 * time.Minute env.runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") dockerExecTimeout = 3 * time.Minute + env.Cleanup(t, r) } func TestIntegrationAzure(t *testing.T) { @@ -2071,14 +2074,16 @@ func TestIntegrationAzure(t *testing.T) { t.Skip("Skipping Azure integration tests...") return } - env, _ := NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") + env.Cleanup(t, r) } func TestIntegrationS3(t *testing.T) { env, r := NewTestEnvironment(t) env.checkObjectStorageIsEmpty(t, r, "S3") env.runMainIntegrationScenario(t, "S3", "config-s3.yml") + env.Cleanup(t, r) } func TestIntegrationGCS(t *testing.T) { @@ -2086,8 +2091,9 @@ func TestIntegrationGCS(t *testing.T) { t.Skip("Skipping GCS integration tests...") return } - env, _ := NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") + env.Cleanup(t, r) } func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { @@ -2095,28 +2101,32 @@ func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { t.Skip("Skipping GCS_EMULATOR integration tests...") return } - env, _ := NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") + env.Cleanup(t, r) } func TestIntegrationSFTPAuthPassword(t *testing.T) { - env, _ := NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") + env.Cleanup(t, r) } func TestIntegrationFTP(t *testing.T) { - env, _ := 
NewTestEnvironment(t) + env, r := NewTestEnvironment(t) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") } else { env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") } + env.Cleanup(t, r) } func TestIntegrationSFTPAuthKey(t *testing.T) { env, r := NewTestEnvironment(t) env.uploadSSHKeys(r, "clickhouse-backup") env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") + env.Cleanup(t, r) } func TestIntegrationCustomKopia(t *testing.T) { @@ -2131,6 +2141,7 @@ func TestIntegrationCustomKopia(t *testing.T) { env.InstallDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") env.runIntegrationCustom(t, r, "kopia") + env.Cleanup(t, r) } func TestIntegrationCustomRestic(t *testing.T) { @@ -2141,6 +2152,7 @@ func TestIntegrationCustomRestic(t *testing.T) { env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic") env.runIntegrationCustom(t, r, "restic") + env.Cleanup(t, r) } func TestIntegrationCustomRsync(t *testing.T) { @@ -2151,6 +2163,7 @@ func TestIntegrationCustomRsync(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") env.runIntegrationCustom(t, r, "rsync") + env.Cleanup(t, r) } func (env *TestEnvironment) runIntegrationCustom(t 
*testing.T, r *require.Assertions, customType string) { @@ -2200,12 +2213,12 @@ func TestIntegrationEmbedded(t *testing.T) { env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } + env.Cleanup(t, r) } func TestRestoreMapping(t *testing.T) { env, r := NewTestEnvironment(t) - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) checkRecordset := func(expectedRows int, expectedCount uint64, query string) { result := make([]struct { @@ -2261,6 +2274,7 @@ func TestRestoreMapping(t *testing.T) { checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") + env.Cleanup(t, r) } func TestMySQLMaterialized(t *testing.T) { @@ -2270,8 +2284,7 @@ func TestMySQLMaterialized(t *testing.T) { } env, r := NewTestEnvironment(t) env.DockerExecNoError(r, "mysql", "mysql", "-u", "root", "--password=root", "-v", "-e", "CREATE DATABASE ch_mysql_repl") - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) engine := "MaterializedMySQL" if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.9") == -1 { engine = "MaterializeMySQL" @@ -2290,6 +2303,7 @@ func TestMySQLMaterialized(t *testing.T) { env.queryWithNoError(r, "DROP DATABASE ch_mysql_repl") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_mysql_materialized") + env.Cleanup(t, r) } func TestPostgreSQLMaterialized(t *testing.T) { @@ -2304,8 +2318,7 @@ func TestPostgreSQLMaterialized(t *testing.T) { env, r := NewTestEnvironment(t) 
env.DockerExecNoError(r, "pgsql", "bash", "-ce", "echo 'CREATE DATABASE ch_pgsql_repl' | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root") env.DockerExecNoError(r, "pgsql", "bash", "-ce", "echo \"CREATE TABLE t1 (id BIGINT PRIMARY KEY, s VARCHAR(255)); INSERT INTO t1(id, s) VALUES(1,'s1'),(2,'s2'),(3,'s3')\" | PGPASSWORD=root psql -v ON_ERROR_STOP=1 -U root -d ch_pgsql_repl") - env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Second) - defer env.ch.Close() + env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) env.queryWithNoError(r, "CREATE DATABASE ch_pgsql_repl ENGINE=MaterializedPostgreSQL('pgsql:5432','ch_pgsql_repl','root','root') "+ "SETTINGS materialized_postgresql_schema = 'public'", @@ -2332,6 +2345,7 @@ func TestPostgreSQLMaterialized(t *testing.T) { env.queryWithNoError(r, "DROP DATABASE ch_pgsql_repl") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "delete", "local", "test_pgsql_materialized") + env.Cleanup(t, r) } func (env *TestEnvironment) uploadSSHKeys(r *require.Assertions, container string) { @@ -2349,7 +2363,6 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora var err error r := require.New(t) env.connectWithWait(r, 500*time.Millisecond, 1500*time.Millisecond, 1*time.Minute) - defer env.ch.Close() // test for specified partitions backup testBackupSpecifiedPartitions(t, r, env, remoteStorageType, backupConfig) @@ -2551,7 +2564,7 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, env *TestEnvironm } env.ch.Close() r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "clickhouse")...)) - env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 3*time.Minute) + env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 1*time.Minute) } func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env 
*TestEnvironment, remoteStorageType string, backupConfig string) { From a81daacaa5373d454d9c19dafc9c08f14a49fc6c Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 14:05:37 +0400 Subject: [PATCH 27/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix pgsql unhealthy --- test/integration/docker-compose_advanced.yml | 2 +- test/integration/run.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 2d23ea5b..29ce6bae 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -116,7 +116,7 @@ services: test: pg_isready timeout: 10s interval: 1s - retries: 10 + retries: 60 volumes: - pgsql:/var/lib/postgresql diff --git a/test/integration/run.sh b/test/integration/run.sh index ea6a725f..a39499fc 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -44,7 +44,7 @@ fi for project in $(docker compose -f ${CUR_DIR}/${COMPOSE_FILE} ls --all -q); do - docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes --timeout=1 done docker volume prune -f From debadcc5c9ace5daf9f1b2f1ca9880d54f5bd7f1 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 15:59:29 +0400 Subject: [PATCH 28/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, increase clickhouse timeout for all clickhouse-backup configs --- test/integration/config-azblob.yml | 1 + test/integration/config-custom-kopia.yml | 2 +- test/integration/config-custom-restic.yml | 2 +- test/integration/config-custom-rsync.yml | 2 +- test/integration/config-database-mapping.yml | 2 +- test/integration/config-ftp-old.yaml | 1 + test/integration/config-ftp.yaml | 1 + test/integration/config-gcs-custom-endpoint.yml | 2 +- 
test/integration/config-gcs.yml | 1 + test/integration/config-s3-fips.yml | 2 +- test/integration/config-s3-nodelete.yml | 2 +- test/integration/config-s3.yml | 2 +- test/integration/config-sftp-auth-key.yaml | 1 + test/integration/config-sftp-auth-password.yaml | 1 + 14 files changed, 14 insertions(+), 8 deletions(-) diff --git a/test/integration/config-azblob.yml b/test/integration/config-azblob.yml index 29ef5be1..8477246f 100644 --- a/test/integration/config-azblob.yml +++ b/test/integration/config-azblob.yml @@ -8,6 +8,7 @@ clickhouse: host: clickhouse port: 9000 restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s azblob: account_name: devstoreaccount1 account_key: Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw== diff --git a/test/integration/config-custom-kopia.yml b/test/integration/config-custom-kopia.yml index 8c8f1e3a..8fb991db 100644 --- a/test/integration/config-custom-kopia.yml +++ b/test/integration/config-custom-kopia.yml @@ -15,8 +15,8 @@ clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" + timeout: 60s custom: # all `kopia` uploads are incremental we don't need {{ .diffFromRemote }} upload_command: /custom/kopia/upload.sh {{ .backupName }} diff --git a/test/integration/config-custom-restic.yml b/test/integration/config-custom-restic.yml index 88372f07..69a9bf48 100644 --- a/test/integration/config-custom-restic.yml +++ b/test/integration/config-custom-restic.yml @@ -15,8 +15,8 @@ clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" + timeout: 60s custom: upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }} download_command: /custom/restic/download.sh {{ .backupName }} diff --git 
a/test/integration/config-custom-rsync.yml b/test/integration/config-custom-rsync.yml index 74965d84..b8671975 100644 --- a/test/integration/config-custom-rsync.yml +++ b/test/integration/config-custom-rsync.yml @@ -15,8 +15,8 @@ clickhouse: username: backup password: meow=& 123?*%# МЯУ sync_replicated_tables: true - timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; sql:SYSTEM SHUTDOWN" + timeout: 60s custom: upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }} download_command: /custom/rsync/download.sh {{ .backupName }} diff --git a/test/integration/config-database-mapping.yml b/test/integration/config-database-mapping.yml index 1ae1eb4c..86efe946 100644 --- a/test/integration/config-database-mapping.yml +++ b/test/integration/config-database-mapping.yml @@ -14,8 +14,8 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 1s restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s s3: access_key: access_key secret_key: it_is_my_super_secret_key diff --git a/test/integration/config-ftp-old.yaml b/test/integration/config-ftp-old.yaml index 202aaafa..083b3314 100644 --- a/test/integration/config-ftp-old.yaml +++ b/test/integration/config-ftp-old.yaml @@ -13,6 +13,7 @@ clickhouse: secure: true skip_verify: true restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s ftp: address: "ftp:21" username: "test_backup" diff --git a/test/integration/config-ftp.yaml b/test/integration/config-ftp.yaml index 73c92461..8c0da918 100644 --- a/test/integration/config-ftp.yaml +++ b/test/integration/config-ftp.yaml @@ -15,6 +15,7 @@ clickhouse: secure: true skip_verify: true restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s ftp: address: "ftp:21" username: "test_backup" diff --git a/test/integration/config-gcs-custom-endpoint.yml b/test/integration/config-gcs-custom-endpoint.yml index 3f864266..f1f354ae 100644 --- a/test/integration/config-gcs-custom-endpoint.yml +++ 
b/test/integration/config-gcs-custom-endpoint.yml @@ -17,10 +17,10 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; exec:ls -la /var/lib/clickhouse/access; sql:SYSTEM SHUTDOWN" # restart_command: bash -c 'echo "FAKE RESTART"' backup_mutations: true + timeout: 60s gcs: bucket: altinity-qa-test path: backup/{cluster}/{shard} diff --git a/test/integration/config-gcs.yml b/test/integration/config-gcs.yml index f7101c66..514de3a4 100644 --- a/test/integration/config-gcs.yml +++ b/test/integration/config-gcs.yml @@ -8,6 +8,7 @@ clickhouse: host: clickhouse port: 9000 restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s gcs: bucket: altinity-qa-test path: backup/{cluster}/{shard} diff --git a/test/integration/config-s3-fips.yml b/test/integration/config-s3-fips.yml index 7fe65ff0..f856377b 100644 --- a/test/integration/config-s3-fips.yml +++ b/test/integration/config-s3-fips.yml @@ -17,9 +17,9 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 2s restart_command: bash -c 'echo "FAKE RESTART"' backup_mutations: true + timeout: 60s # secrets for `FISP` will provide from `.env` or from GitHub actions secrets s3: access_key: ${QA_AWS_ACCESS_KEY} diff --git a/test/integration/config-s3-nodelete.yml b/test/integration/config-s3-nodelete.yml index b5e093be..601e9c63 100644 --- a/test/integration/config-s3-nodelete.yml +++ b/test/integration/config-s3-nodelete.yml @@ -17,8 +17,8 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 1s restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s s3: access_key: nodelete secret_key: nodelete_password diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml index c4773eac..18e36504 100644 --- a/test/integration/config-s3.yml +++ b/test/integration/config-s3.yml @@ -20,10 +20,10 @@ clickhouse: secure: true skip_verify: true 
sync_replicated_tables: true - timeout: 5s restart_command: "sql:SYSTEM RELOAD USERS; sql:SYSTEM RELOAD CONFIG; exec:ls -la /var/lib/clickhouse/access; sql:SYSTEM SHUTDOWN" # restart_command: bash -c 'echo "FAKE RESTART"' backup_mutations: true + timeout: 60s s3: access_key: access_key secret_key: it_is_my_super_secret_key diff --git a/test/integration/config-sftp-auth-key.yaml b/test/integration/config-sftp-auth-key.yaml index d7037c85..89efaaf2 100644 --- a/test/integration/config-sftp-auth-key.yaml +++ b/test/integration/config-sftp-auth-key.yaml @@ -11,6 +11,7 @@ clickhouse: secure: true skip_verify: true restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s sftp: address: "sshd" username: "root" diff --git a/test/integration/config-sftp-auth-password.yaml b/test/integration/config-sftp-auth-password.yaml index 55191d5f..e862b4a0 100644 --- a/test/integration/config-sftp-auth-password.yaml +++ b/test/integration/config-sftp-auth-password.yaml @@ -12,6 +12,7 @@ clickhouse: secure: true skip_verify: true restart_command: bash -c 'echo "FAKE RESTART"' + timeout: 60s sftp: address: "sshd" username: "root" From 2644dbc71d007bf6c08c6fa9f1046745d6e8ffb3 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 17:35:28 +0400 Subject: [PATCH 29/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, RUN_PARALLEL=2 --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 973974a0..9a56ecee 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,7 +269,7 @@ jobs: - name: Running integration tests env: - RUN_PARALLEL: 4 + RUN_PARALLEL: 2 GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # options for advanced debug CI/CD From 54f5b208b912b76de9fddaae121d87f4b44d40b0 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 17:58:40 +0400 Subject: [PATCH 30/54] debug 
https://github.com/Altinity/clickhouse-backup/issues/888, proper clean for RUN_PARALLEL=1 --- test/integration/integration_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 3b0b7da9..5bb6db20 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -452,12 +452,14 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { } func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { - downStart := time.Now() - env.ch.Close() - downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes","--timeout","1") - out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) - r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) - t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) + if "1" != os.Getenv("RUN_PARALLEL") { + downStart := time.Now() + env.ch.Close() + downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes", "--timeout", "1") + out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) 
+ r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) + t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) + } } From 6ad2f45a43166caa11e7c1e298bc01e811269a62 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 18:40:49 +0400 Subject: [PATCH 31/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, try to reduce memory usage --- test/integration/{cluster.xml => clickhouse-config.xml} | 0 test/integration/docker-compose.yml | 2 +- test/integration/docker-compose_advanced.yml | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename test/integration/{cluster.xml => clickhouse-config.xml} (100%) diff --git a/test/integration/cluster.xml b/test/integration/clickhouse-config.xml similarity index 100% rename from test/integration/cluster.xml rename to test/integration/clickhouse-config.xml diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index e7f4b0ca..fe9cf771 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -198,7 +198,7 @@ services: - ./server.key:/etc/clickhouse-server/server.key - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml - - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml + - ./clickhouse-config.xml:/etc/clickhouse-server/config.d/clickhouse-config.xml - /var/lib/clickhouse - /hdd1_data - /hdd2_data diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 29ce6bae..4ec05dba 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -260,7 +260,7 @@ services: - ./server.key:/etc/clickhouse-server/server.key - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem - ./ssl.xml:/etc/clickhouse-server/config.d/ssl.xml - - ./cluster.xml:/etc/clickhouse-server/config.d/cluster.xml + - 
./clickhouse-config.xml:/etc/clickhouse-server/config.d/clickhouse-config.xml - /var/lib/clickhouse - /hdd1_data - /hdd2_data From 1eb37c5e871b2249d9361e66b08c48b8c691a771 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 19:31:57 +0400 Subject: [PATCH 32/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, try increase timeout, wtf why timeout 1 minute is not enugh ;( --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 5bb6db20..fc1dbfff 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2364,7 +2364,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora var out string var err error r := require.New(t) - env.connectWithWait(r, 500*time.Millisecond, 1500*time.Millisecond, 1*time.Minute) + env.connectWithWait(r, 500*time.Millisecond, 1500*time.Millisecond, 3*time.Minute) // test for specified partitions backup testBackupSpecifiedPartitions(t, r, env, remoteStorageType, backupConfig) @@ -2566,7 +2566,7 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, env *TestEnvironm } env.ch.Close() r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "clickhouse")...)) - env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 1*time.Minute) + env.connectWithWait(r, 3*time.Second, 1500*time.Millisecond, 3*time.Minute) } func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, backupConfig string) { From 27c42a06b82bafc176c3d7eeca840532bdacf5a0 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 20:41:16 +0400 Subject: [PATCH 33/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix can't calculate max(bytes_on_disk) --- pkg/clickhouse/clickhouse.go | 1 + 1 file 
changed, 1 insertion(+) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index b934ebfc..d057542c 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -1157,6 +1157,7 @@ func (ch *ClickHouse) CalculateMaxFileSize(ctx context.Context, cfg *config.Conf if !cfg.General.UploadByPart { maxSizeQuery = "SELECT toInt64(max(data_by_disk) * 1.02) AS max_file_size FROM (SELECT disk_name, max(toInt64(bytes_on_disk)) data_by_disk FROM system.parts GROUP BY disk_name)" } + maxSizeQuery += " SETTINGS empty_result_for_aggregation_by_empty_set=0" if err := ch.SelectSingleRow(ctx, &rows, maxSizeQuery); err != nil { return 0, fmt.Errorf("can't calculate max(bytes_on_disk): %v", err) } From 0e19b03c40587725f554c35c27c5493ee7a18d78 Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 28 Jul 2024 21:26:56 +0400 Subject: [PATCH 34/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, RUN_PARALLEL=4 and move TestIntegrationEmbedded to first --- .github/workflows/build.yaml | 2 +- test/integration/integration_test.go | 87 ++++++++++++++-------------- 2 files changed, 45 insertions(+), 44 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 9a56ecee..973974a0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,7 +269,7 @@ jobs: - name: Running integration tests env: - RUN_PARALLEL: 2 + RUN_PARALLEL: 4 GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # options for advanced debug CI/CD diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index fc1dbfff..d91191e0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2056,6 +2056,50 @@ func TestFIPS(t *testing.T) { env.Cleanup(t, r) } +func TestIntegrationEmbedded(t *testing.T) { + //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, 
look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) + //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 + version := os.Getenv("CLICKHOUSE_VERSION") + if compareVersion(version, "23.3") < 0 { + t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) + } + env, r := NewTestEnvironment(t) + + //CUSTOM backup creates folder in each disk, need to clear + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") + + //@TODO think about how to implements embedded backup for s3_plain disks + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") + //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") + + t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") + //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") + //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + ////CUSTOM backup create folder in each disk + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") + //if compareVersion(version, "24.2") >= 0 { + // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + //} + //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") + //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + + if compareVersion(version, "23.8") >= 0 { + //CUSTOM backup creates folder in each disk, 
need to clear + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") + } + if compareVersion(version, "24.3") >= 0 { + //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + } + env.Cleanup(t, r) +} + func TestIntegrationS3Glacier(t *testing.T) { if isTestShouldSkip("GLACIER_TESTS") { t.Skip("Skipping GLACIER integration tests...") @@ -2174,49 +2218,6 @@ func (env *TestEnvironment) runIntegrationCustom(t *testing.T, r *require.Assert env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") } -func TestIntegrationEmbedded(t *testing.T) { - //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) - //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 - version := os.Getenv("CLICKHOUSE_VERSION") - if compareVersion(version, "23.3") < 0 { - t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) - } - env, r := NewTestEnvironment(t) - - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", 
"/var/lib/clickhouse/disks/backups_s3/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") - - //@TODO think about how to implements embedded backup for s3_plain disks - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") - //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") - - t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") - //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") - //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) - ////CUSTOM backup create folder in each disk - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") - //if compareVersion(version, "24.2") >= 0 { - // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - //} - //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") - //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) - - if compareVersion(version, "23.8") >= 0 { - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") - } - if compareVersion(version, "24.3") >= 0 { - //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") - env.DockerExecNoError(r, "clickhouse-backup", 
"bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") - } - env.Cleanup(t, r) -} func TestRestoreMapping(t *testing.T) { env, r := NewTestEnvironment(t) From ff5ecbbc65e6a678945362a86a6f6e0c94f33f1b Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 29 Jul 2024 01:01:28 +0400 Subject: [PATCH 35/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestIntegrationAzure --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index d91191e0..75b632b7 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -3049,7 +3049,7 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er batch, err := env.ch.GetConn().PrepareBatch(context.Background(), insertSQL) if err != nil { - return fmt.Errorf("createTestData PrepareBatch error: %v", err) + return fmt.Errorf("createTestData PrepareBatch(%s) error: %v", insertSQL, err) } for _, row := range data.Rows { @@ -3064,7 +3064,7 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er } err = batch.Send() if err != nil { - return fmt.Errorf("createTestData batch.Send() error: %v", err) + return fmt.Errorf("createTestData batch.Send(%s) error: %v", insertSQL, err) } return err } From 0e085c1445c0282ee07a5f205cba4c55f0ca4d14 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 29 Jul 2024 23:23:42 +0400 Subject: [PATCH 36/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, use dockerPool instead of up+down, remove partial download metadata for object disk required backup during `restore` --- .github/workflows/build.yaml | 19 
++++- pkg/backup/delete.go | 17 ++++ pkg/backup/download.go | 22 ++--- pkg/backup/restore.go | 8 ++ test/integration/integration_test.go | 120 +++++++++++++++------------ test/integration/run.sh | 49 ++++++++++- 6 files changed, 163 insertions(+), 72 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 973974a0..2f195446 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -295,7 +295,7 @@ jobs: QA_GCS_OVER_S3_SECRET_KEY: ${{ secrets.QA_GCS_OVER_S3_SECRET_KEY }} QA_GCS_OVER_S3_BUCKET: ${{ secrets.QA_GCS_OVER_S3_BUCKET }} run: | - set -x + set -xe echo "CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION}" echo "GCS_TESTS=${GCS_TESTS}" @@ -316,6 +316,23 @@ jobs: export CUR_DIR="$(pwd)/test/integration" export CLICKHOUSE_BACKUP_BIN="$(pwd)/clickhouse-backup/clickhouse-backup-race" docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --progress=quiet pull + + pids=() + for ((i = 1; i <= RUN_PARALLEL; i++)); do + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name project${i} --progress plain up -d & + pids+=($!) + done + + + for pid in "${pids[@]}"; do + if wait "$pid"; then + echo "$pid docker compose up successful" + else + echo "$pid the docker compose up failed. Exiting." 
+ exit 1 # Exit with an error code if any command fails + fi + done + go test -parallel ${RUN_PARALLEL} -timeout 60m -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v test/integration/integration_test.go - name: Format integration coverage env: diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 4d87dbf5..0d34a009 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -441,3 +441,20 @@ func (b *Backuper) CleanRemoteBroken(commandId int) error { } return nil } + +func (b *Backuper) cleanPartialRequiredBackup(ctx context.Context, disks []clickhouse.Disk, currentBackupName string) error { + if localBackups, _, err := b.GetLocalBackups(ctx, disks); err == nil { + for _, localBackup := range localBackups { + if localBackup.BackupName != currentBackupName && localBackup.DataSize+localBackup.CompressedSize+localBackup.MetadataSize+localBackup.RBACSize == 0 { + if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, disks); err != nil { + return fmt.Errorf("CleanPartialRequiredBackups %s -> RemoveBackupLocal cleaning error: %v", localBackup.BackupName, err) + } else { + b.log.Infof("CleanPartialRequiredBackups %s deleted", localBackup.BackupName) + } + } + } + } else { + return fmt.Errorf("CleanPartialRequiredBackups -> GetLocalBackups cleaning error: %v", err) + } + return nil +} diff --git a/pkg/backup/download.go b/pkg/backup/download.go index fe2a7ad8..ac563b0c 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -270,26 +270,16 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ //clean partially downloaded requiredBackup if remoteBackup.RequiredBackup != "" { - if localBackups, _, err = b.GetLocalBackups(ctx, disks); err == nil { - for _, localBackup := range localBackups { - if localBackup.BackupName != remoteBackup.BackupName && localBackup.DataSize+localBackup.CompressedSize+localBackup.MetadataSize+localBackup.RBACSize == 0 { - if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, 
disks); err != nil { - return fmt.Errorf("downloadWithDiff -> RemoveBackupLocal cleaning error: %v", err) - } else { - b.log.Infof("partial required backup %s deleted", localBackup.BackupName) - } - } - } - } else { - return fmt.Errorf("downloadWithDiff -> GetLocalBackups cleaning error: %v", err) + if err = b.cleanPartialRequiredBackup(ctx, disks, remoteBackup.BackupName); err != nil { + return err } } log.WithFields(apexLog.Fields{ - "duration": utils.HumanizeDuration(time.Since(startDownload)), - "download_size": utils.FormatBytes(dataSize + metadataSize + rbacSize + configSize), - "object_disk_size": utils.FormatBytes(backupMetadata.ObjectDiskSize), - "version": backupVersion, + "duration": utils.HumanizeDuration(time.Since(startDownload)), + "download_size": utils.FormatBytes(dataSize + metadataSize + rbacSize + configSize), + "object_disk_size": utils.FormatBytes(backupMetadata.ObjectDiskSize), + "version": backupVersion, }).Info("done") return nil } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 0d4e591c..8e7c81e0 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -223,6 +223,14 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab } } } + + //clean partially downloaded requiredBackup + if backupMetadata.RequiredBackup != "" { + if err = b.cleanPartialRequiredBackup(ctx, disks, backupMetadata.BackupName); err != nil { + return err + } + } + log.WithFields(apexLog.Fields{ "duration": utils.HumanizeDuration(time.Since(startRestore)), "version": backupVersion, diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 75b632b7..f74007ed 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -7,6 +7,7 @@ import ( "context" "encoding/json" "fmt" + pool "github.com/jolestar/go-commons-pool/v2" "math/rand" "os" "os/exec" @@ -16,6 +17,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" @@ -34,6 +36,9 @@ 
import ( "github.com/Altinity/clickhouse-backup/v2/pkg/utils" ) +var projectId atomic.Uint32 +var dockerPool *pool.ObjectPool + // setup log level func init() { log.SetHandler(logcli.New(os.Stdout)) @@ -45,6 +50,27 @@ func init() { logLevel = os.Getenv("TEST_LOG_LEVEL") } log.SetLevelFromString(logLevel) + + runParallel, isExists := os.LookupEnv("RUN_PARALLEL") + if !isExists { + runParallel = "1" + } + runParallelInt, err := strconv.Atoi(runParallel) + if err != nil { + log.Fatalf("invalid RUN_PARALLEL environment variable value %s", runParallel) + } + + ctx := context.Background() + factory := pool.NewPooledObjectFactorySimple( + func(context.Context) (interface{}, error) { + projectId.Add(1) + env := TestEnvironment{ + ProjectName: fmt.Sprintf("project%d", projectId.Load() % uint32(runParallelInt)), + } + return &env, nil + }) + dockerPool = pool.NewObjectPoolWithDefaultConfig(ctx, factory) + dockerPool.Config.MaxTotal = runParallelInt } const dbNameAtomic = "_test#$.ДБ_atomic_" @@ -414,33 +440,25 @@ var defaultIncrementData = []TestDataStruct{ } func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { - t.Helper() - r := require.New(t) if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } - env := TestEnvironment{ - ProjectName: "all", + t.Helper() + if os.Getenv("RUN_PARALLEL") != "1" /* && t.Name() != "TestLongListRemote" */ { + t.Parallel() } - if os.Getenv("RUN_PARALLEL") != "1" { - if t.Name() != "TestLongListRemote" { - t.Logf("[%s] executing in parallel mode", t.Name()) - t.Parallel() - } else { - t.Logf("[%s] executing in sequence mode", t.Name()) - } - env.ProjectName = strings.ToLower(t.Name()) - upCmd := append(env.GetDefaultComposeCommand(), "up", "-d") - upStart := time.Now() - out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", upCmd...) 
- if err != nil { - logs, _ := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", append(env.GetDefaultComposeCommand(),"logs")...) - t.Log(logs) - } - r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(upCmd, " "), out, err) - t.Logf("%s docker compose up time = %s", t.Name(), time.Since(upStart)) + + r := require.New(t) + envObj, err := dockerPool.BorrowObject(context.Background()) + if err != nil { + t.Fatalf("dockerPool.BorrowObject retrun error: %v", err) + } + env := envObj.(*TestEnvironment) + + if os.Getenv("RUN_PARALLEL") != "1" /* && t.Name() != "TestLongListRemote" */ { + t.Logf("%s run in parallel mode project=%s", t.Name(), env.ProjectName) } else { - t.Logf("[%s] executing in sequence mode", t.Name()) + t.Logf("%s run in sequence mode project=%s", t.Name(), env.ProjectName) } if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "1.1.54394") <= 0 { @@ -448,18 +466,18 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") } - return &env, r + return env, r } func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { - if "1" != os.Getenv("RUN_PARALLEL") { - downStart := time.Now() - env.ch.Close() - downCmd := append(env.GetDefaultComposeCommand(), "down", "--remove-orphans", "--volumes", "--timeout", "1") - out, err := utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", downCmd...) 
- r.NoError(err, "%s\n\n%s\n\n[ERROR]\n%v", "docker "+strings.Join(downCmd, " "), out, err) - t.Logf("%s docker compose down time = %s", t.Name(), time.Since(downStart)) + env.ch.Close() + if t.Name() == "TestIntegrationCustomRsync" { + env.DockerExecNoError(r, "sshd", "rm", "-rf", "/root/rsync_backups") } + if err := dockerPool.ReturnObject(context.Background(), env); err != nil { + t.Fatalf("dockerPool.ReturnObject error: %+v", err) + } + } @@ -2120,7 +2138,7 @@ func TestIntegrationAzure(t *testing.T) { t.Skip("Skipping Azure integration tests...") return } - env, r := NewTestEnvironment(t) + env, r :=NewTestEnvironment(t) env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") env.Cleanup(t, r) } @@ -2137,7 +2155,7 @@ func TestIntegrationGCS(t *testing.T) { t.Skip("Skipping GCS integration tests...") return } - env, r := NewTestEnvironment(t) + env, r :=NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") env.Cleanup(t, r) } @@ -2147,19 +2165,19 @@ func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { t.Skip("Skipping GCS_EMULATOR integration tests...") return } - env, r := NewTestEnvironment(t) + env, r :=NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") env.Cleanup(t, r) } func TestIntegrationSFTPAuthPassword(t *testing.T) { - env, r := NewTestEnvironment(t) + env, r :=NewTestEnvironment(t) env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") env.Cleanup(t, r) } func TestIntegrationFTP(t *testing.T) { - env, r := NewTestEnvironment(t) + env, r :=NewTestEnvironment(t) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") } else { @@ -2370,13 +2388,13 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora testBackupSpecifiedPartitions(t, r, env, remoteStorageType, backupConfig) // main test scenario - testBackupName := fmt.Sprintf("%s_full_%d", 
t.Name(), rand.Int()) + fullBackupName := fmt.Sprintf("%s_full_%d", t.Name(), rand.Int()) incrementBackupName := fmt.Sprintf("%s_increment_%d", t.Name(), rand.Int()) incrementBackupName2 := fmt.Sprintf("%s_increment2_%d", t.Name(), rand.Int()) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} tablesPattern := fmt.Sprintf("*_%s.*", t.Name()) log.Debug("Clean before start") - fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) + fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") testData := generateTestData(t, r, env, remoteStorageType, defaultTestData) @@ -2384,26 +2402,26 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") log.Debug("Create backup") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, fullBackupName) incrementData := generateIncrementTestData(t, r, env, remoteStorageType, defaultIncrementData, 1) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, incrementBackupName) log.Debug("Upload full") - uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, testBackupName) - env.checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, 
remoteStorageType) + uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, fullBackupName) + env.checkResumeAlreadyProcessed(uploadCmd, fullBackupName, "upload", r, remoteStorageType) // https://github.com/Altinity/clickhouse-backup/pull/900 if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 { log.Debug("create --diff-from-remote backup") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", testBackupName, "--tables", tablesPattern, incrementBackupName2) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", fullBackupName, "--tables", tablesPattern, incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2) } log.Debug("Upload increment") - uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s --diff-from-remote %s --resume", backupConfig, incrementBackupName, testBackupName) + uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s --diff-from-remote %s --resume", backupConfig, incrementBackupName, fullBackupName) env.checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) backupDir := "/var/lib/clickhouse/backup" @@ -2414,7 +2432,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora r.NoError(err) r.Equal(2, 
len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") log.Debug("Delete backup") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NotNil(err) @@ -2424,17 +2442,17 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora log.Debug("Download") replaceStorageDiskNameForReBalance(r, env, remoteStorageType, false) - downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, testBackupName) - env.checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) + downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, fullBackupName) + env.checkResumeAlreadyProcessed(downloadCmd, fullBackupName, "download", r, remoteStorageType) log.Debug("Restore schema") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", fullBackupName) log.Debug("Restore data") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", 
"--data", fullBackupName) log.Debug("Full restore with rm") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--rm", fullBackupName) log.Debug("Check data") for i := range testData { @@ -2451,7 +2469,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora dropDatabasesFromTestDataDataSet(t, r, env, databaseList) log.Debug("Delete backup") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", testBackupName) + env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) log.Debug("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, incrementBackupName) @@ -2480,9 +2498,9 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora // test end log.Debug("Clean after finish") - // during download increment, partially downloaded full will clean + // during download increment, partially downloaded full will also clean fullCleanup(t, r, env, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) - fullCleanup(t, r, env, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) + fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) replaceStorageDiskNameForReBalance(r, env, remoteStorageType, true) env.checkObjectStorageIsEmpty(t, r, remoteStorageType) } diff --git a/test/integration/run.sh b/test/integration/run.sh index a39499fc..7b683148 100755 --- a/test/integration/run.sh +++ 
b/test/integration/run.sh @@ -43,18 +43,59 @@ else fi +pids=() for project in $(docker compose -f ${CUR_DIR}/${COMPOSE_FILE} ls --all -q); do - docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes --timeout=1 + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes --timeout=1 & + pids+=($!) +done + +for pid in "${pids[@]}"; do + if wait "$pid"; then + echo "$pid docker compose down successful" + else + echo "$pid docker compose down failed. Exiting." + exit 1 # Exit with an error code if any command fails + fi done docker volume prune -f make clean build-race-docker build-race-fips-docker export RUN_PARALLEL=${RUN_PARALLEL:-1} -if [[ "1" == "${RUN_PARALLEL}" ]]; then - docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name all --progress plain up -d -fi docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --progress=quiet pull + +pids=() +for ((i = 0; i < RUN_PARALLEL; i++)); do + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name project${i} --progress plain up -d & + pids+=($!) +done + +for pid in "${pids[@]}"; do + if wait "$pid"; then + echo "$pid docker compose up successful" + else + echo "$pid docker compose up failed. Exiting." + exit 1 # Exit with an error code if any command fails + fi +done + go test -parallel ${RUN_PARALLEL} -race -timeout ${TEST_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" + +if [[ "1" == "${CLEAN_AFTER:-1}" ]]; then + pids=() + for project in $(docker compose -f ${CUR_DIR}/${COMPOSE_FILE} ls --all -q); do + docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes --timeout=1 & + pids+=($!) 
+ done + + for pid in "${pids[@]}"; do + if wait "$pid"; then + echo "$pid docker compose down successful" + else + echo "$pid docker compose down failed. Exiting." + exit 1 # Exit with an error code if any command fails + fi + done +fi \ No newline at end of file From 27855c0e397baf3d229c017c05132ddacceb4dd7 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 06:38:50 +0400 Subject: [PATCH 37/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, use dockerPool instead of up+down --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2f195446..e63b95ec 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -318,7 +318,7 @@ jobs: docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --progress=quiet pull pids=() - for ((i = 1; i <= RUN_PARALLEL; i++)); do + for ((i = 0; i < RUN_PARALLEL; i++)); do docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name project${i} --progress plain up -d & pids+=($!) 
done From 47f00ed8e46aa4d6aab81373ee1697051de8326c Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 10:10:32 +0400 Subject: [PATCH 38/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, use `docker exec` instead of `docker compose exec` --- test/integration/integration_test.go | 36 +++++++++++++++++----------- test/integration/run.sh | 10 ++++++-- 2 files changed, 30 insertions(+), 16 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index f74007ed..873c434b 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -43,7 +43,7 @@ var dockerPool *pool.ObjectPool func init() { log.SetHandler(logcli.New(os.Stdout)) logLevel := "info" - if os.Getenv("LOG_LEVEL") != "" && os.Getenv("LOG_LEVEL") != "info" { + if os.Getenv("LOG_LEVEL") != "" && os.Getenv("LOG_LEVEL") != "info" { logLevel = os.Getenv("LOG_LEVEL") } if os.Getenv("TEST_LOG_LEVEL") != "" && os.Getenv("TEST_LOG_LEVEL") != "info" { @@ -65,7 +65,7 @@ func init() { func(context.Context) (interface{}, error) { projectId.Add(1) env := TestEnvironment{ - ProjectName: fmt.Sprintf("project%d", projectId.Load() % uint32(runParallelInt)), + ProjectName: fmt.Sprintf("project%d", projectId.Load()%uint32(runParallelInt)), } return &env, nil }) @@ -444,7 +444,7 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } t.Helper() - if os.Getenv("RUN_PARALLEL") != "1" /* && t.Name() != "TestLongListRemote" */ { + if os.Getenv("RUN_PARALLEL") != "1" /* && t.Name() != "TestLongListRemote" */ { t.Parallel() } @@ -474,13 +474,19 @@ func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { if t.Name() == "TestIntegrationCustomRsync" { env.DockerExecNoError(r, "sshd", "rm", "-rf", "/root/rsync_backups") } + if t.Name() == "TestIntegrationCustomRestic" { + env.DockerExecNoError(r, "minio", "rm", "-rf", 
"/bitnami/minio/data/clickhouse/restic") + } + if t.Name() == "TestIntegrationCustomKopia" { + env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/kopia") + } + if err := dockerPool.ReturnObject(context.Background(), env); err != nil { t.Fatalf("dockerPool.ReturnObject error: %+v", err) } } - // TestLongListRemote - no parallel, cause need to restart minio func TestLongListRemote(t *testing.T) { env, r := NewTestEnvironment(t) @@ -509,7 +515,7 @@ func TestLongListRemote(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") cachedDuration := time.Since(startCashed) - r.Greater(noCacheDuration, cachedDuration,"noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) + r.Greater(noCacheDuration, cachedDuration, "noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) @@ -2138,7 +2144,7 @@ func TestIntegrationAzure(t *testing.T) { t.Skip("Skipping Azure integration tests...") return } - env, r :=NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") env.Cleanup(t, r) } @@ -2155,7 +2161,7 @@ func TestIntegrationGCS(t *testing.T) { t.Skip("Skipping GCS integration tests...") return } - env, r :=NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") env.Cleanup(t, r) } @@ -2165,19 +2171,19 @@ func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { t.Skip("Skipping GCS_EMULATOR integration tests...") return } - env, r :=NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") env.Cleanup(t, r) } func 
TestIntegrationSFTPAuthPassword(t *testing.T) { - env, r :=NewTestEnvironment(t) + env, r := NewTestEnvironment(t) env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") env.Cleanup(t, r) } func TestIntegrationFTP(t *testing.T) { - env, r :=NewTestEnvironment(t) + env, r := NewTestEnvironment(t) if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") } else { @@ -2236,7 +2242,6 @@ func (env *TestEnvironment) runIntegrationCustom(t *testing.T, r *require.Assert env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") } - func TestRestoreMapping(t *testing.T) { env, r := NewTestEnvironment(t) env.connectWithWait(r, 500*time.Millisecond, 1*time.Second, 1*time.Minute) @@ -3203,9 +3208,13 @@ func (env *TestEnvironment) GetDefaultComposeCommand() []string { return []string{"compose", "-f", path.Join(os.Getenv("CUR_DIR"), os.Getenv("COMPOSE_FILE")), "--progress", "plain", "--project-name", env.ProjectName} } +func (env *TestEnvironment) GetExecDockerCommand(container string) []string { + return []string{"exec", fmt.Sprintf("%s-%s-1", env.ProjectName, container)} +} + func (env *TestEnvironment) DockerExecNoError(r *require.Assertions, container string, cmd ...string) { out, err := env.DockerExecOut(container, cmd...) - r.NoError(err, "%s\n\n%s\n[ERROR]\n%v", strings.Join(append(append(env.GetDefaultComposeCommand(), "exec", container), cmd...), " "), out, err) + r.NoError(err, "%s\n\n%s\n[ERROR]\n%v", strings.Join(append(env.GetExecDockerCommand(container), cmd...), " "), out, err) } func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { @@ -3215,8 +3224,7 @@ func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { } func (env *TestEnvironment) DockerExecOut(container string, cmd ...string) (string, error) { - dcmd := append(env.GetDefaultComposeCommand(), "exec", container) - dcmd = append(dcmd, cmd...) 
+ dcmd := append(env.GetExecDockerCommand(container), cmd...) return utils.ExecCmdOut(context.Background(), dockerExecTimeout, "docker", dcmd...) } diff --git a/test/integration/run.sh b/test/integration/run.sh index 7b683148..450d041c 100755 --- a/test/integration/run.sh +++ b/test/integration/run.sh @@ -80,10 +80,16 @@ for pid in "${pids[@]}"; do fi done +set +e go test -parallel ${RUN_PARALLEL} -race -timeout ${TEST_TIMEOUT:-60m} -failfast -tags=integration -run "${RUN_TESTS:-.+}" -v ${CUR_DIR}/integration_test.go -go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" +TEST_FAILED=$? +set -e + +if [[ "0" == "${TEST_FAILED}" ]]; then + go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out" +fi -if [[ "1" == "${CLEAN_AFTER:-1}" ]]; then +if [[ "1" == "${CLEAN_AFTER:-0}" || "0" == "${TEST_FAILED}" ]]; then pids=() for project in $(docker compose -f ${CUR_DIR}/${COMPOSE_FILE} ls --all -q); do docker compose -f ${CUR_DIR}/${COMPOSE_FILE} --project-name ${project} --progress plain down --remove-orphans --volumes --timeout=1 & From ca93e21b5860e33bbe45be57e41b9abb822aec3f Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 11:18:29 +0400 Subject: [PATCH 39/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix pool.NewPooledObjectFactorySimple --- test/integration/integration_test.go | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 873c434b..bf8d8785 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -59,16 +59,14 @@ func init() { if err != nil { log.Fatalf("invalid RUN_PARALLEL environment variable value %s", runParallel) } - ctx := context.Background() - factory := pool.NewPooledObjectFactorySimple( - func(context.Context) (interface{}, error) { - projectId.Add(1) + factory := 
pool.NewPooledObjectFactorySimple( func(context.Context) (interface{}, error) { + id := projectId.Add(1) env := TestEnvironment{ - ProjectName: fmt.Sprintf("project%d", projectId.Load()%uint32(runParallelInt)), + ProjectName: fmt.Sprintf("project%d", id%uint32(runParallelInt)), } return &env, nil - }) + }) dockerPool = pool.NewObjectPoolWithDefaultConfig(ctx, factory) dockerPool.Config.MaxTotal = runParallelInt } @@ -766,7 +764,7 @@ func TestServerAPI(t *testing.T) { fillDatabaseForAPIServer(maxTables, minFields, randFields, env, r, fieldTypes) log.Debug("Run `clickhouse-backup server --watch` in background") - env.DockerExecNoError(r, "-d", "clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log") + env.DockerExecBackgroundNoError(r,"clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log") time.Sleep(1 * time.Second) testAPIBackupVersion(r, env) @@ -2029,7 +2027,7 @@ func TestFIPS(t *testing.T) { env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName) log.Debug("Run `clickhouse-backup-fips server` in background") - env.DockerExecNoError(r, "-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") + env.DockerExecBackgroundNoError(r, "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) runClickHouseClientInsertSystemBackupActions(r, env, []string{fmt.Sprintf("create_remote --tables="+t.Name()+".fips_table %s", fipsBackupName)}, true) @@ -3192,6 +3190,11 @@ func (env *TestEnvironment) queryWithNoError(r *require.Assertions, query string var dockerExecTimeout = 600 * time.Second +func (env 
*TestEnvironment) DockerExecBackgroundNoError(r *require.Assertions, container string, cmd ...string) { + out, err := env.DockerExecBackgroundOut(container, cmd...) + r.NoError(err, "%s\n\n%s\n[ERROR]\n%v", strings.Join(append(append(env.GetDefaultComposeCommand(), "exec", "-d", container), cmd...), " "), out, err) +} + func (env *TestEnvironment) DockerExecBackground(container string, cmd ...string) error { out, err := env.DockerExecBackgroundOut(container, cmd...) log.Debug(out) From cdcadeb4b34c0f7d62f38bdeac655f06d7b96741 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 11:32:44 +0400 Subject: [PATCH 40/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestFIPS --- test/integration/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index bf8d8785..36d4efe0 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -2050,7 +2050,7 @@ func TestFIPS(t *testing.T) { testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { generateCerts(certType, keyLength, curveName) log.Debugf("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) - env.DockerExecNoError(r, "-d", "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") + env.DockerExecBackgroundNoError(r, "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "rm -rf /tmp/testssl* && /opt/testssl/testssl.sh -e -s -oC /tmp/testssl.csv --color 0 --disable-rating --quiet -n min --mode parallel --add-ca /etc/clickhouse-backup/ca-cert.pem localhost:7172") 
From 882d92171571ed0c4870f62c65569b204ce1d62d Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 12:44:22 +0400 Subject: [PATCH 41/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestIntegrationCustomKopia --- test/integration/kopia/init.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/kopia/init.sh b/test/integration/kopia/init.sh index e00dfd61..edabe293 100755 --- a/test/integration/kopia/init.sh +++ b/test/integration/kopia/init.sh @@ -1,7 +1,7 @@ CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" export KOPIA_PASSWORD_FILE="${CUR_DIR}/password" export KOPIA_S3_BUCKET=clickhouse -export KOPIA_S3_PATH=/clickhouse/kopia/cluster_name/shard_number/ +export KOPIA_S3_PATH=/kopia/cluster_name/shard_number/ export KOPIA_S3_ENDPOINT=minio:9000 export AWS_ACCESS_KEY_ID=access_key export AWS_SECRET_ACCESS_KEY=it_is_my_super_secret_key From 533040c66e00c93d2b7deeab06277c0769046705 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 13:15:02 +0400 Subject: [PATCH 42/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestLongListRemote --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 36d4efe0..91ad528f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -442,7 +442,7 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } t.Helper() - if os.Getenv("RUN_PARALLEL") != "1" /* && t.Name() != "TestLongListRemote" */ { + if os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" { t.Parallel() } @@ -453,7 +453,7 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { } env := envObj.(*TestEnvironment) - if os.Getenv("RUN_PARALLEL") != "1" /* 
&& t.Name() != "TestLongListRemote" */ { + if os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" { t.Logf("%s run in parallel mode project=%s", t.Name(), env.ProjectName) } else { t.Logf("%s run in sequence mode project=%s", t.Name(), env.ProjectName) From cdd84f39882a70986f4bc84a59941573acdc6913 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 14:52:56 +0400 Subject: [PATCH 43/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestRBAC and TestConfigs cleanup --- test/integration/integration_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 91ad528f..2200ce5e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -469,6 +469,9 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { env.ch.Close() + if t.Name() == "TestRBAC" || t.Name() == "TestConfigs" { + env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/backups_s3") + } if t.Name() == "TestIntegrationCustomRsync" { env.DockerExecNoError(r, "sshd", "rm", "-rf", "/root/rsync_backups") } From c923d297f856b5bef9574c6e3bd1f5aa6574016d Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 16:56:34 +0400 Subject: [PATCH 44/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, fix TestIntegrationEmbedded cleanup --- test/integration/integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 2200ce5e..3ac12bdf 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -469,7 +469,7 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { 
env.ch.Close() - if t.Name() == "TestRBAC" || t.Name() == "TestConfigs" { + if t.Name() == "TestRBAC" || t.Name() == "TestConfigs" || t.Name() == "TestIntegrationEmbedded" { env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/backups_s3") } if t.Name() == "TestIntegrationCustomRsync" { @@ -2298,7 +2298,7 @@ func TestRestoreMapping(t *testing.T) { checkRecordset(1, 10, "SELECT count() FROM `database-2`.v2") log.Debug("Check database1 not exists") - checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") + checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1' SETTINGS empty_result_for_aggregation_by_empty_set=0") fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") env.Cleanup(t, r) From 400b1188f7a33d0605b97dc4f7563ba47f48d8eb Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 18:16:10 +0400 Subject: [PATCH 45/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestIntegrationAzure --- test/integration/integration_test.go | 323 ++++++++++++++------------- 1 file changed, 162 insertions(+), 161 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 3ac12bdf..4e5daeaf 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -537,6 +537,168 @@ func TestLongListRemote(t *testing.T) { env.Cleanup(t, r) } +func TestIntegrationAzure(t *testing.T) { + if isTestShouldSkip("AZURE_TESTS") { + t.Skip("Skipping Azure integration tests...") + return + } + env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") + env.Cleanup(t, r) +} + +func TestIntegrationEmbedded(t *testing.T) { + //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 
and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) + //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 + version := os.Getenv("CLICKHOUSE_VERSION") + if compareVersion(version, "23.3") < 0 { + t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) + } + env, r := NewTestEnvironment(t) + + //CUSTOM backup creates folder in each disk, need to clear + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") + + //@TODO think about how to implements embedded backup for s3_plain disks + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") + //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") + + t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") + //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") + //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + ////CUSTOM backup create folder in each disk + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") + //if compareVersion(version, "24.2") >= 0 { + // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + //} + //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") + //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + + if compareVersion(version, "23.8") >= 0 { + //CUSTOM backup creates folder in each disk, need to clear + env.DockerExecNoError(r, "clickhouse", "rm", 
"-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") + } + if compareVersion(version, "24.3") >= 0 { + //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") + } + env.Cleanup(t, r) +} + +func TestIntegrationS3Glacier(t *testing.T) { + if isTestShouldSkip("GLACIER_TESTS") { + t.Skip("Skipping GLACIER integration tests...") + return + } + env, r := NewTestEnvironment(t) + r.NoError(env.DockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) + env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml") + dockerExecTimeout = 60 * time.Minute + env.runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") + dockerExecTimeout = 3 * time.Minute + env.Cleanup(t, r) +} + +func TestIntegrationS3(t *testing.T) { + env, r := NewTestEnvironment(t) + env.checkObjectStorageIsEmpty(t, r, "S3") + env.runMainIntegrationScenario(t, "S3", "config-s3.yml") + env.Cleanup(t, r) +} + +func TestIntegrationGCS(t *testing.T) { + if isTestShouldSkip("GCS_TESTS") { + t.Skip("Skipping GCS integration tests...") + return + } 
+ env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") + env.Cleanup(t, r) +} + +func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { + if isTestShouldSkip("GCS_TESTS") { + t.Skip("Skipping GCS_EMULATOR integration tests...") + return + } + env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") + env.Cleanup(t, r) +} + +func TestIntegrationSFTPAuthPassword(t *testing.T) { + env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") + env.Cleanup(t, r) +} + +func TestIntegrationFTP(t *testing.T) { + env, r := NewTestEnvironment(t) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { + env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") + } else { + env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") + } + env.Cleanup(t, r) +} + +func TestIntegrationSFTPAuthKey(t *testing.T) { + env, r := NewTestEnvironment(t) + env.uploadSSHKeys(r, "clickhouse-backup") + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") + env.Cleanup(t, r) +} + +func TestIntegrationCustomKopia(t *testing.T) { + env, r := NewTestEnvironment(t) + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") + + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] 
https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list") + env.InstallDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") + + env.runIntegrationCustom(t, r, "kopia") + env.Cleanup(t, r) +} + +func TestIntegrationCustomRestic(t *testing.T) { + env, r := NewTestEnvironment(t) + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic") + env.runIntegrationCustom(t, r, "restic") + env.Cleanup(t, r) +} + +func TestIntegrationCustomRsync(t *testing.T) { + env, r := NewTestEnvironment(t) + env.uploadSSHKeys(r, "clickhouse-backup") + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") + env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") + env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") + env.runIntegrationCustom(t, r, "rsync") + env.Cleanup(t, r) +} + +func (env *TestEnvironment) 
runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { + env.DockerExecNoError(r, "clickhouse-backup", "mkdir", "-pv", "/custom/"+customType) + r.NoError(env.DockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) + env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") +} + // TestS3NoDeletePermission - no parallel func TestS3NoDeletePermission(t *testing.T) { if isTestShouldSkip("RUN_ADVANCED_TESTS") { @@ -2081,167 +2243,6 @@ func TestFIPS(t *testing.T) { env.Cleanup(t, r) } -func TestIntegrationEmbedded(t *testing.T) { - //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) - //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 - version := os.Getenv("CLICKHOUSE_VERSION") - if compareVersion(version, "23.3") < 0 { - t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) - } - env, r := NewTestEnvironment(t) - - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") - - //@TODO think about how to implements embedded backup for s3_plain disks - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") - //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") - - t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") - //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") - 
//r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) - ////CUSTOM backup create folder in each disk - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") - //if compareVersion(version, "24.2") >= 0 { - // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - //} - //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") - //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) - - if compareVersion(version, "23.8") >= 0 { - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") - } - if compareVersion(version, "24.3") >= 0 { - //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") - } - env.Cleanup(t, r) -} - -func TestIntegrationS3Glacier(t *testing.T) { - if isTestShouldSkip("GLACIER_TESTS") { - t.Skip("Skipping GLACIER integration tests...") - return - } - env, r := NewTestEnvironment(t) - r.NoError(env.DockerCP("config-s3-glacier.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml.s3glacier-template")) - 
env.InstallDebIfNotExists(r, "clickhouse-backup", "curl", "gettext-base", "bsdmainutils", "dnsutils", "git", "ca-certificates") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config.yml.s3glacier-template | envsubst > /etc/clickhouse-backup/config-s3-glacier.yml") - dockerExecTimeout = 60 * time.Minute - env.runMainIntegrationScenario(t, "GLACIER", "config-s3-glacier.yml") - dockerExecTimeout = 3 * time.Minute - env.Cleanup(t, r) -} - -func TestIntegrationAzure(t *testing.T) { - if isTestShouldSkip("AZURE_TESTS") { - t.Skip("Skipping Azure integration tests...") - return - } - env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "AZBLOB", "config-azblob.yml") - env.Cleanup(t, r) -} - -func TestIntegrationS3(t *testing.T) { - env, r := NewTestEnvironment(t) - env.checkObjectStorageIsEmpty(t, r, "S3") - env.runMainIntegrationScenario(t, "S3", "config-s3.yml") - env.Cleanup(t, r) -} - -func TestIntegrationGCS(t *testing.T) { - if isTestShouldSkip("GCS_TESTS") { - t.Skip("Skipping GCS integration tests...") - return - } - env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "GCS", "config-gcs.yml") - env.Cleanup(t, r) -} - -func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { - if isTestShouldSkip("GCS_TESTS") { - t.Skip("Skipping GCS_EMULATOR integration tests...") - return - } - env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") - env.Cleanup(t, r) -} - -func TestIntegrationSFTPAuthPassword(t *testing.T) { - env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") - env.Cleanup(t, r) -} - -func TestIntegrationFTP(t *testing.T) { - env, r := NewTestEnvironment(t) - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { - env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") - } else { - env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") - } - 
env.Cleanup(t, r) -} - -func TestIntegrationSFTPAuthKey(t *testing.T) { - env, r := NewTestEnvironment(t) - env.uploadSSHKeys(r, "clickhouse-backup") - env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") - env.Cleanup(t, r) -} - -func TestIntegrationCustomKopia(t *testing.T) { - env, r := NewTestEnvironment(t) - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") - env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "curl -sfL https://kopia.io/signing-key | gpg --dearmor -o /usr/share/keyrings/kopia-keyring.gpg") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ce", "echo 'deb [signed-by=/usr/share/keyrings/kopia-keyring.gpg] https://packages.kopia.io/apt/ stable main' > /etc/apt/sources.list.d/kopia.list") - env.InstallDebIfNotExists(r, "clickhouse-backup", "kopia", "xxd", "bsdmainutils", "parallel") - - env.runIntegrationCustom(t, r, "kopia") - env.Cleanup(t, r) -} - -func TestIntegrationCustomRestic(t *testing.T) { - env, r := NewTestEnvironment(t) - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") - env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "bzip2", "pgp", "git") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "command -v restic || RELEASE_TAG=$(curl -H 'Accept: 
application/json' -sL https://github.com/restic/restic/releases/latest | jq -c -r -M '.tag_name'); RELEASE=$(echo ${RELEASE_TAG} | sed -e 's/v//'); curl -sfL \"https://github.com/restic/restic/releases/download/${RELEASE_TAG}/restic_${RELEASE}_linux_amd64.bz2\" | bzip2 -d > /bin/restic; chmod +x /bin/restic") - env.runIntegrationCustom(t, r, "restic") - env.Cleanup(t, r) -} - -func TestIntegrationCustomRsync(t *testing.T) { - env, r := NewTestEnvironment(t) - env.uploadSSHKeys(r, "clickhouse-backup") - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "curl") - env.DockerExecNoError(r, "clickhouse-backup", "update-ca-certificates") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xce", "command -v yq || curl -sL \"https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$(dpkg --print-architecture)\" -o /usr/bin/yq && chmod +x /usr/bin/yq") - env.InstallDebIfNotExists(r, "clickhouse-backup", "jq", "openssh-client", "rsync") - env.runIntegrationCustom(t, r, "rsync") - env.Cleanup(t, r) -} - -func (env *TestEnvironment) runIntegrationCustom(t *testing.T, r *require.Assertions, customType string) { - env.DockerExecNoError(r, "clickhouse-backup", "mkdir", "-pv", "/custom/"+customType) - r.NoError(env.DockerCP("./"+customType+"/", "clickhouse-backup:/custom/")) - env.runMainIntegrationScenario(t, "CUSTOM", "config-custom-"+customType+".yml") -} func TestRestoreMapping(t *testing.T) { env, r := NewTestEnvironment(t) From 0ef2aa0860647c77372901b1a1d5b162e280537f Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 20:29:37 +0400 Subject: [PATCH 46/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug disk_s3 --- test/integration/integration_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 4e5daeaf..61996dbf 100644 --- a/test/integration/integration_test.go +++ 
b/test/integration/integration_test.go @@ -469,6 +469,14 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { env.ch.Close() + out, err := env.DockerExecOut("minio", "bash", "-ce", "ls -lh /bitnami/minio/data/clickhouse/") + t.Log(t.Name(), "DEBUG", out) + r.NoError(err) + + if t.Name() == "TestIntegrationS3" { + env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/disk_s3") + } + if t.Name() == "TestRBAC" || t.Name() == "TestConfigs" || t.Name() == "TestIntegrationEmbedded" { env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/backups_s3") } @@ -2403,11 +2411,8 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora log.Debug("Clean before start") fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) - env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") testData := generateTestData(t, r, env, remoteStorageType, defaultTestData) - env.DockerExecNoError(r, "minio", "mc", "ls", "local/clickhouse/disk_s3") - log.Debug("Create backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, fullBackupName) From 07521a3abde24a83f8ebd32a7ce5f9de15796393 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 21:05:51 +0400 Subject: [PATCH 47/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug disk_s3, again --- test/integration/integration_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 61996dbf..8840b0cb 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -469,11 +469,8 @@ func NewTestEnvironment(t *testing.T) 
(*TestEnvironment, *require.Assertions) { func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { env.ch.Close() - out, err := env.DockerExecOut("minio", "bash", "-ce", "ls -lh /bitnami/minio/data/clickhouse/") - t.Log(t.Name(), "DEBUG", out) - r.NoError(err) - if t.Name() == "TestIntegrationS3" { + if t.Name() == "TestIntegrationS3" || t.Name() == "TestIntegrationEmbedded" { env.DockerExecNoError(r, "minio", "rm", "-rf", "/bitnami/minio/data/clickhouse/disk_s3") } From c9288fd83dc9110a96ed7f08e522a123f7acdbbe Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 22:00:52 +0400 Subject: [PATCH 48/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestIntegrationAzure, re order tests --- test/integration/docker-compose.yml | 2 +- test/integration/docker-compose_advanced.yml | 2 +- test/integration/integration_test.go | 119 ++++++++++--------- 3 files changed, 62 insertions(+), 61 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index fe9cf771..a255b7bb 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -57,7 +57,7 @@ services: test: nc 127.0.0.1 10000 -z interval: 1s retries: 30 - command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"] + command: [ "azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0","--inMemoryPersistence" ] # environment: # - AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 4ec05dba..faccf56c 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -66,7 +66,7 @@ services: test: nc 127.0.0.1 10000 -z interval: 1s retries: 30 - command: ["azurite", "--debug", "/data/debug.log" 
, "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"] + command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0", "--inMemoryPersistence"] # environment: # - AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 8840b0cb..5a1f3de8 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -552,46 +552,35 @@ func TestIntegrationAzure(t *testing.T) { env.Cleanup(t, r) } -func TestIntegrationEmbedded(t *testing.T) { - //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) - //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 - version := os.Getenv("CLICKHOUSE_VERSION") - if compareVersion(version, "23.3") < 0 { - t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) +func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { + if isTestShouldSkip("GCS_TESTS") { + t.Skip("Skipping GCS_EMULATOR integration tests...") + return } env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") + env.Cleanup(t, r) +} - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") - - //@TODO think about how to implements embedded backup for s3_plain disks - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") - //runMainIntegrationScenario(t, 
"EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") +func TestIntegrationSFTPAuthKey(t *testing.T) { + env, r := NewTestEnvironment(t) + env.uploadSSHKeys(r, "clickhouse-backup") + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") + env.Cleanup(t, r) +} - t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") - //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") - //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) - ////CUSTOM backup create folder in each disk - //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") - //if compareVersion(version, "24.2") >= 0 { - // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") - //} - //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") - //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") - //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) +func TestIntegrationSFTPAuthPassword(t *testing.T) { + env, r := NewTestEnvironment(t) + env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") + env.Cleanup(t, r) +} - if compareVersion(version, "23.8") >= 0 { - //CUSTOM backup creates folder in each disk, need to clear - env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") - env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") - } - if compareVersion(version, "24.3") >= 0 { - //@todo think about named collections to avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 - env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", 
"gettext-base") - env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") - env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") +func TestIntegrationFTP(t *testing.T) { + env, r := NewTestEnvironment(t) + if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { + env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") + } else { + env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") } env.Cleanup(t, r) } @@ -628,38 +617,50 @@ func TestIntegrationGCS(t *testing.T) { env.Cleanup(t, r) } -func TestIntegrationGCSWithCustomEndpoint(t *testing.T) { - if isTestShouldSkip("GCS_TESTS") { - t.Skip("Skipping GCS_EMULATOR integration tests...") - return +func TestIntegrationEmbedded(t *testing.T) { + //t.Skipf("Test skipped, wait 23.8, RESTORE Ordinary table and RESTORE MATERIALIZED VIEW and {uuid} not works for %s version, look https://github.com/ClickHouse/ClickHouse/issues/43971 and https://github.com/ClickHouse/ClickHouse/issues/42709", os.Getenv("CLICKHOUSE_VERSION")) + //dependencies restore https://github.com/ClickHouse/ClickHouse/issues/39416, fixed in 23.3 + version := os.Getenv("CLICKHOUSE_VERSION") + if compareVersion(version, "23.3") < 0 { + t.Skipf("Test skipped, BACKUP/RESTORE not production ready for %s version", version) } env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "GCS_EMULATOR", "config-gcs-custom-endpoint.yml") - env.Cleanup(t, r) -} -func TestIntegrationSFTPAuthPassword(t *testing.T) { - env, r := NewTestEnvironment(t) - env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-password.yaml") - env.Cleanup(t, r) -} + //CUSTOM backup creates folder in each disk, need to clear + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", 
"/var/lib/clickhouse/disks/backups_s3/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_S3", "config-s3-embedded.yml") -func TestIntegrationFTP(t *testing.T) { - env, r := NewTestEnvironment(t) - if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.3") >= 1 { - env.runMainIntegrationScenario(t, "FTP", "config-ftp.yaml") - } else { - env.runMainIntegrationScenario(t, "FTP", "config-ftp-old.yaml") + //@TODO think about how to implements embedded backup for s3_plain disks + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/") + //runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN", "config-s3-plain-embedded.yml") + + t.Log("@TODO clickhouse-server don't close connection properly after FIN from azurite during BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/60447, https://github.com/Azure/Azurite/issues/2053") + //env.DockerExecNoError(r, "azure", "apk", "add", "tcpdump") + //r.NoError(env.DockerExecBackground("azure", "tcpdump", "-i", "any", "-w", "/tmp/azurite_http.pcap", "port", "10000")) + ////CUSTOM backup create folder in each disk + //env.DockerExecNoError(r, "clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/") + //if compareVersion(version, "24.2") >= 0 { + // env.runMainIntegrationScenario(t, "EMBEDDED_AZURE_URL", "config-azblob-embedded-url.yml") + //} + //env.runMainIntegrationScenario(t, "EMBEDDED_AZURE", "config-azblob-embedded.yml") + //env.DockerExecNoError(r, "azure", "pkill", "tcpdump") + //r.NoError(env.DockerCP("azure:/tmp/azurite_http.pcap", "./azurite_http.pcap")) + + if compareVersion(version, "23.8") >= 0 { + //CUSTOM backup creates folder in each disk, need to clear + env.DockerExecNoError(r, "clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_local/backup/") + env.runMainIntegrationScenario(t, "EMBEDDED_LOCAL", "config-s3-embedded-local.yml") + } + if compareVersion(version, "24.3") >= 0 { + //@todo think about named collections to 
avoid show credentials in logs look to https://github.com/fsouza/fake-gcs-server/issues/1330, https://github.com/fsouza/fake-gcs-server/pull/1164 + env.InstallDebIfNotExists(r, "clickhouse-backup", "ca-certificates", "gettext-base") + env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "cat /etc/clickhouse-backup/config-gcs-embedded-url.yml.template | envsubst > /etc/clickhouse-backup/config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_GCS_URL", "config-gcs-embedded-url.yml") + env.runMainIntegrationScenario(t, "EMBEDDED_S3_URL", "config-s3-embedded-url.yml") } env.Cleanup(t, r) } -func TestIntegrationSFTPAuthKey(t *testing.T) { - env, r := NewTestEnvironment(t) - env.uploadSSHKeys(r, "clickhouse-backup") - env.runMainIntegrationScenario(t, "SFTP", "config-sftp-auth-key.yaml") - env.Cleanup(t, r) -} func TestIntegrationCustomKopia(t *testing.T) { env, r := NewTestEnvironment(t) From b4cecca5929af0cc5d04a36325f02f02e149c946 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 30 Jul 2024 22:19:45 +0400 Subject: [PATCH 49/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, debug TestIntegrationAzure, tmpfs for /data --- test/integration/docker-compose.yml | 14 ++++++++++++-- test/integration/docker-compose_advanced.yml | 10 +++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index a255b7bb..97efbdb9 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -57,7 +57,9 @@ services: test: nc 127.0.0.1 10000 -z interval: 1s retries: 30 - command: [ "azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0","--inMemoryPersistence" ] + command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0" ] + volumes: + - azure:/data # environment: # - 
AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" @@ -240,4 +242,12 @@ services: image: hello-world depends_on: clickhouse-backup: - condition: service_healthy \ No newline at end of file + condition: service_healthy + +volumes: + azure: + driver: local + driver_opts: + device: tmpfs + type: tmpfs + o: size=60m \ No newline at end of file diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index faccf56c..040cd928 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -66,7 +66,9 @@ services: test: nc 127.0.0.1 10000 -z interval: 1s retries: 30 - command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0", "--inMemoryPersistence"] + command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0" ] + volumes: + - azure:/data # environment: # - AZURITE_DB="mysql://root:root@mysql:3306/azurite_blob" @@ -318,6 +320,12 @@ volumes: type: tmpfs o: size=250m pgsql: + driver: local + driver_opts: + device: tmpfs + type: tmpfs + o: size=60m + azure: driver: local driver_opts: device: tmpfs From 34ed67160fc915980615c48c97d3e7704fd00100 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 31 Jul 2024 07:07:56 +0400 Subject: [PATCH 50/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, RUN_PARALLEL=1 --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e63b95ec..e815be73 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,7 +269,7 @@ jobs: - name: Running integration tests env: - RUN_PARALLEL: 4 + RUN_PARALLEL: 1 GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # options for advanced debug CI/CD From 
e9eca424ad75cbc81fad180fa2bd238ebc0e7fa5 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 31 Jul 2024 11:08:55 +0400 Subject: [PATCH 51/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, RUN_PARALLEL=2, wtf ;( --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index e815be73..186c28c3 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -269,7 +269,7 @@ jobs: - name: Running integration tests env: - RUN_PARALLEL: 1 + RUN_PARALLEL: 2 GOROOT: ${{ env.GOROOT_1_22_X64 }} CLICKHOUSE_VERSION: ${{ matrix.clickhouse }} # options for advanced debug CI/CD From b80f0a82aa1eeec10361943ef79d079c63ab0201 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 31 Jul 2024 13:42:16 +0400 Subject: [PATCH 52/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, TestIntegrationAzure sequence --- test/integration/integration_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 5a1f3de8..8c444e7f 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -438,11 +438,12 @@ var defaultIncrementData = []TestDataStruct{ } func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { + isParallel := os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" && t.Name() != "TestIntegrationAzure" if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } t.Helper() - if os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" { + if isParallel { t.Parallel() } @@ -453,7 +454,7 @@ func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { } env := envObj.(*TestEnvironment) - if os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" { + if 
isParallel { t.Logf("%s run in parallel mode project=%s", t.Name(), env.ProjectName) } else { t.Logf("%s run in sequence mode project=%s", t.Name(), env.ProjectName) From 492f3f2c295dde7b0b8fdb0e149da0d367d1a324 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 31 Jul 2024 16:23:56 +0400 Subject: [PATCH 53/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, test clickhousepro/azurite:latest --- test/integration/docker-compose.yml | 3 ++- test/integration/docker-compose_advanced.yml | 3 ++- test/integration/integration_test.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml index 97efbdb9..89c6fc4d 100644 --- a/test/integration/docker-compose.yml +++ b/test/integration/docker-compose.yml @@ -51,7 +51,8 @@ services: retries: 30 azure: - image: mcr.microsoft.com/azure-storage/azurite:latest + # image: mcr.microsoft.com/azure-storage/azurite:latest + image: docker.io/clickhousepro/azurite:latest hostname: devstoreaccount1.blob.azure healthcheck: test: nc 127.0.0.1 10000 -z diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml index 040cd928..5ff1b954 100644 --- a/test/integration/docker-compose_advanced.yml +++ b/test/integration/docker-compose_advanced.yml @@ -60,7 +60,8 @@ services: retries: 30 azure: - image: mcr.microsoft.com/azure-storage/azurite:latest + # image: mcr.microsoft.com/azure-storage/azurite:latest + image: docker.io/clickhousepro/azurite:latest hostname: devstoreaccount1.blob.azure healthcheck: test: nc 127.0.0.1 10000 -z diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 8c444e7f..85dd2008 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -14,6 +14,7 @@ import ( "path" "reflect" "regexp" + "slices" "strconv" "strings" "sync" @@ -438,7 +439,7 @@ var defaultIncrementData = []TestDataStruct{ } func 
NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { - isParallel := os.Getenv("RUN_PARALLEL") != "1" && t.Name() != "TestLongListRemote" && t.Name() != "TestIntegrationAzure" + isParallel := os.Getenv("RUN_PARALLEL") != "1" && slices.Index([]string{"TestLongListRemote","TestIntegrationAzure"}, t.Name()) == -1 if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } From 79dfc7c310d168b04d29d21e23cec0f020c2e05c Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 31 Jul 2024 17:16:48 +0400 Subject: [PATCH 54/54] debug https://github.com/Altinity/clickhouse-backup/issues/888, return TestIntegrationAzure to parallel execution and upgrade testflows version --- test/integration/integration_test.go | 2 +- test/testflows/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 85dd2008..356fcf58 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -439,7 +439,7 @@ var defaultIncrementData = []TestDataStruct{ } func NewTestEnvironment(t *testing.T) (*TestEnvironment, *require.Assertions) { - isParallel := os.Getenv("RUN_PARALLEL") != "1" && slices.Index([]string{"TestLongListRemote","TestIntegrationAzure"}, t.Name()) == -1 + isParallel := os.Getenv("RUN_PARALLEL") != "1" && slices.Index([]string{"TestLongListRemote"/*,"TestIntegrationAzure"*/}, t.Name()) == -1 if os.Getenv("COMPOSE_FILE") == "" || os.Getenv("CUR_DIR") == "" { t.Fatal("please setup COMPOSE_FILE and CUR_DIR environment variables") } diff --git a/test/testflows/requirements.txt b/test/testflows/requirements.txt index 541c9085..aab89fbe 100644 --- a/test/testflows/requirements.txt +++ b/test/testflows/requirements.txt @@ -1,4 +1,4 @@ -testflows==1.9.71 +testflows==2.4.11 requests setuptools PyYAML \ No newline at end of file