diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index 3941f7fd..bc86495e 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -12,7 +12,7 @@ on:
 jobs:
   build:
     name: Build
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         golang-version:
@@ -27,6 +27,11 @@ jobs:
         with:
          go-version: '^${{ matrix.golang-version }}'

+      - name: Setup musl
+        id: setup-musl
+        run: |
+          sudo apt-get install -y musl-tools musl-dev
+
       - name: Cache golang
         id: cache-golang
         uses: actions/cache@v3
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 0d2eccaf..a38f473b 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -26,7 +26,7 @@ jobs:
       - name: Setup fpm and make
         run: |
-          sudo apt-get install -y --no-install-recommends ruby ruby-dev gcc g++ rpm
+          sudo apt-get install -y --no-install-recommends ruby ruby-dev gcc g++ rpm musl-dev musl-tools
           sudo apt-get install --no-install-recommends -y make
           sudo gem install --no-document fpm
diff --git a/ChangeLog.md b/ChangeLog.md
index f63505ba..ab2f6ac7 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,3 +1,13 @@
+# v2.3.1
+IMPROVEMENTS
+- add support for the `use_environment_credentials` option inside the `clickhouse-server` backup object disk definition, fix [691](https://github.com/Altinity/clickhouse-backup/issues/691)
+- add, but skip, tests for the `azure_blob_storage` backup disk with `use_embedded_backup_restore: true`; it works, but is slow, see https://github.com/ClickHouse/ClickHouse/issues/52088 for details
+
+BUG FIXES
+- fix static build for FIPS compatible mode, fix [693](https://github.com/Altinity/clickhouse-backup/issues/693)
+- complete the success/failure server callback notification even when the main context is canceled, fix [680](https://github.com/Altinity/clickhouse-backup/pull/680)
+- `clean` command no longer returns an error when the shadow directory does not exist, fix [686](https://github.com/Altinity/clickhouse-backup/issues/686)
+
 # v2.3.0
 IMPROVEMENTS
 - add FIPS compatible builds and examples, fix [656](https://github.com/Altinity/clickhouse-backup/issues/656), fix [674](https://github.com/Altinity/clickhouse-backup/issues/674)
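For reference, the `use_environment_credentials` option mentioned in the v2.3.1 notes is set per disk in the ClickHouse server storage configuration, next to the disk endpoint. A minimal sketch of such a disk definition, assuming the MinIO endpoint and disk names used by the integration tests later in this diff:

```xml
<clickhouse>
  <storage_configuration>
    <disks>
      <backups_s3>
        <type>s3</type>
        <endpoint>http://minio:9000/clickhouse/backups_s3/</endpoint>
        <!-- credentials come from AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
             instead of explicit <access_key_id>/<secret_access_key> -->
        <use_environment_credentials>1</use_environment_credentials>
      </backups_s3>
    </disks>
  </storage_configuration>
</clickhouse>
```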
diff --git a/Dockerfile b/Dockerfile
index 5f604558..5dabec87 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,7 +12,7 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \
     echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \
     echo "deb-src https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" >> /etc/apt/sources.list.d/golang.list && \
     ( apt-get update || true ) && \
-    apt-get install -y --no-install-recommends libc-dev golang-1.20 make git gcc && \
+    apt-get install -y --no-install-recommends libc-dev golang-1.20 make git gcc musl-dev musl-tools && \
     wget -q -P /root/ https://musl.cc/aarch64-linux-musl-cross.tgz && \
     tar -xvf /root/aarch64-linux-musl-cross.tgz -C /root/ && \
     mkdir -p /root/go/
@@ -32,9 +32,9 @@ FROM builder-base AS builder-race
 ARG TARGETPLATFORM
 COPY ./ /src/
 RUN mkdir -p ./clickhouse-backup/
-RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
+RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CC=musl-gcc CGO_ENABLED=1 go build -a -cover -buildvcs=false -ldflags "-X 'main.version=race' -linkmode=external -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
 RUN cp -l ./clickhouse-backup/clickhouse-backup-race /bin/clickhouse-backup && ldd ./clickhouse-backup/clickhouse-backup-race
-RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
+RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) GOEXPERIMENT=boringcrypto CC=musl-gcc CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -linkmode=external -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
 RUN cp -l ./clickhouse-backup/clickhouse-backup-race-fips /bin/clickhouse-backup-fips && ldd ./clickhouse-backup/clickhouse-backup-race-fips

 COPY entrypoint.sh /entrypoint.sh
diff --git a/Makefile b/Makefile
index 2f6bec8c..15368c36 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@ define DESC =
 Support of incremental backups on remote storages'
 endef
 GO_BUILD = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)'"
-GO_BUILD_FIPS = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)-fips' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)'"
+GO_BUILD_STATIC = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)-fips' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)' -linkmode=external -extldflags '-static'"
 PKG_FILES = build/$(NAME)_$(VERSION).amd64.deb build/$(NAME)_$(VERSION).arm64.deb build/$(NAME)-$(VERSION)-1.amd64.rpm build/$(NAME)-$(VERSION)-1.arm64.rpm
 HOST_OS = $(shell bash -c 'source <(go env) && echo $$GOHOSTOS')
 HOST_ARCH = $(shell bash -c 'source <(go env) && echo $$GOHOSTARCH')
@@ -55,7 +55,7 @@ build/linux/arm64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips: GOARCH = arm64
 build/linux/amd64/$(NAME)-fips build/linux/arm64/$(NAME)-fips: GOOS = linux
 build/darwin/amd64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips: GOOS = darwin
 build/linux/amd64/$(NAME)-fips build/darwin/amd64/$(NAME)-fips:
-	GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_FIPS) -o $@ ./cmd/$(NAME) && \
+	CC=musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_STATIC) -o $@ ./cmd/$(NAME) && \
 	go tool nm $@ > /tmp/$(NAME)-fips-tags.txt && \
 	grep '_Cfunc__goboringcrypto_' /tmp/$(NAME)-fips-tags.txt 1> /dev/null && \
 	rm -fv /tmp/$(NAME)-fips-tags.txt
@@ -63,7 +63,7 @@ build/linux/amd64/$(NAME)-fips build/darwin/amd64/$(NAME)-fips:
 # TODO remove ugly workaround, https://www.perplexity.ai/search/2ead4c04-060a-4d78-a75f-f26835238438
 build/linux/arm64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips:
 	bash -xce 'if [[ ! -f ~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc ]]; then wget -q -P ~ https://musl.cc/aarch64-linux-musl-cross.tgz; tar -xvf ~/aarch64-linux-musl-cross.tgz -C ~; fi' && \
-	CC=~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_FIPS) -o $@ ./cmd/$(NAME) && \
+	CC=~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_STATIC) -o $@ ./cmd/$(NAME) && \
 	go tool nm $@ > /tmp/$(NAME)-fips-tags.txt && \
 	grep '_Cfunc__goboringcrypto_' /tmp/$(NAME)-fips-tags.txt 1> /dev/null && \
 	rm -fv /tmp/$(NAME)-fips-tags.txt
@@ -123,12 +123,12 @@ $(PKG_FILES): build/linux/amd64/pkg build/linux/arm64/pkg
 build-race: $(NAME)/$(NAME)-race
 $(NAME)/$(NAME)-race:
-	CGO_ENABLED=1 $(GO_BUILD) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)
+	CC=musl-gcc CGO_ENABLED=1 $(GO_BUILD_STATIC) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)

 build-race-fips: $(NAME)/$(NAME)-race-fips
 $(NAME)/$(NAME)-race-fips:
-	GOEXPERIMENT=boringcrypto CGO_ENABLED=1 $(GO_BUILD_FIPS) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)
+	CC=musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 $(GO_BUILD_STATIC) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)

 # run `docker buildx create --use` first time
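The `go tool nm | grep '_Cfunc__goboringcrypto_'` step above verifies BoringCrypto linkage at build time. A runtime counterpart, shown here as an illustrative sketch and not part of this change, is the `crypto/boring` package that Go exposes when built with `GOEXPERIMENT=boringcrypto`:

```go
//go:build boringcrypto

// Sketch: report whether this binary actually uses the BoringCrypto module.
// Build with: GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go build
package main

import (
	"crypto/boring"
	"fmt"
)

func main() {
	// boring.Enabled reports whether BoringCrypto handles supported crypto operations.
	fmt.Println("boringcrypto enabled:", boring.Enabled())
}
```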
diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go
index 6381dba6..e3af2029 100644
--- a/pkg/backup/delete.go
+++ b/pkg/backup/delete.go
@@ -277,6 +277,9 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu
 		}
 		apexLog.Debugf("object_disk.ReadMetadataFromReader(%s)", f.Name())
 		meta, err := object_disk.ReadMetadataFromReader(r, f.Name())
+		if err != nil {
+			return err
+		}
 		for _, o := range meta.StorageObjects {
 			if err = object_disk.DeleteFile(ctx, b.cfg.ClickHouse.EmbeddedBackupDisk, o.ObjectRelativePath); err != nil {
 				return err
diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go
index 8c8a0178..98ad6b19 100644
--- a/pkg/backup/upload.go
+++ b/pkg/backup/upload.go
@@ -369,6 +369,28 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string,
 	if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore {
 		log.Warnf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`")
 	}
+
+	if b.cfg.ClickHouse.UseEmbeddedBackupRestore {
+		fatalMsg := fmt.Sprintf("`general->remote_storage: %s` `clickhouse->use_embedded_backup_restore: %v` require %s->compression_format: none, actual %%s", b.cfg.General.RemoteStorage, b.cfg.ClickHouse.UseEmbeddedBackupRestore, b.cfg.General.RemoteStorage)
+		if b.cfg.General.RemoteStorage == "s3" && b.cfg.S3.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.S3.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "gcs" && b.cfg.GCS.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.GCS.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "azblob" && b.cfg.AzureBlob.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.AzureBlob.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "sftp" && b.cfg.SFTP.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.SFTP.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "ftp" && b.cfg.FTP.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.FTP.CompressionFormat)
+		}
+		if b.cfg.General.RemoteStorage == "cos" && b.cfg.COS.CompressionFormat != "none" {
+			log.Fatalf(fatalMsg, b.cfg.COS.CompressionFormat)
+		}
+	}
 	if b.cfg.General.RemoteStorage == "custom" && b.resume {
 		return fmt.Errorf("can't resume for `remote_storage: custom`")
 	}
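The per-storage `compression_format` checks added above repeat the same pattern six times; the shape is easier to see as a lookup table. A hedged sketch of an equivalent refactor (the helper and its map argument are hypothetical; the config fields are the ones used in the diff, e.g. `b.cfg.S3.CompressionFormat`):

```go
package backup // illustrative package name

import "log"

// validateEmbeddedCompression checks that the active remote storage uses
// compression_format "none" when use_embedded_backup_restore is enabled.
// formats maps remote_storage kind -> configured compression_format, e.g.
// {"s3": b.cfg.S3.CompressionFormat, "gcs": b.cfg.GCS.CompressionFormat, ...}.
func validateEmbeddedCompression(remoteStorage string, formats map[string]string) {
	if format, ok := formats[remoteStorage]; ok && format != "none" {
		log.Fatalf("`general->remote_storage: %s` with `use_embedded_backup_restore: true` requires %s->compression_format: none, actual %s",
			remoteStorage, remoteStorage, format)
	}
}
```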
diff --git a/pkg/storage/azblob.go b/pkg/storage/azblob.go
index b46ffab3..921e3556 100644
--- a/pkg/storage/azblob.go
+++ b/pkg/storage/azblob.go
@@ -36,16 +36,16 @@ func (s *AzureBlob) Kind() string {
 // Connect - connect to Azure
 func (s *AzureBlob) Connect(ctx context.Context) error {
 	if s.Config.EndpointSuffix == "" {
-		return fmt.Errorf("endpoint suffix not set")
+		return fmt.Errorf("azblob endpoint suffix not set")
 	}
 	if s.Config.Container == "" {
-		return fmt.Errorf("container name not set")
+		return fmt.Errorf("azblob container name not set")
 	}
 	if s.Config.AccountName == "" {
-		return fmt.Errorf("account name not set")
+		return fmt.Errorf("azblob account name not set")
 	}
 	if s.Config.AccountKey == "" && s.Config.SharedAccessSignature == "" && !s.Config.UseManagedIdentity {
-		return fmt.Errorf("account key or SAS or use_managed_identity must be set")
+		return fmt.Errorf("azblob account key or SAS or use_managed_identity must be set")
 	}
 	var (
 		err error
diff --git a/pkg/storage/object_disk/object_disk.go b/pkg/storage/object_disk/object_disk.go
index 5cf2051e..7b3d899c 100644
--- a/pkg/storage/object_disk/object_disk.go
+++ b/pkg/storage/object_disk/object_disk.go
@@ -300,7 +300,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
 				Type: "s3",
 			}
 			if batchDeleteNode := d.SelectElement("support_batch_delete"); batchDeleteNode != nil {
-				if strings.Trim(batchDeleteNode.InnerText(), "\r\n \t") == "true" {
+				if strings.Trim(batchDeleteNode.InnerText(), "\r\n \t") == "false" {
 					creds.Type = "gcs"
 				}
 			}
@@ -314,13 +314,18 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
 			}
 			accessKeyNode := d.SelectElement("access_key_id")
 			secretKeyNode := d.SelectElement("secret_access_key")
-			if accessKeyNode == nil || secretKeyNode == nil {
+			useEnvironmentCredentials := d.SelectElement("use_environment_credentials")
+			if accessKeyNode != nil && secretKeyNode != nil {
+				creds.S3AccessKey = strings.Trim(accessKeyNode.InnerText(), "\r\n \t")
+				creds.S3SecretKey = strings.Trim(secretKeyNode.InnerText(), "\r\n \t")
+			} else {
 				apexLog.Warnf("%s -> /%s/storage_configuration/disks/%s doesn't contain <access_key_id> and <secret_access_key>, environment variables will be used", configFile, root.Data, diskName)
 				creds.S3AssumeRole = os.Getenv("AWS_ROLE_ARN")
-				break
+				if useEnvironmentCredentials != nil {
+					creds.S3AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
+					creds.S3SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
+				}
 			}
-			creds.S3AccessKey = strings.Trim(accessKeyNode.InnerText(), "\r\n \t")
-			creds.S3SecretKey = strings.Trim(secretKeyNode.InnerText(), "\r\n \t")
 			credentials[diskName] = creds
 			break
 		case "azure_blob_storage":
@@ -346,7 +351,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
 			if containerNameNode == nil {
 				return nil, fmt.Errorf("%s -> /%s/storage_configuration/disks/%s doesn't contain <container_name>", configFile, root.Data, diskName)
 			}
-			creds.AzureAccountName = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
+			creds.AzureAccountKey = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
 			credentials[diskName] = creds
 			break
 		}
@@ -425,7 +430,12 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
 		break
 	case "azblob":
 		connection.Type = "azure_blob_storage"
-		azureCfg := config.AzureBlobConfig{}
+		azureCfg := config.AzureBlobConfig{
+			Timeout:       "15m",
+			BufferSize:    2 * 1024 * 1024,
+			MaxBuffers:    3,
+			MaxPartsCount: 5000,
+		}
 		azureURL, err := url.Parse(creds.EndPoint)
 		if err != nil {
 			return nil, err
 		}
@@ -435,11 +445,14 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
 			azureCfg.EndpointSchema = azureURL.Scheme
 		}
 		azureCfg.EndpointSuffix = azureURL.Host
+		if creds.AzureAccountName != "" {
+			azureCfg.AccountName = creds.AzureAccountName
+		}
 		if azureURL.Path != "" {
 			azureCfg.Path = azureURL.Path
-		}
-		if creds.AzureAccountKey != "" {
-			azureCfg.AccountName = creds.AzureAccountName
+			if azureCfg.AccountName != "" && strings.HasPrefix(azureCfg.Path, "/"+creds.AzureAccountName) {
+				azureCfg.Path = strings.TrimPrefix(azureURL.Path, "/"+creds.AzureAccountName)
+			}
 		}
 		if creds.AzureAccountKey != "" {
 			azureCfg.AccountKey = creds.AzureAccountKey
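The branch added to `getObjectDisksCredentials` resolves S3 credentials in a fixed order: explicit `<access_key_id>`/`<secret_access_key>` win, and the `AWS_*` environment variables are consulted only when the disk declares `<use_environment_credentials>`. A condensed restatement of that order (the helper name is hypothetical):

```go
package objectdisk // illustrative package name

import "os"

// resolveS3Credentials mirrors the branch above: explicit keys from the disk
// definition take precedence; otherwise, when <use_environment_credentials>
// is present, fall back to AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY.
func resolveS3Credentials(accessKey, secretKey string, useEnvCredentials bool) (string, string) {
	if accessKey != "" && secretKey != "" {
		return accessKey, secretKey
	}
	if useEnvCredentials {
		return os.Getenv("AWS_ACCESS_KEY_ID"), os.Getenv("AWS_SECRET_ACCESS_KEY")
	}
	return "", "" // no static credentials; an assume-role ARN may still apply
}
```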
diff --git a/test/integration/config-azblob-embedded.yml b/test/integration/config-azblob-embedded.yml
index 77c632d1..7e46d38e 100644
--- a/test/integration/config-azblob-embedded.yml
+++ b/test/integration/config-azblob-embedded.yml
@@ -28,7 +28,7 @@ azblob:
   endpoint_schema: http
   container: container1
   path: backup
-  compression_format: tar
+  compression_format: none
 api:
   listen: :7171
   create_integration_tables: true
diff --git a/test/integration/docker-compose.yml b/test/integration/docker-compose.yml
index 2a25c442..6d4d0545 100644
--- a/test/integration/docker-compose.yml
+++ b/test/integration/docker-compose.yml
@@ -32,6 +32,10 @@ services:
       MINIO_DEFAULT_BUCKETS: 'clickhouse'
       MINIO_ROOT_USER: access-key
       MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
+    healthcheck:
+      test: curl -sL http://localhost:9000/
+      interval: 10s
+      retries: 30
     volumes:
       - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     networks:
@@ -54,9 +58,31 @@ services:
     image: mcr.microsoft.com/azure-storage/azurite:latest
     container_name: azure
     hostname: devstoreaccount1.blob.azure
+    healthcheck:
+      test: nc 127.0.0.1 10000 -z
+      interval: 1s
+      retries: 30
+    command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
     networks:
       - clickhouse-backup

+#  azure_init:
+#    image: mcr.microsoft.com/azure-cli:latest
+#    command:
+#      - /bin/sh
+#      - -xc
+#      - |
+#        az storage container create --debug --name azure-backup-disk &&
+#        az storage container create --debug --name azure-disk
+#    depends_on:
+#      azure:
+#        condition: service_healthy
+#    environment:
+#      # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
+#      AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
+#    networks:
+#      - clickhouse-backup
+
   zookeeper:
     image: docker.io/zookeeper:${ZOOKEEPER_VERSION:-latest}
     container_name: zookeeper
@@ -94,6 +120,9 @@ services:
       QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
       QA_AWS_BUCKET: ${QA_AWS_BUCKET}
       QA_AWS_REGION: ${QA_AWS_REGION}
+#     https://github.com/Altinity/clickhouse-backup/issues/691:
+      AWS_ACCESS_KEY_ID: access-key
+      AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key
     volumes:
       - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml
       - ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup
@@ -134,6 +163,12 @@ services:
     depends_on:
       zookeeper:
         condition: service_healthy
+      minio:
+        condition: service_healthy
+      azure:
+        condition: service_healthy
+#      azure_init:
+#        condition: service_completed_successfully
   all_services_ready:
     image: hello-world
diff --git a/test/integration/docker-compose_advanced.yml b/test/integration/docker-compose_advanced.yml
index 42dc0ed6..b81e1547 100644
--- a/test/integration/docker-compose_advanced.yml
+++ b/test/integration/docker-compose_advanced.yml
@@ -43,6 +43,10 @@ services:
       MINIO_DEFAULT_BUCKETS: 'clickhouse'
       MINIO_ROOT_USER: access-key
       MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
+    healthcheck:
+      test: curl -sL http://localhost:9000/
+      interval: 10s
+      retries: 30
     volumes:
       - ./minio_nodelete.sh:/bin/minio_nodelete.sh
     ports:
@@ -58,7 +62,7 @@ services:
 #      - /bin/sh
 #    command:
 #      - -c
-#      - "mkdir -p /storage/altinity-qa-test && fake-gcs-server -scheme http -port 8080 -public-host gsc:8080"
+#      - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
 #    networks:
 #      - clickhouse-backup

@@ -67,9 +71,31 @@ services:
     image: mcr.microsoft.com/azure-storage/azurite:latest
     container_name: azure
     hostname: devstoreaccount1.blob.azure
+    healthcheck:
+      test: nc 127.0.0.1 10000 -z
+      interval: 1s
+      retries: 30
+    command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
     networks:
       - clickhouse-backup

+#  azure_init:
+#    image: mcr.microsoft.com/azure-cli:latest
+#    command:
+#      - /bin/sh
+#      - -xc
+#      - |
+#        az storage container create --debug --name azure-backup-disk &&
+#        az storage container create --debug --name azure-disk
+#    depends_on:
+#      azure:
+#        condition: service_healthy
+#    environment:
+#      # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
+#      AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
+#    networks:
+#      - clickhouse-backup
+
   mysql:
     image: docker.io/mysql:${MYSQL_VERSION:-latest}
     command: --default-authentication-plugin=mysql_native_password --gtid_mode=on --enforce_gtid_consistency=ON
@@ -141,6 +167,9 @@ services:
       QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
       QA_AWS_BUCKET: ${QA_AWS_BUCKET}
       QA_AWS_REGION: ${QA_AWS_REGION}
+#     https://github.com/Altinity/clickhouse-backup/issues/691:
+      AWS_ACCESS_KEY_ID: access-key
+      AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key
     volumes:
       - ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml
       - ./enable-access_management.xml:/etc/clickhouse-server/users.d/enable-access_management.xml
@@ -189,6 +218,12 @@ services:
         condition: service_healthy
       zookeeper:
         condition: service_healthy
+      minio:
+        condition: service_healthy
+      azure:
+        condition: service_healthy
+#      azure_init:
+#        condition: service_completed_successfully

   all_services_ready:
     image: hello-world
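Both compose files now gate the `clickhouse` service on MinIO and Azurite health. The same readiness probes can be expressed in Go; a sketch assuming the ports published above (this helper is illustrative and not part of the test suite):

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// waitHealthy polls the same endpoints the compose healthchecks use:
// an HTTP GET against MinIO and a raw TCP dial against Azurite's blob port.
func waitHealthy(minioURL, azuriteAddr string, retries int, interval time.Duration) error {
	for i := 0; i < retries; i++ {
		httpOK := false
		if resp, err := http.Get(minioURL); err == nil {
			resp.Body.Close()
			httpOK = true
		}
		tcpOK := false
		if conn, err := net.DialTimeout("tcp", azuriteAddr, time.Second); err == nil {
			conn.Close()
			tcpOK = true
		}
		if httpOK && tcpOK {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("services not healthy after %d retries", retries)
}

func main() {
	if err := waitHealthy("http://localhost:9000/", "127.0.0.1:10000", 30, time.Second); err != nil {
		panic(err)
	}
}
```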
diff --git a/test/integration/dynamic_settings.sh b/test/integration/dynamic_settings.sh
index 861a21ab..62561582 100644
--- a/test/integration/dynamic_settings.sh
+++ b/test/integration/dynamic_settings.sh
@@ -81,7 +81,7 @@ EOT

 fi

-if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.[8-9]|^21\.[0-9]{2} || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

 cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_s3.xml
@@ -90,8 +90,11 @@ cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_s3.xml
       <disk_s3>
         <type>s3</type>
         <endpoint>http://minio:9000/clickhouse/disk_s3/</endpoint>
+        <!-- https://github.com/Altinity/clickhouse-backup/issues/691 -->
+        <use_environment_credentials>1</use_environment_credentials>
+
         <send_metadata>true</send_metadata>
@@ -110,7 +113,7 @@ EOT

 fi

-if [[ "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^21\.12 || "${CLICKHOUSE_VERSION}" =~ ^2[2-9]\.[0-9]+ ]]; then

 cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_encrypted_s3.xml
@@ -119,8 +122,11 @@ cat <<EOT > /etc/clickhouse-server/config.d/storage_configuration_encrypted_s3.x
       <disk_s3>
         <type>s3</type>
         <endpoint>http://minio:9000/clickhouse/disk_s3/</endpoint>
+        <!-- https://github.com/Altinity/clickhouse-backup/issues/691 -->
+        <use_environment_credentials>1</use_environment_credentials>
+
         <send_metadata>true</send_metadata>
@@ -157,80 +163,102 @@ chown -R clickhouse /var/lib/clickhouse/disks/ /var/lib/clickhouse/backups_embed

 cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3.xml
+<?xml version="1.0"?>
+<clickhouse>
+  <storage_configuration>
+    <disks>
+      <backups_s3>
+        <support_batch_delete>true</support_batch_delete>
+        <type>s3</type>
+        <endpoint>http://minio:9000/clickhouse/backups_s3/</endpoint>
+        <!-- https://github.com/Altinity/clickhouse-backup/issues/691 -->
+        <use_environment_credentials>1</use_environment_credentials>
+        <cache_enabled>false</cache_enabled>
+      </backups_s3>
+    </disks>
+  </storage_configuration>
+  <backups>
+    <allowed_disk>backups_s3</allowed_disk>
+    <allowed_path>/var/lib/clickhouse/backups_embedded/</allowed_path>
+  </backups>
+  <allow_concurrent_backups>1</allow_concurrent_backups>
+</clickhouse>
+EOT
+
+fi
+
+# s3_plain and azure backup configuration
+if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.[4-9] || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then
+
+mkdir -p /var/lib/clickhouse/disks/backups_s3_plain/
+chown -R clickhouse /var/lib/clickhouse/disks/
+
+cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
+<?xml version="1.0"?>
+<clickhouse>
+  <storage_configuration>
+    <disks>
+      <backups_s3_plain>
+        <type>s3_plain</type>
+        <endpoint>http://minio:9000/clickhouse/backups_s3_plain/</endpoint>
+        <!-- https://github.com/Altinity/clickhouse-backup/issues/691 -->
+        <use_environment_credentials>1</use_environment_credentials>
+        <cache_enabled>false</cache_enabled>
+      </backups_s3_plain>
+    </disks>
+  </storage_configuration>
+  <backups>
+    <allowed_disk>backups_s3</allowed_disk>
+    <allowed_disk>backups_s3_plain</allowed_disk>
+  </backups>
+</clickhouse>
+EOT
+
+mkdir -p /var/lib/clickhouse/disks/backups_azure/
+chown -R clickhouse /var/lib/clickhouse/disks/
+
+cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
+<?xml version="1.0"?>
 <clickhouse>
   <storage_configuration>
     <disks>
-      <backups_s3>
-        <support_batch_delete>true</support_batch_delete>
-        <type>s3</type>
-        <endpoint>http://minio:9000/clickhouse/backups_s3/</endpoint>
-        <access_key_id>access-key</access_key_id>
-        <secret_access_key>it-is-my-super-secret-key</secret_access_key>
+      <azure>
+        <type>azure_blob_storage</type>
+        <storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
+        <container_name>azure-disk</container_name>
+        <!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
+        <account_name>devstoreaccount1</account_name>
+        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
+        <cache_enabled>false</cache_enabled>
+      </azure>
+      <backups_azure>
+        <type>azure_blob_storage</type>
+        <storage_account_url>http://azure:10000/devstoreaccount1</storage_account_url>
+        <container_name>azure-backup-disk</container_name>
+        <!-- https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools -->
+        <account_name>devstoreaccount1</account_name>
+        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
         <cache_enabled>false</cache_enabled>
-      </backups_s3>
+      </backups_azure>
     </disks>
   </storage_configuration>
   <backups>
-    <allowed_path>/var/lib/clickhouse/backups_embedded/</allowed_path>
+    <allowed_disk>backups_s3</allowed_disk>
+    <allowed_disk>backups_s3_plain</allowed_disk>
+    <allowed_disk>backups_azure</allowed_disk>
   </backups>
-  <allow_concurrent_backups>1</allow_concurrent_backups>
 </clickhouse>
 EOT

 fi

-# embedded s3_plain and azure backup configuration
-if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^23\.3 || "${CLICKHOUSE_VERSION}" =~ ^23\.1[0-9]+ || "${CLICKHOUSE_VERSION}" =~ ^2[4-9]\.[1-9]+ ]]; then
-
-mkdir -p /var/lib/clickhouse/disks/backups_azure/ /var/lib/clickhouse/disks/backups_s3_plain/
-chown -R clickhouse /var/lib/clickhouse/disks/
-
-#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_s3_plain.xml
-#<?xml version="1.0"?>
-#<clickhouse>
-#  <storage_configuration>
-#    <disks>
-#      <backups_s3_plain>
-#        <support_batch_delete>true</support_batch_delete>
-#        <type>s3_plain</type>
-#        <endpoint>http://minio:9000/clickhouse/backups_plain/</endpoint>
-#        <access_key_id>access-key</access_key_id>
-#        <secret_access_key>it-is-my-super-secret-key</secret_access_key>
-#        <cache_enabled>false</cache_enabled>
-#      </backups_s3_plain>
-#    </disks>
-#  </storage_configuration>
-#  <backups>
-#    <allowed_disk>backups_azure</allowed_disk>
-#  </backups>
-#EOT
-
-#cat <<EOT > /etc/clickhouse-server/config.d/backup_storage_configuration_azure.xml
-#<?xml version="1.0"?>
-#<clickhouse>
-#  <storage_configuration>
-#    <disks>
-#      <backups_azure>
-#        <type>azure_blob_storage</type>
-#        <storage_account_url>http://azure:10000</storage_account_url>
-#        <container_name>container-embedded</container_name>
-#        <account_name>devstoreaccount1</account_name>
-#        <account_key>Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==</account_key>
-#      </backups_azure>
-#    </disks>
-#  </storage_configuration>
-#  <backups>
-#    <allowed_disk>backups_azure</allowed_disk>
-#  </backups>
-#EOT
-
-fi
-
 if [[ "${CLICKHOUSE_VERSION}" == "head" || "${CLICKHOUSE_VERSION}" =~ ^22\.[7-9]|^22\.[0-9]{2}|^2[3-9]\. ]]; then
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 2ee7efc9..a34ac6fe 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -754,13 +754,15 @@ func TestIntegrationEmbedded(t *testing.T) {
 	//CUSTOM backup create folder in each disk
 	r.NoError(dockerExec("clickhouse", "rm", "-rfv", "/var/lib/clickhouse/disks/backups_s3/backup/"))
 	r.NoError(dockerCP("config-s3-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	runMainIntegrationScenario(t, "EMBEDDED")
-	//r.NoError(dockerExec("clickhouse","rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup", ))
+	runMainIntegrationScenario(t, "EMBEDDED_S3")
+	//@TODO uncomment when resolve slow azure BACKUP/RESTORE https://github.com/ClickHouse/ClickHouse/issues/52088
+	//r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup/"))
+	//r.NoError(dockerCP("config-azblob-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
+	//runMainIntegrationScenario(t, "EMBEDDED_AZURE")
+	//@TODO think about how to implement embedded backup for s3_plain disks
+	//r.NoError(dockerExec("clickhouse", "rm", "-rf", "/var/lib/clickhouse/disks/backups_s3_plain/backup/"))
 	//r.NoError(dockerCP("config-s3-plain-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	//runMainIntegrationScenario(t, "EMBEDDED")
-	//r.NoError(dockerExec("clickhouse","rm", "-rf", "/var/lib/clickhouse/disks/backups_azure/backup", ))
-	//r.NoError(dockerCP("config-azure-embedded.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))
-	//runMainIntegrationScenario(t, "EMBEDDED")
+	//runMainIntegrationScenario(t, "EMBEDDED_S3_PLAIN")
 }

 func TestLongListRemote(t *testing.T) {
@@ -993,8 +995,8 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {
 	checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType)

 	backupDir := "/var/lib/clickhouse/backup"
-	if remoteStorageType == "EMBEDDED" {
-		backupDir = "/var/lib/clickhouse/disks/backups_s3"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		backupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED"))
 	}
 	out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir)
 	r.NoError(err)
@@ -1066,12 +1068,10 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) {

 	// test end
 	log.Info("Clean after finish")
-	// why CUSTOM delete only local database?
-	if remoteStorageType == "CUSTOM" {
-		fullCleanup(r, ch, []string{}, []string{}, databaseList, false, true)
-	} else if remoteStorageType == "EMBEDDED" {
+	// CUSTOM and EMBEDDED: downloading an increment doesn't download the full backup
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		fullCleanup(r, ch, []string{incrementBackupName}, []string{"local"}, nil, true, false)
 		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true)
-		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"local"}, nil, false, false)
 	} else {
 		fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, true, true)
 	}
 }
@@ -1079,7 +1079,7 @@
 func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) {
 	// backupCmd = fmt.Sprintf("%s & PID=$!; sleep 0.7; kill -9 $PID; cat /var/lib/clickhouse/backup/%s/upload.state; sleep 0.3; %s", backupCmd, testBackupName, backupCmd)
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		backupCmd = strings.Replace(backupCmd, "--resume", "", 1)
 	} else {
 		backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd)
@@ -2113,7 +2113,7 @@ func (ch *TestClickHouse) createTestSchema(data TestDataStruct, remoteStorageTyp
 		return nil
 	}
 	// @TODO remove it when resolve https://github.com/ClickHouse/ClickHouse/issues/43971
-	if strings.Contains(createSQL, "8192)") && remoteStorageType == "EMBEDDED" {
+	if strings.Contains(createSQL, "8192)") && strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		matches := mergeTreeOldSyntax.FindStringSubmatch(createSQL)
 		if len(matches) >= 3 {
 			substitution := "MergeTree() PARTITION BY toYYYYMMDD($1) ORDER BY $2 SETTINGS index_granularity=$3"
@@ -2327,7 +2327,7 @@ func installDebIfNotExists(r *require.Assertions, container string, pkgs ...stri
 		container,
 		"bash", "-xec",
 		fmt.Sprintf(
-			"export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then apt-get -y update; apt-get install --no-install-recommends -y %s; fi",
+			"export DEBIAN_FRONTEND=noniteractive; if [[ '%d' != $(dpkg -l | grep -c -E \"%s\" ) ]]; then rm -fv /etc/apt/sources.list.d/clickhouse.list; apt-get -y update; apt-get install --no-install-recommends -y %s; fi",
 			len(pkgs), "^ii\\s+"+strings.Join(pkgs, "|^ii\\s+"), strings.Join(pkgs, " "),
 		),
 	))
@@ -2356,15 +2356,15 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", fullBackupName))
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "--partitions=('2022-01-02'),('2022-01-03')", fullBackupName))
 	fullBackupDir := "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/"
-	if remoteStorageType == "EMBEDDED" {
-		fullBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + fullBackupName + "/data/default/t?"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+" | wc -l")
 	r.NoError(err)
 	expectedLines := "13"
 	// custom storage doesn't support --partitions for upload / download now
 	// embedded storage contain hardLink files and will download additional data parts
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedLines = "17"
 	}
 	r.Equal(expectedLines, strings.Trim(out, "\r\n\t "))
@@ -2372,8 +2372,8 @@
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", fullBackupName))
 	fullBackupDir = "/var/lib/clickhouse/backup/" + fullBackupName + "/shadow/default/t?/default/"
-	if remoteStorageType == "EMBEDDED" {
-		fullBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + fullBackupName + "/data/default/t?"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		fullBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + fullBackupName + "/data/default/t?"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+fullBackupDir+"| wc -l")
 	r.NoError(err)
@@ -2393,8 +2393,8 @@
 	// check create + partitions
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", "--partitions=20220102,20220103", partitionBackupName))
 	partitionBackupDir := "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/"
-	if remoteStorageType == "EMBEDDED" {
-		partitionBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + partitionBackupName + "/data/default/t1"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+"| wc -l")
 	r.NoError(err)
@@ -2404,8 +2404,8 @@
 	// check create > upload + partitions
 	r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", "--tables=default.t1", partitionBackupName))
 	partitionBackupDir = "/var/lib/clickhouse/backup/" + partitionBackupName + "/shadow/default/t1/default/"
-	if remoteStorageType == "EMBEDDED" {
-		partitionBackupDir = "/var/lib/clickhouse/disks/backups_s3/" + partitionBackupName + "/data/default/t1"
+	if strings.HasPrefix(remoteStorageType, "EMBEDDED") {
+		partitionBackupDir = "/var/lib/clickhouse/disks/backups" + strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED")) + "/" + partitionBackupName + "/data/default/t1"
 	}
 	out, err = dockerExecOut("clickhouse", "bash", "-c", "ls -la "+partitionBackupDir+" | wc -l")
 	r.NoError(err)
@@ -2422,7 +2422,7 @@
 	expectedCount = 20
 	// custom and embedded doesn't support --partitions in upload and download
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedCount = 40
 	}
 	r.Equal(expectedCount, result, fmt.Sprintf("expect count=%d", expectedCount))
@@ -2432,7 +2432,7 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re
 	r.NoError(ch.chbackend.SelectSingleRowNoCtx(&result, "SELECT count() FROM default.t1 WHERE dt NOT IN ('2022-01-02','2022-01-03')"))
 	expectedCount = 0
 	// custom and embedded doesn't support --partitions in upload and download
-	if remoteStorageType == "CUSTOM" || remoteStorageType == "EMBEDDED" {
+	if remoteStorageType == "CUSTOM" || strings.HasPrefix(remoteStorageType, "EMBEDDED") {
 		expectedCount = 20
 	}
 	r.Equal(expectedCount, result, "expect count=0")
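The tests above repeat the expression that derives the embedded backup disk path from the storage-type label. A hypothetical helper capturing the mapping (`"EMBEDDED_S3"` → `/var/lib/clickhouse/disks/backups_s3`, `"EMBEDDED_AZURE"` → `/var/lib/clickhouse/disks/backups_azure`), shown only to clarify the convention, not as part of the diff:

```go
package integration // illustrative package name

import "strings"

// embeddedBackupDir mirrors the repeated expression in the tests:
// non-EMBEDDED storage keeps the default local backup directory, while
// EMBEDDED_<KIND> maps to the matching backups_<kind> object disk.
func embeddedBackupDir(remoteStorageType string) string {
	if !strings.HasPrefix(remoteStorageType, "EMBEDDED") {
		return "/var/lib/clickhouse/backup"
	}
	suffix := strings.ToLower(strings.TrimPrefix(remoteStorageType, "EMBEDDED"))
	return "/var/lib/clickhouse/disks/backups" + suffix
}
```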