Skip to content

Commit

Permalink
Merge branch 'Altinity:master' into sharded-backup
Browse files Browse the repository at this point in the history
  • Loading branch information
mskwon authored Jul 14, 2023
2 parents 4dabe85 + c23fd55 commit cd21df6
Show file tree
Hide file tree
Showing 14 changed files with 266 additions and 115 deletions.
7 changes: 6 additions & 1 deletion .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ on:
jobs:
build:
name: Build
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
strategy:
matrix:
golang-version:
Expand All @@ -27,6 +27,11 @@ jobs:
with:
go-version: '^${{ matrix.golang-version }}'

- name: Setup musl
id: setup-musl
run: |
sudo apt-get install -y musl-tools musl-dev
- name: Cache golang
id: cache-golang
uses: actions/cache@v3
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/release.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ jobs:

- name: Setup fpm and make
run: |
sudo apt-get install -y --no-install-recommends ruby ruby-dev gcc g++ rpm
sudo apt-get install -y --no-install-recommends ruby ruby-dev gcc g++ rpm musl-dev musl-tools
sudo apt-get install --no-install-recommends -y make
sudo gem install --no-document fpm
Expand Down
10 changes: 10 additions & 0 deletions ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,13 @@
# v2.3.1
IMPROVEMENTS
- add support for the `use_environment_credentials` option inside the `clickhouse-server` backup object disk definition, fix [691](https://github.com/Altinity/clickhouse-backup/issues/691)
- add (but skip) tests for the `azure_blob_storage` backup disk with `use_embedded_backup_restore: true`; it works, but slowly — see https://github.com/ClickHouse/ClickHouse/issues/52088 for details

BUG FIXES
- fix static build for FIPS-compatible mode, fix [693](https://github.com/Altinity/clickhouse-backup/issues/693)
- complete success/failure server callback notification even when main context canceled, fix [680](https://github.com/Altinity/clickhouse-backup/pull/680)
- the `clean` command no longer returns an error when the shadow directory does not exist, fix [686](https://github.com/Altinity/clickhouse-backup/issues/686)

# v2.3.0
IMPROVEMENTS
- add FIPS compatible builds and examples, fix [656](https://github.com/Altinity/clickhouse-backup/issues/656), fix [674](https://github.com/Altinity/clickhouse-backup/issues/674)
Expand Down
6 changes: 3 additions & 3 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \
echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \
echo "deb-src https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" >> /etc/apt/sources.list.d/golang.list && \
( apt-get update || true ) && \
apt-get install -y --no-install-recommends libc-dev golang-1.20 make git gcc && \
apt-get install -y --no-install-recommends libc-dev golang-1.20 make git gcc musl-dev musl-tools && \
wget -q -P /root/ https://musl.cc/aarch64-linux-musl-cross.tgz && \
tar -xvf /root/aarch64-linux-musl-cross.tgz -C /root/ && \
mkdir -p /root/go/
Expand All @@ -32,9 +32,9 @@ FROM builder-base AS builder-race
ARG TARGETPLATFORM
COPY ./ /src/
RUN mkdir -p ./clickhouse-backup/
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CC=musl-gcc CGO_ENABLED=1 go build -a -cover -buildvcs=false -ldflags "-X 'main.version=race' -linkmode=external -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race ./cmd/clickhouse-backup
RUN cp -l ./clickhouse-backup/clickhouse-backup-race /bin/clickhouse-backup && ldd ./clickhouse-backup/clickhouse-backup-race
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
RUN --mount=type=cache,id=clickhouse-backup-gobuild,target=/root/.cache/go GOOS=$( echo ${TARGETPLATFORM} | cut -d "/" -f 1) GOARCH=$( echo ${TARGETPLATFORM} | cut -d "/" -f 2) GOEXPERIMENT=boringcrypto CC=musl-gcc CGO_ENABLED=1 go build -cover -buildvcs=false -ldflags "-X 'main.version=race-fips' -linkmode=external -extldflags '-static'" -gcflags "all=-N -l" -race -o ./clickhouse-backup/clickhouse-backup-race-fips ./cmd/clickhouse-backup
RUN cp -l ./clickhouse-backup/clickhouse-backup-race-fips /bin/clickhouse-backup-fips && ldd ./clickhouse-backup/clickhouse-backup-race-fips
COPY entrypoint.sh /entrypoint.sh

Expand Down
10 changes: 5 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ define DESC =
Support of incremental backups on remote storages'
endef
GO_BUILD = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)'"
GO_BUILD_FIPS = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)-fips' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)'"
GO_BUILD_STATIC = go build -buildvcs=false -ldflags "-X 'main.version=$(VERSION)-fips' -X 'main.gitCommit=$(GIT_COMMIT)' -X 'main.buildDate=$(DATE)' -linkmode=external -extldflags '-static'"
PKG_FILES = build/$(NAME)_$(VERSION).amd64.deb build/$(NAME)_$(VERSION).arm64.deb build/$(NAME)-$(VERSION)-1.amd64.rpm build/$(NAME)-$(VERSION)-1.arm64.rpm
HOST_OS = $(shell bash -c 'source <(go env) && echo $$GOHOSTOS')
HOST_ARCH = $(shell bash -c 'source <(go env) && echo $$GOHOSTARCH')
Expand Down Expand Up @@ -55,15 +55,15 @@ build/linux/arm64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips: GOARCH = arm64
build/linux/amd64/$(NAME)-fips build/linux/arm64/$(NAME)-fips: GOOS = linux
build/darwin/amd64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips: GOOS = darwin
build/linux/amd64/$(NAME)-fips build/darwin/amd64/$(NAME)-fips:
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_FIPS) -o $@ ./cmd/$(NAME) && \
CC=musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_STATIC) -o $@ ./cmd/$(NAME) && \
go tool nm $@ > /tmp/$(NAME)-fips-tags.txt && \
grep '_Cfunc__goboringcrypto_' /tmp/$(NAME)-fips-tags.txt 1> /dev/null && \
rm -fv /tmp/$(NAME)-fips-tags.txt

# TODO remove ugly workaround, https://www.perplexity.ai/search/2ead4c04-060a-4d78-a75f-f26835238438
build/linux/arm64/$(NAME)-fips build/darwin/arm64/$(NAME)-fips:
bash -xce 'if [[ ! -f ~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc ]]; then wget -q -P ~ https://musl.cc/aarch64-linux-musl-cross.tgz; tar -xvf ~/aarch64-linux-musl-cross.tgz -C ~; fi' && \
CC=~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_FIPS) -o $@ ./cmd/$(NAME) && \
CC=~/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 GOOS=$(GOOS) GOARCH=$(GOARCH) $(GO_BUILD_STATIC) -o $@ ./cmd/$(NAME) && \
go tool nm $@ > /tmp/$(NAME)-fips-tags.txt && \
grep '_Cfunc__goboringcrypto_' /tmp/$(NAME)-fips-tags.txt 1> /dev/null && \
rm -fv /tmp/$(NAME)-fips-tags.txt
Expand Down Expand Up @@ -123,12 +123,12 @@ $(PKG_FILES): build/linux/amd64/pkg build/linux/arm64/pkg
build-race: $(NAME)/$(NAME)-race

$(NAME)/$(NAME)-race:
CGO_ENABLED=1 $(GO_BUILD) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)
CC=musl-gcc CGO_ENABLED=1 $(GO_BUILD_STATIC) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)

build-race-fips: $(NAME)/$(NAME)-race-fips

$(NAME)/$(NAME)-race-fips:
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 $(GO_BUILD_FIPS) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)
CC=musl-gcc GOEXPERIMENT=boringcrypto CGO_ENABLED=1 $(GO_BUILD_STATIC) -cover -gcflags "all=-N -l" -race -o $@ ./cmd/$(NAME)


# run `docker buildx create --use` first time
Expand Down
3 changes: 3 additions & 0 deletions pkg/backup/delete.go
Original file line number Diff line number Diff line change
Expand Up @@ -277,6 +277,9 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu
}
apexLog.Debugf("object_disk.ReadMetadataFromReader(%s)", f.Name())
meta, err := object_disk.ReadMetadataFromReader(r, f.Name())
if err != nil {
return err
}
for _, o := range meta.StorageObjects {
if err = object_disk.DeleteFile(ctx, b.cfg.ClickHouse.EmbeddedBackupDisk, o.ObjectRelativePath); err != nil {
return err
Expand Down
22 changes: 22 additions & 0 deletions pkg/backup/upload.go
Original file line number Diff line number Diff line change
Expand Up @@ -369,6 +369,28 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string,
if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore {
log.Warnf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`")
}

if b.cfg.ClickHouse.UseEmbeddedBackupRestore {
fatalMsg := fmt.Sprintf("`general->remote_storage: %s` `clickhouse->use_embedded_backup_restore: %v` require %s->compression_format: none, actual %%s", b.cfg.General.RemoteStorage, b.cfg.ClickHouse.UseEmbeddedBackupRestore, b.cfg.General.RemoteStorage)
if b.cfg.General.RemoteStorage == "s3" && b.cfg.S3.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.S3.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "gcs" && b.cfg.GCS.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.GCS.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "azblob" && b.cfg.AzureBlob.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.AzureBlob.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "sftp" && b.cfg.SFTP.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.SFTP.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "ftp" && b.cfg.FTP.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.FTP.CompressionFormat)
}
if b.cfg.General.RemoteStorage == "cos" && b.cfg.COS.CompressionFormat != "none" {
log.Fatalf(fatalMsg, b.cfg.COS.CompressionFormat)
}
}
if b.cfg.General.RemoteStorage == "custom" && b.resume {
return fmt.Errorf("can't resume for `remote_storage: custom`")
}
Expand Down
8 changes: 4 additions & 4 deletions pkg/storage/azblob.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,16 +36,16 @@ func (s *AzureBlob) Kind() string {
// Connect - connect to Azure
func (s *AzureBlob) Connect(ctx context.Context) error {
if s.Config.EndpointSuffix == "" {
return fmt.Errorf("endpoint suffix not set")
return fmt.Errorf("azblob endpoint suffix not set")
}
if s.Config.Container == "" {
return fmt.Errorf("container name not set")
return fmt.Errorf("azblob container name not set")
}
if s.Config.AccountName == "" {
return fmt.Errorf("account name not set")
return fmt.Errorf("azblob account name not set")
}
if s.Config.AccountKey == "" && s.Config.SharedAccessSignature == "" && !s.Config.UseManagedIdentity {
return fmt.Errorf("account key or SAS or use_managed_identity must be set")
return fmt.Errorf("azblob account key or SAS or use_managed_identity must be set")
}
var (
err error
Expand Down
33 changes: 23 additions & 10 deletions pkg/storage/object_disk/object_disk.go
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
Type: "s3",
}
if batchDeleteNode := d.SelectElement("support_batch_delete"); batchDeleteNode != nil {
if strings.Trim(batchDeleteNode.InnerText(), "\r\n \t") == "true" {
if strings.Trim(batchDeleteNode.InnerText(), "\r\n \t") == "false" {
creds.Type = "gcs"
}
}
Expand All @@ -314,13 +314,18 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
}
accessKeyNode := d.SelectElement("access_key_id")
secretKeyNode := d.SelectElement("secret_access_key")
if accessKeyNode == nil || secretKeyNode == nil {
useEnvironmentCredentials := d.SelectElement("use_environment_credentials")
if accessKeyNode != nil && secretKeyNode != nil {
creds.S3AccessKey = strings.Trim(accessKeyNode.InnerText(), "\r\n \t")
creds.S3SecretKey = strings.Trim(secretKeyNode.InnerText(), "\r\n \t")
} else {
apexLog.Warnf("%s -> /%s/storage_configuration/disks/%s doesn't contains <access_key_id> and <secret_access_key> environment variables will use", configFile, root.Data, diskName)
creds.S3AssumeRole = os.Getenv("AWS_ROLE_ARN")
break
if useEnvironmentCredentials != nil {
creds.S3AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
creds.S3SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
}
}
creds.S3AccessKey = strings.Trim(accessKeyNode.InnerText(), "\r\n \t")
creds.S3SecretKey = strings.Trim(secretKeyNode.InnerText(), "\r\n \t")
credentials[diskName] = creds
break
case "azure_blob_storage":
Expand All @@ -346,7 +351,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) (
if containerNameNode == nil {
return nil, fmt.Errorf("%s -> /%s/storage_configuration/disks/%s doesn't contains <account_key>", configFile, root.Data, diskName)
}
creds.AzureAccountName = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
creds.AzureAccountKey = strings.Trim(accountKeyNode.InnerText(), "\r\n \t")
credentials[diskName] = creds
break
}
Expand Down Expand Up @@ -425,7 +430,12 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
break
case "azblob":
connection.Type = "azure_blob_storage"
azureCfg := config.AzureBlobConfig{}
azureCfg := config.AzureBlobConfig{
Timeout: "15m",
BufferSize: 2 * 1024 * 1024,
MaxBuffers: 3,
MaxPartsCount: 5000,
}
azureURL, err := url.Parse(creds.EndPoint)
if err != nil {
return nil, err
Expand All @@ -435,11 +445,14 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf
azureCfg.EndpointSchema = azureURL.Scheme
}
azureCfg.EndpointSuffix = azureURL.Host
if creds.AzureAccountName != "" {
azureCfg.AccountName = creds.AzureAccountName
}
if azureURL.Path != "" {
azureCfg.Path = azureURL.Path
}
if creds.AzureAccountKey != "" {
azureCfg.AccountName = creds.AzureAccountName
if azureCfg.AccountName != "" && strings.HasPrefix(azureCfg.Path, "/"+creds.AzureAccountName) {
azureCfg.Path = strings.TrimPrefix(azureURL.Path, "/"+creds.AzureAccountName)
}
}
if creds.AzureAccountKey != "" {
azureCfg.AccountKey = creds.AzureAccountKey
Expand Down
2 changes: 1 addition & 1 deletion test/integration/config-azblob-embedded.yml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ azblob:
endpoint_schema: http
container: container1
path: backup
compression_format: tar
compression_format: none
api:
listen: :7171
create_integration_tables: true
Expand Down
35 changes: 35 additions & 0 deletions test/integration/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,10 @@ services:
MINIO_DEFAULT_BUCKETS: 'clickhouse'
MINIO_ROOT_USER: access-key
MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
healthcheck:
test: curl -sL http://localhost:9000/
interval: 10s
retries: 30
volumes:
- ./minio_nodelete.sh:/bin/minio_nodelete.sh
networks:
Expand All @@ -54,9 +58,31 @@ services:
image: mcr.microsoft.com/azure-storage/azurite:latest
container_name: azure
hostname: devstoreaccount1.blob.azure
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
networks:
- clickhouse-backup

# azure_init:
# image: mcr.microsoft.com/azure-cli:latest
# command:
# - /bin/sh
# - -xc
# - |
# az storage container create --debug --name azure-backup-disk &&
# az storage container create --debug --name azure-disk
# depends_on:
# azure:
# condition: service_healthy
# environment:
# # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
# AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
# networks:
# - clickhouse-backup

zookeeper:
image: docker.io/zookeeper:${ZOOKEEPER_VERSION:-latest}
container_name: zookeeper
Expand Down Expand Up @@ -94,6 +120,9 @@ services:
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
QA_AWS_BUCKET: ${QA_AWS_BUCKET}
QA_AWS_REGION: ${QA_AWS_REGION}
# https://github.com/Altinity/clickhouse-backup/issues/691:
AWS_ACCESS_KEY_ID: access-key
AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key
volumes:
- ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml
- ${CLICKHOUSE_BACKUP_BIN:-../../clickhouse-backup/clickhouse-backup-race}:/usr/bin/clickhouse-backup
Expand Down Expand Up @@ -134,6 +163,12 @@ services:
depends_on:
zookeeper:
condition: service_healthy
minio:
condition: service_healthy
azure:
condition: service_healthy
# azure_init:
# condition: service_completed_successfully

all_services_ready:
image: hello-world
Expand Down
37 changes: 36 additions & 1 deletion test/integration/docker-compose_advanced.yml
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,10 @@ services:
MINIO_DEFAULT_BUCKETS: 'clickhouse'
MINIO_ROOT_USER: access-key
MINIO_ROOT_PASSWORD: it-is-my-super-secret-key
healthcheck:
test: curl -sL http://localhost:9000/
interval: 10s
retries: 30
volumes:
- ./minio_nodelete.sh:/bin/minio_nodelete.sh
ports:
Expand All @@ -58,7 +62,7 @@ services:
# - /bin/sh
# command:
# - -c
# - "mkdir -p /storage/altinity-qa-test && fake-gcs-server -scheme http -port 8080 -public-host gsc:8080"
# - "mkdir -p /data/clickhouse-backup-test-gcs && fake-gcs-server -data /data -scheme http -port 8080 -public-host gsc:8080"
# networks:
# - clickhouse-backup

Expand All @@ -67,9 +71,31 @@ services:
image: mcr.microsoft.com/azure-storage/azurite:latest
container_name: azure
hostname: devstoreaccount1.blob.azure
healthcheck:
test: nc 127.0.0.1 10000 -z
interval: 1s
retries: 30
command: ["azurite", "--debug", "/data/debug.log" , "-l", "/data", "--blobHost", "0.0.0.0","--queueHost", "0.0.0.0", "--tableHost", "0.0.0.0"]
networks:
- clickhouse-backup

# azure_init:
# image: mcr.microsoft.com/azure-cli:latest
# command:
# - /bin/sh
# - -xc
# - |
# az storage container create --debug --name azure-backup-disk &&
# az storage container create --debug --name azure-disk
# depends_on:
# azure:
# condition: service_healthy
# environment:
# # https://github.com/Azure/Azurite/blob/main/README.md#usage-with-azure-storage-sdks-or-tools
# AZURE_STORAGE_CONNECTION_STRING: DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azure:10000/devstoreaccount1;
# networks:
# - clickhouse-backup

mysql:
image: docker.io/mysql:${MYSQL_VERSION:-latest}
command: --default-authentication-plugin=mysql_native_password --gtid_mode=on --enforce_gtid_consistency=ON
Expand Down Expand Up @@ -141,6 +167,9 @@ services:
QA_AWS_SECRET_KEY: ${QA_AWS_SECRET_KEY}
QA_AWS_BUCKET: ${QA_AWS_BUCKET}
QA_AWS_REGION: ${QA_AWS_REGION}
# https://github.com/Altinity/clickhouse-backup/issues/691:
AWS_ACCESS_KEY_ID: access-key
AWS_SECRET_ACCESS_KEY: it-is-my-super-secret-key
volumes:
- ./backup-user.xml:/etc/clickhouse-server/users.d/backup-user.xml
- ./enable-access_management.xml:/etc/clickhouse-server/users.d/enable-access_management.xml
Expand Down Expand Up @@ -189,6 +218,12 @@ services:
condition: service_healthy
zookeeper:
condition: service_healthy
minio:
condition: service_healthy
azure:
condition: service_healthy
# azure_init:
# condition: service_completed_successfully

all_services_ready:
image: hello-world
Expand Down
Loading

0 comments on commit cd21df6

Please sign in to comment.