skip CopyObject execution for keys which have zero size, to properly back up S3, GCS over S3 and Azure disks
Slach committed Jan 9, 2024
1 parent f8b1995 commit 44d00ac
Showing 3 changed files with 10 additions and 0 deletions.
4 changes: 4 additions & 0 deletions ChangeLog.md
@@ -1,3 +1,7 @@
+# v2.4.16
+BUG FIXES
+- skip CopyObject execution for keys which have zero size, to properly back up S3, GCS over S3 and Azure disks
+
 # v2.4.15
 BUG FIXES
 - increase `AZBLOB_TIMEOUT` to 4h instead of 15m, to allow downloading large data parts

3 changes: 3 additions & 0 deletions pkg/backup/create.go
@@ -657,6 +657,9 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName, backup
 
 		uploadObjectDiskPartsWorkingGroup.Go(func() error {
 			for _, storageObject := range objPartFileMeta.StorageObjects {
+				if storageObject.ObjectSize == 0 {
+					continue
+				}
 				if objSize, err = b.dst.CopyObject(
 					ctx,
 					srcDiskConnection.GetRemoteBucket(),

3 changes: 3 additions & 0 deletions pkg/backup/restore.go
@@ -873,6 +873,9 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
 		downloadObjectDiskPartsWorkingGroup.Go(func() error {
 			var srcBucket, srcKey string
 			for _, storageObject := range objMeta.StorageObjects {
+				if storageObject.ObjectSize == 0 {
+					continue
+				}
 				if b.cfg.General.RemoteStorage == "s3" && diskType == "s3" {
 					srcBucket = b.cfg.S3.Bucket
 					srcKey = path.Join(b.cfg.S3.ObjectDiskPath, backupName, diskName, storageObject.ObjectRelativePath)
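
Both hunks apply the same guard: a key with zero size has no payload to copy, and per the commit message, issuing CopyObject for such keys broke backups of S3, GCS over S3 and Azure disks. The sketch below shows the pattern in a minimal, self-contained form; StorageObject and copyObject here are hypothetical stand-ins modeled on the hunks above, not the project's actual API.

package main

import (
	"fmt"
	"path"
)

// StorageObject is a hypothetical stand-in for the object metadata
// entries iterated in uploadObjectDiskParts / downloadObjectDiskParts.
type StorageObject struct {
	ObjectRelativePath string
	ObjectSize         int64
}

// copyObject stands in for b.dst.CopyObject; per the commit message,
// calling it for a zero-size key broke backups on some object storages.
func copyObject(srcBucket, srcKey string) (int64, error) {
	fmt.Printf("CopyObject %s/%s\n", srcBucket, srcKey)
	return 0, nil
}

func copyStorageObjects(srcBucket, objectDiskPath, backupName, diskName string, objects []StorageObject) error {
	for _, storageObject := range objects {
		// The guard this commit adds: zero-size keys carry no data,
		// so skip them instead of issuing CopyObject.
		if storageObject.ObjectSize == 0 {
			continue
		}
		srcKey := path.Join(objectDiskPath, backupName, diskName, storageObject.ObjectRelativePath)
		if _, err := copyObject(srcBucket, srcKey); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	objects := []StorageObject{
		{ObjectRelativePath: "part_0/data.bin", ObjectSize: 1024},
		{ObjectRelativePath: "part_0/count.txt", ObjectSize: 0}, // skipped
	}
	if err := copyStorageObjects("backup-bucket", "object_disks", "backup-2024-01-09", "disk_s3", objects); err != nil {
		fmt.Println("backup failed:", err)
	}
}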
