
Commit e22d174
change S3_MAX_PARTS_COUNT default value from 256 to 2000 to fix memory usage for s3, which increased in 2.4.16+
Slach committed Jan 26, 2024
1 parent f2edb85 commit e22d174
Showing 4 changed files with 14 additions and 11 deletions.
6 changes: 5 additions & 1 deletion ChangeLog.md
@@ -1,3 +1,7 @@
+# v2.4.22
+BUG FIXES
+- change `S3_MAX_PARTS_COUNT` default value from `256` to `2000` to fix memory usage for s3, which increased in 2.4.16+
+
 # v2.4.21
 BUG FIXES
 - refactoring execution of UpdateBackupMetrics to avoid context canceled error, fix [814](https://github.com/Altinity/clickhouse-backup/issues/814)
@@ -24,7 +28,7 @@ BUG FIXES
 # v2.4.16
 BUG FIXES
 - increase `AZBLOB_TIMEOUT` to 4h instead of 15m to allow downloading large data parts
-- change `S3_MAX_PARTS_COUNT` from `5000` to `1000` and minimal `S3_PART_SIZE` from 5Mb to 25Mb by default to speed up S3 uploading / downloading
+- change `S3_MAX_PARTS_COUNT` default from `5000` to `256` and minimal `S3_PART_SIZE` from 5Mb to 25Mb by default to speed up S3 uploading / downloading

 # v2.4.15
 BUG FIXES
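Why raising the default helps: the per-part buffer is derived by dividing the maximum expected file size by the allowed part count (see the `general.go` hunk further down), so a larger `MaxPartsCount` means smaller per-part buffers and lower memory. A minimal sketch of that arithmetic in Go, with an assumed 1 TiB max file size (not a value taken from this commit):

package main

import "fmt"

// partBufferSize mirrors the rounding logic used when deriving a part
// buffer from a maximum file size; illustrative only, not project code.
func partBufferSize(maxFileSize, maxPartsCount int64) int64 {
	size := maxFileSize / maxPartsCount
	if maxFileSize%maxPartsCount > 0 {
		size++ // round up so maxPartsCount parts always cover the file
	}
	return size
}

func main() {
	const maxFileSize = int64(1) << 40 // assumed 1 TiB upper bound
	fmt.Println(partBufferSize(maxFileSize, 256))  // ~4 GiB per part
	fmt.Println(partBufferSize(maxFileSize, 2000)) // ~512 MiB per part
}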
2 changes: 1 addition & 1 deletion pkg/config/config.go
@@ -558,7 +558,7 @@ func DefaultConfig() *Config {
 			StorageClass:  string(s3types.StorageClassStandard),
 			Concurrency:   int(downloadConcurrency + 1),
 			PartSize:      0,
-			MaxPartsCount: 256,
+			MaxPartsCount: 2000,
 		},
 		GCS: GCSConfig{
 			CompressionLevel: 1,
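A minimal sanity check for the new default; the field names come from the diff, but the import path and the `S3` field name on `Config` are assumptions (the module path may carry a version suffix):

package config_test

import (
	"testing"

	"github.com/Altinity/clickhouse-backup/pkg/config" // assumed module path
)

// TestDefaultS3MaxPartsCount asserts the default changed in this commit.
func TestDefaultS3MaxPartsCount(t *testing.T) {
	if got := config.DefaultConfig().S3.MaxPartsCount; got != 2000 {
		t.Fatalf("S3.MaxPartsCount default = %d, want 2000", got)
	}
}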
11 changes: 5 additions & 6 deletions pkg/storage/gcs.go
@@ -103,15 +103,14 @@ func (gcs *GCS) Connect(ctx context.Context) error {

 	if gcs.Config.ForceHttp {
 		customTransport := &http.Transport{
-			WriteBufferSize:       8388608,
+			WriteBufferSize:       128 * 1024,
 			Proxy:                 http.ProxyFromEnvironment,
 			DialContext: (&net.Dialer{
 				Timeout:   30 * time.Second,
 				KeepAlive: 30 * time.Second,
 			}).DialContext,
 			ForceAttemptHTTP2:     false,
-			MaxIdleConns:          100,
-			MaxIdleConnsPerHost:   100,
+			MaxIdleConns:          1,
+			MaxIdleConnsPerHost:   1,
 			IdleConnTimeout:       90 * time.Second,
 			TLSHandshakeTimeout:   10 * time.Second,
 			ExpectContinueTimeout: 1 * time.Second,
@@ -130,9 +129,9 @@ func (gcs *GCS) Connect(ctx context.Context) error {
 	}
 	clientOptions = append(clientOptions, internaloption.WithDefaultEndpoint(endpoint))

-	customRountripper := &rewriteTransport{base: customTransport}
+	customRoundTripper := &rewriteTransport{base: customTransport}
 	gcpTransport, _, err := googleHTTPTransport.NewClient(ctx, clientOptions...)
-	transport, err := googleHTTPTransport.NewTransport(ctx, customRountripper, clientOptions...)
+	transport, err := googleHTTPTransport.NewTransport(ctx, customRoundTripper, clientOptions...)
 	gcpTransport.Transport = transport
 	if err != nil {
 		return fmt.Errorf("failed to create GCP transport: %v", err)
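`rewriteTransport` is referenced but not defined in this diff. A minimal sketch of what such a wrapper typically looks like, assuming its job is to force plain HTTP (per `gcs.Config.ForceHttp`) before delegating to the base transport; the method body is a guess, not the project's code:

package storage

import "net/http"

// rewriteTransport delegates to a base RoundTripper after rewriting the
// request URL scheme, so TLS is skipped when HTTP is forced (assumed behavior).
type rewriteTransport struct {
	base http.RoundTripper
}

func (t *rewriteTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	// RoundTrippers must not mutate the caller's request, so clone first.
	r := req.Clone(req.Context())
	r.URL.Scheme = "http"
	return t.base.RoundTrip(r)
}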
6 changes: 3 additions & 3 deletions pkg/storage/general.go
@@ -31,7 +31,7 @@ import (

 const (
 	// BufferSize - size of ring buffer between stream handlers
-	BufferSize = 512 * 1024
+	BufferSize = 128 * 1024
 )

 type readerWrapperForContext func(p []byte) (n int, err error)
@@ -635,7 +635,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 	if bufferSize <= 0 {
 		bufferSize = int(cfg.General.MaxFileSize) / cfg.AzureBlob.MaxPartsCount
 		if int(cfg.General.MaxFileSize)%cfg.AzureBlob.MaxPartsCount > 0 {
-			bufferSize++
+			bufferSize += int(cfg.General.MaxFileSize) % cfg.AzureBlob.MaxPartsCount
 		}
 		if bufferSize < 2*1024*1024 {
 			bufferSize = 2 * 1024 * 1024
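The committed sizing above replaces the one-byte round-up (`bufferSize++`) with adding the full remainder, then clamps to a 2 MiB floor. A standalone sketch of the same logic (illustrative, not the project's function):

package main

import "fmt"

// azureBufferSize reproduces the sizing shown in the hunk above:
// divide, add the remainder so the last part is covered, clamp to 2 MiB.
func azureBufferSize(maxFileSize, maxPartsCount int) int {
	bufferSize := maxFileSize / maxPartsCount
	if rem := maxFileSize % maxPartsCount; rem > 0 {
		bufferSize += rem
	}
	if bufferSize < 2*1024*1024 {
		bufferSize = 2 * 1024 * 1024
	}
	return bufferSize
}

func main() {
	fmt.Println(azureBufferSize(1<<30, 256)) // 1 GiB file cap -> 4 MiB buffer
}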
@@ -669,7 +669,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous
 		s3Storage := &S3{
 			Config:      &cfg.S3,
 			Concurrency: cfg.S3.Concurrency,
-			BufferSize:  512 * 1024,
+			BufferSize:  128 * 1024,
 			PartSize:    partSize,
 			Log:         log.WithField("logger", "S3"),
 		}
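A rough back-of-the-envelope reading of the combined changes, assuming peak upload memory scales with concurrency times part size plus the ring buffer (an illustrative model, not a measurement of the project):

package main

import "fmt"

// approxUploadMemory is a crude model: one part-sized buffer per
// concurrent worker plus one stream ring buffer. Assumed, not measured.
func approxUploadMemory(concurrency, partSize, ringBuffer int64) int64 {
	return concurrency*partSize + ringBuffer
}

func main() {
	const MiB = int64(1) << 20
	// Before: 256 max parts -> multi-GiB parts, 512 KiB ring buffer
	// (part sizes assume the 1 TiB max file size used earlier).
	fmt.Println(approxUploadMemory(4, 4096*MiB, 512*1024)/MiB, "MiB")
	// After: 2000 max parts -> ~8x smaller parts, 128 KiB ring buffer.
	fmt.Println(approxUploadMemory(4, 512*MiB, 128*1024)/MiB, "MiB")
}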
