diff --git a/ChangeLog.md b/ChangeLog.md
index 85bce1a2..e5da36c4 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,16 +1,17 @@
-# v2.5.0 (not released yet)
+# v2.4.3
 IMPROVEMENTS
 - add `list` command to API /backup/actions, fix [772](https://github.com/Altinity/clickhouse-backup/issues/772)
 
 BUG FIXES
-- fix behavior for `restore_as_attach: true` for non-replicated MergeTree fix [773](https://github.com/Altinity/clickhouse-backup/issues/773)
+- fix behavior for `restore_as_attach: true` for non-replicated MergeTree, fix [773](https://github.com/Altinity/clickhouse-backup/issues/773)
+- tables with `ENGINE=Dictionary` shall be created after all `dictionaries` to avoid retries, fix [771](https://github.com/Altinity/clickhouse-backup/issues/771)
 
 # v2.4.2
 IMPROVEMENTS
 - add `cpu_nice_priority` and `io_nice_priority` to config, which allow us to throttle CPU and IO usage for the whole `clickhouse-backup` process, fix [757](https://github.com/Altinity/clickhouse-backup/issues/757)
 
 BUG FIXES
-- fix restore for object disk frozen_metadata.txt fix [752](https://github.com/Altinity/clickhouse-backup/issues/752)
+- fix restore for object disk frozen_metadata.txt, fix [752](https://github.com/Altinity/clickhouse-backup/issues/752)
 - fix more corner cases for `check_parts_columns: true`, fix [747](https://github.com/Altinity/clickhouse-backup/issues/747)
 - fix applying macros to s3 endpoint in object disk during restore embedded backups, fix [750](https://github.com/Altinity/clickhouse-backup/issues/750)
 - rewrite GCS clients pool, set default GCS_CLIENT_POOL_SIZE as max(upload_concurrency, download_concurrency) * 3 to avoid stuck, fix [753](https://github.com/Altinity/clickhouse-backup/pull/753), thanks @minguyen-jumptrading
diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go
index a71a247b..95fa96ec 100644
--- a/pkg/backup/restore.go
+++ b/pkg/backup/restore.go
@@ -107,8 +107,8 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par
             }
         }
     }
-    // do not create UDF when use --data flag, https://github.com/Altinity/clickhouse-backup/issues/697
-    if schemaOnly || (schemaOnly == dataOnly) {
+    // do not create UDF when using --data, --rbac-only, --configs-only flags, https://github.com/Altinity/clickhouse-backup/issues/697
+    if schemaOnly || (schemaOnly == dataOnly && !rbacOnly && !configsOnly) {
         for _, function := range backupMetadata.Functions {
             if err = b.ch.CreateUserDefinedFunction(function.Name, function.CreateQuery, b.cfg.General.RestoreSchemaOnCluster); err != nil {
                 return err
diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go
index 3e4ed9c9..ae4a9aa7 100644
--- a/pkg/backup/table_pattern.go
+++ b/pkg/backup/table_pattern.go
@@ -416,8 +416,15 @@ func getTableListByPatternRemote(ctx context.Context, b *Backuper, remoteBackupM
     return result, nil
 }
 
+var lowPriorityEnginesRE = regexp.MustCompile(`ENGINE = (Distributed|Dictionary|Merge)\(`)
+var streamingEnginesRE = regexp.MustCompile(`ENGINE = (Kafka|NATS|RabbitMQ|S3Queue)`)
+
 func getOrderByEngine(query string, dropTable bool) int64 {
-    if strings.Contains(query, "ENGINE = Distributed") || strings.Contains(query, "ENGINE = Kafka") || strings.Contains(query, "ENGINE = RabbitMQ") {
+    if lowPriorityEnginesRE.MatchString(query) {
+        return 5
+    }
+
+    if streamingEnginesRE.MatchString(query) {
         return 4
     }
     if strings.HasPrefix(query, "CREATE DICTIONARY") {
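
For context on the `table_pattern.go` hunk above, here is a minimal, standalone sketch (not part of the diff) of how the new regex-based ordering behaves. The two regexes and the return values 5 and 4 come from the hunk; `orderByEngine`, the value 3 for `CREATE DICTIONARY` (the diff cuts off before its return statement), and the sample DDL strings are assumptions for illustration only.

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
	"strings"
)

// Regexes copied from the diff: engines that reference other tables
// (Distributed, Dictionary, Merge) get the lowest creation priority,
// streaming engines (Kafka, NATS, RabbitMQ, S3Queue) come just before them.
var lowPriorityEnginesRE = regexp.MustCompile(`ENGINE = (Distributed|Dictionary|Merge)\(`)
var streamingEnginesRE = regexp.MustCompile(`ENGINE = (Kafka|NATS|RabbitMQ|S3Queue)`)

// orderByEngine is a simplified stand-in for getOrderByEngine in
// pkg/backup/table_pattern.go: higher values are created later during restore.
func orderByEngine(query string) int64 {
	switch {
	case lowPriorityEnginesRE.MatchString(query):
		return 5
	case streamingEnginesRE.MatchString(query):
		return 4
	case strings.HasPrefix(query, "CREATE DICTIONARY"):
		return 3 // assumed value; the hunk ends before this return
	default:
		return 0
	}
}

func main() {
	queries := []string{
		"CREATE TABLE d.t_dict (k String) ENGINE = Dictionary(d.dict)",
		"CREATE DICTIONARY d.dict (k String) PRIMARY KEY k LAYOUT(FLAT())",
		"CREATE TABLE d.t (k String) ENGINE = MergeTree ORDER BY k",
		"CREATE TABLE d.queue (k String) ENGINE = Kafka SETTINGS kafka_broker_list = 'localhost:9092'",
	}
	// Sorting by priority puts the ENGINE=Dictionary table after CREATE DICTIONARY,
	// which is the retry problem addressed by issue 771 in the ChangeLog.
	sort.SliceStable(queries, func(i, j int) bool {
		return orderByEngine(queries[i]) < orderByEngine(queries[j])
	})
	for _, q := range queries {
		fmt.Println(orderByEngine(q), q)
	}
}
```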