diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index ea12d0f9..484c66e3 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -7,6 +7,7 @@ import ( "os" "strconv" "strings" + "time" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -308,7 +309,10 @@ func main() { UsageText: "clickhouse-backup list [all|local|remote] [latest|previous]", Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) - return b.List(c.Args().Get(0), c.Args().Get(1)) + listStart := time.Now() + err := b.List(c.Args().Get(0), c.Args().Get(1)) + log.Info().TimeDiff("listTimeMs", time.Now(), listStart).Send() + return err }, Flags: cliapp.Flags, }, diff --git a/pkg/backup/list.go b/pkg/backup/list.go index 9646512c..f7c01711 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -4,7 +4,14 @@ import ( "context" "encoding/json" "fmt" + "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" + "github.com/Altinity/clickhouse-backup/v2/pkg/custom" + "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" + "github.com/Altinity/clickhouse-backup/v2/pkg/status" + "github.com/Altinity/clickhouse-backup/v2/pkg/storage" + "github.com/Altinity/clickhouse-backup/v2/pkg/utils" "github.com/ricochet2200/go-disk-usage/du" + "github.com/rs/zerolog/log" "io" "os" "path" @@ -13,15 +20,6 @@ import ( "strings" "text/tabwriter" "time" - - "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" - "github.com/Altinity/clickhouse-backup/v2/pkg/custom" - "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" - "github.com/Altinity/clickhouse-backup/v2/pkg/status" - "github.com/Altinity/clickhouse-backup/v2/pkg/storage" - "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - - "github.com/rs/zerolog/log" ) // List - list backups to stdout from command line @@ -306,11 +304,9 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { // PrintRemoteBackups - print all backups stored on remote 
storage func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error { if !b.ch.IsOpen { - chConnectStart := time.Now() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } - log.Debug().TimeDiff("chConnectStart", time.Now(), chConnectStart).Send() defer b.ch.Close() } w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) @@ -319,15 +315,11 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error log.Error().Msgf("can't flush tabular writer error: %v", err) } }() - getRemoteBackupsStart := time.Now() backupList, err := b.GetRemoteBackups(ctx, true) if err != nil { return err } - log.Debug().TimeDiff("b.GetRemoteBackups", time.Now(), getRemoteBackupsStart).Send() - printBackupsRemoteStart := time.Now() err = printBackupsRemote(w, backupList, format) - log.Debug().TimeDiff("printBackupsRemote", time.Now(), printBackupsRemoteStart).Send() return err } @@ -370,7 +362,7 @@ func (b *Backuper) GetRemoteBackups(ctx context.Context, parseMetadata bool) ([] if err := bd.Connect(ctx); err != nil { return []storage.Backup{}, err } - log.Debug().TimeDiff("bd.Connect", time.Now(), bdConnectStart).Send() + log.Debug().TimeDiff("bd.Connect", time.Now(), bdConnectStart).Send() defer func() { if err := bd.Close(ctx); err != nil { log.Warn().Msgf("can't close BackupDestination error: %v", err) diff --git a/pkg/storage/general.go b/pkg/storage/general.go index 415b4915..0ae10e9e 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -75,11 +75,6 @@ func (bd *BackupDestination) RemoveBackupRemote(ctx context.Context, backup Back } func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string]Backup, error) { - loadMetadataCacheStart := time.Now() - defer func() { - log.Debug().TimeDiff("bd.loadMetadataCache", time.Now(), loadMetadataCacheStart).Send() - }() - listCacheFile := path.Join(os.TempDir(),
fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) listCache := map[string]Backup{} if _, err := os.Stat(listCacheFile); os.IsNotExist(err) { @@ -165,7 +160,7 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, parseMetadataOnly string) ([]Backup, error) { backupListStart := time.Now() defer func() { - log.Debug().TimeDiff("bd.BackupList", time.Now(), backupListStart).Send() + log.Debug().TimeDiff("bd.BackupList", time.Now(), backupListStart).Send() }() result := make([]Backup, 0) metadataCacheLock.Lock() @@ -178,15 +173,12 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, parseMetadata = true } cacheMiss := false - cacheMissCount := 0 - walkStart := time.Now() err = bd.Walk(ctx, "/", false, func(ctx context.Context, o RemoteFile) error { backupName := strings.Trim(o.Name(), "/") if !parseMetadata || (parseMetadataOnly != "" && parseMetadataOnly != backupName) { if cachedMetadata, isCached := listCache[backupName]; isCached { result = append(result, cachedMetadata) } else { - cacheMissCount++ result = append(result, Backup{ BackupMetadata: metadata.BackupMetadata{ BackupName: backupName, @@ -199,7 +191,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, result = append(result, cachedMetadata) return nil } - cacheMissCount++ mf, err := bd.StatFile(ctx, path.Join(o.Name(), "metadata.json")) if err != nil { brokenBackup := Backup{ @@ -260,8 +251,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, if err != nil { log.Warn().Msgf("BackupList bd.Walk return error: %v", err) } - log.Debug().TimeDiff("bd.Walk", time.Now(), walkStart).Int("cacheMissCount", cacheMissCount).Send() - sortStart := time.Now() // sort by name for the same not parsed metadata.json sort.SliceStable(result, func(i, j int) bool { return result[i].BackupName < result[j].BackupName @@
-269,7 +258,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, sort.SliceStable(result, func(i, j int) bool { return result[i].UploadDate.Before(result[j].UploadDate) }) - log.Debug().TimeDiff("sort.SliceStable", time.Now(), sortStart).Send() if cacheMiss || len(result) < len(listCache) { if err = bd.saveMetadataCache(ctx, listCache, result); err != nil { return nil, fmt.Errorf("bd.saveMetadataCache return error: %v", err) @@ -542,6 +530,10 @@ func (bd *BackupDestination) throttleSpeed(startTime time.Time, size int64, maxS } func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, backupName string) (*BackupDestination, error) { + newBackupDestinationStart := time.Now() + defer func() { + log.Debug().TimeDiff("NewBackupDestination", time.Now(), newBackupDestinationStart).Send() + }() + var err error switch cfg.General.RemoteStorage { case "azblob": diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a1b62806..87ca1f73 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -505,6 +505,8 @@ func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) { } +var listTimeMsRE = regexp.MustCompile(`listTimeMs=(\d+)`) + // TestLongListRemote - no parallel, cause need to restart minio func TestLongListRemote(t *testing.T) { env, r := NewTestEnvironment(t) @@ -519,32 +521,41 @@ func TestLongListRemote(t *testing.T) { r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) time.Sleep(2 * time.Second) - startFirst := time.Now() + var err error + var out string + extractListTimeMs := func(out string) float64 { + r.Contains(out, "listTimeMs") + matches := listTimeMsRE.FindStringSubmatch(out) + r.True(len(matches) > 0) + result, parseErr := strconv.ParseFloat(matches[1], 64) + r.NoError(parseErr) + log.Debug().Msg(out) + return result + }
env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - noCacheDuration := time.Since(startFirst) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") + r.NoError(err) + noCacheDuration := extractListTimeMs(out) env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3") - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - time.Sleep(2 * time.Second) + //r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - startCashed := time.Now() - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - cachedDuration := time.Since(startCashed) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") + r.NoError(err) + cachedDuration := extractListTimeMs(out) - r.Greater(noCacheDuration, cachedDuration, "noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration) + r.Greater(noCacheDuration, cachedDuration, "noCacheDuration=%f shall be greater cachedDuration=%f", noCacheDuration, cachedDuration) - r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - time.Sleep(2 * time.Second) + //r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...)) - startCacheClear := time.Now() env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv",
"/tmp/.clickhouse-backup-metadata.cache.S3") - env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") - cacheClearDuration := time.Since(startCacheClear) + out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote") + r.NoError(err) + cacheClearDuration := extractListTimeMs(out) - r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String()) - log.Debug().Msgf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) + r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%f ms shall be greater cachedDuration=%f ms", cacheClearDuration, cachedDuration) + log.Debug().Msgf("noCacheDuration=%f cachedDuration=%f cacheClearDuration=%f", noCacheDuration, cachedDuration, cacheClearDuration) testListRemoteAllBackups := make([]string, totalCacheCount) for i := 0; i < totalCacheCount; i++ {