Skip to content

Commit

Permalink
debug TestLongListRemote
Browse files Browse the repository at this point in the history
  • Loading branch information
Slach committed Oct 7, 2024
1 parent 433840d commit f1f6e7d
Show file tree
Hide file tree
Showing 4 changed files with 44 additions and 46 deletions.
6 changes: 5 additions & 1 deletion cmd/clickhouse-backup/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@ import (
"os"
"strconv"
"strings"
"time"

"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
Expand Down Expand Up @@ -308,7 +309,10 @@ func main() {
UsageText: "clickhouse-backup list [all|local|remote] [latest|previous]",
Action: func(c *cli.Context) error {
b := backup.NewBackuper(config.GetConfigFromCli(c))
return b.List(c.Args().Get(0), c.Args().Get(1))
listStart := time.Now()
err := b.List(c.Args().Get(0), c.Args().Get(1))
log.Info().TimeDiff("listTimeMs", time.Now(), listStart).Send()
return err
},
Flags: cliapp.Flags,
},
Expand Down
24 changes: 8 additions & 16 deletions pkg/backup/list.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,14 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse"
"github.com/Altinity/clickhouse-backup/v2/pkg/custom"
"github.com/Altinity/clickhouse-backup/v2/pkg/metadata"
"github.com/Altinity/clickhouse-backup/v2/pkg/status"
"github.com/Altinity/clickhouse-backup/v2/pkg/storage"
"github.com/Altinity/clickhouse-backup/v2/pkg/utils"
"github.com/ricochet2200/go-disk-usage/du"
"github.com/rs/zerolog/log"
"io"
"os"
"path"
Expand All @@ -13,15 +20,6 @@ import (
"strings"
"text/tabwriter"
"time"

"github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse"
"github.com/Altinity/clickhouse-backup/v2/pkg/custom"
"github.com/Altinity/clickhouse-backup/v2/pkg/metadata"
"github.com/Altinity/clickhouse-backup/v2/pkg/status"
"github.com/Altinity/clickhouse-backup/v2/pkg/storage"
"github.com/Altinity/clickhouse-backup/v2/pkg/utils"

"github.com/rs/zerolog/log"
)

// List - list backups to stdout from command line
Expand Down Expand Up @@ -306,11 +304,9 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error {
// PrintRemoteBackups - print all backups stored on remote storage
func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error {
if !b.ch.IsOpen {
chConnectStart := time.Now()
if err := b.ch.Connect(); err != nil {
return fmt.Errorf("can't connect to clickhouse: %v", err)
}
log.Debug().TimeDiff("chConnectStart", time.Now(), chConnectStart).Send()
defer b.ch.Close()
}
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns)
Expand All @@ -319,15 +315,11 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error
log.Error().Msgf("can't flush tabular writer error: %v", err)
}
}()
getRemoteBackupsStart := time.Now()
backupList, err := b.GetRemoteBackups(ctx, true)
if err != nil {
return err
}
log.Debug().TimeDiff("b.GetRemoteBackups", time.Now(), getRemoteBackupsStart).Send()
printBackupsRemoteStart := time.Now()
err = printBackupsRemote(w, backupList, format)
log.Debug().TimeDiff("printBackupsRemote", time.Now(), printBackupsRemoteStart).Send()
return err
}

Expand Down Expand Up @@ -370,7 +362,7 @@ func (b *Backuper) GetRemoteBackups(ctx context.Context, parseMetadata bool) ([]
if err := bd.Connect(ctx); err != nil {
return []storage.Backup{}, err
}
log.Debug().TimeDiff("bd.Connect", time.Now(), bdConnectStart).Send()
log.Debug().TimeDiff("bd.Connect", time.Now(), bdConnectStart)
defer func() {
if err := bd.Close(ctx); err != nil {
log.Warn().Msgf("can't close BackupDestination error: %v", err)
Expand Down
18 changes: 5 additions & 13 deletions pkg/storage/general.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,11 +75,6 @@ func (bd *BackupDestination) RemoveBackupRemote(ctx context.Context, backup Back
}

func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string]Backup, error) {
loadMetadataCacheStart := time.Now()
defer func() {
log.Debug().TimeDiff("bd.loadMetadataCache", time.Now(), loadMetadataCacheStart).Send()
}()

listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind()))
listCache := map[string]Backup{}
if _, err := os.Stat(listCacheFile); os.IsNotExist(err) {
Expand Down Expand Up @@ -165,7 +160,7 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma
func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, parseMetadataOnly string) ([]Backup, error) {
backupListStart := time.Now()
defer func() {
log.Debug().TimeDiff("bd.BackupList", time.Now(), backupListStart).Send()
log.Debug().TimeDiff("bd.BackupList", time.Now(), backupListStart)
}()
result := make([]Backup, 0)
metadataCacheLock.Lock()
Expand All @@ -178,15 +173,12 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool,
parseMetadata = true
}
cacheMiss := false
cacheMissCount := 0
walkStart := time.Now()
err = bd.Walk(ctx, "/", false, func(ctx context.Context, o RemoteFile) error {
backupName := strings.Trim(o.Name(), "/")
if !parseMetadata || (parseMetadataOnly != "" && parseMetadataOnly != backupName) {
if cachedMetadata, isCached := listCache[backupName]; isCached {
result = append(result, cachedMetadata)
} else {
cacheMissCount++
result = append(result, Backup{
BackupMetadata: metadata.BackupMetadata{
BackupName: backupName,
Expand All @@ -199,7 +191,6 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool,
result = append(result, cachedMetadata)
return nil
}
cacheMissCount++
mf, err := bd.StatFile(ctx, path.Join(o.Name(), "metadata.json"))
if err != nil {
brokenBackup := Backup{
Expand Down Expand Up @@ -260,16 +251,13 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool,
if err != nil {
log.Warn().Msgf("BackupList bd.Walk return error: %v", err)
}
log.Debug().TimeDiff("bd.Walk", time.Now(), walkStart).Int("cacheMissCount", cacheMissCount).Send()
sortStart := time.Now()
// sort by name first, to get a stable order for backups whose metadata.json was not parsed
sort.SliceStable(result, func(i, j int) bool {
return result[i].BackupName < result[j].BackupName
})
sort.SliceStable(result, func(i, j int) bool {
return result[i].UploadDate.Before(result[j].UploadDate)
})
log.Debug().TimeDiff("sort.SliceStable", time.Now(), sortStart).Send()
if cacheMiss || len(result) < len(listCache) {
if err = bd.saveMetadataCache(ctx, listCache, result); err != nil {
return nil, fmt.Errorf("bd.saveMetadataCache return error: %v", err)
Expand Down Expand Up @@ -542,6 +530,10 @@ func (bd *BackupDestination) throttleSpeed(startTime time.Time, size int64, maxS
}

func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, backupName string) (*BackupDestination, error) {
newBackupDestinationStart := time.Now()
defer func() {
log.Debug().TimeDiff("NewBackupDestination", time.Now(), newBackupDestinationStart)
}()
var err error
switch cfg.General.RemoteStorage {
case "azblob":
Expand Down
42 changes: 26 additions & 16 deletions test/integration/integration_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -505,6 +505,8 @@ func (env *TestEnvironment) Cleanup(t *testing.T, r *require.Assertions) {

}

var listTimeMsRE = regexp.MustCompile(`listTimeMs=(\d+)`)

// TestLongListRemote - must not run in parallel, because it needs to restart minio
func TestLongListRemote(t *testing.T) {
env, r := NewTestEnvironment(t)
Expand All @@ -519,32 +521,40 @@ func TestLongListRemote(t *testing.T) {
r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...))
time.Sleep(2 * time.Second)

startFirst := time.Now()
var err error
var out string
extractListTimeMs := func(out string) float64 {
r.Contains(out, "listTimeMs")
matches := listTimeMsRE.FindStringSubmatch(out)
r.True(len(matches) > 0)
result, parseErr := strconv.ParseFloat(matches[1], 64)
r.NoError(parseErr)
log.Debug().Msg(out)
return result
}
env.DockerExecNoError(r, "clickhouse-backup", "rm", "-rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
noCacheDuration := time.Since(startFirst)
out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
r.NoError(err)
noCacheDuration := extractListTimeMs(out)

env.DockerExecNoError(r, "clickhouse-backup", "chmod", "-Rv", "+r", "/tmp/.clickhouse-backup-metadata.cache.S3")

r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...))
time.Sleep(2 * time.Second)
//r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...))

startCashed := time.Now()
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
cachedDuration := time.Since(startCashed)
out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
r.NoError(err)
cachedDuration := extractListTimeMs(out)

r.Greater(noCacheDuration, cachedDuration, "noCacheDuration=%s shall be greater cachedDuration=%s", noCacheDuration, cachedDuration)
r.Greater(noCacheDuration, cachedDuration, "noCacheDuration=%f shall be greater cachedDuration=%f", noCacheDuration, cachedDuration)

r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...))
time.Sleep(2 * time.Second)
//r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "restart", "minio")...))

startCacheClear := time.Now()
env.DockerExecNoError(r, "clickhouse-backup", "rm", "-Rfv", "/tmp/.clickhouse-backup-metadata.cache.S3")
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
cacheClearDuration := time.Since(startCacheClear)
out, err = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "list", "remote")
cacheClearDuration := extractListTimeMs(out)

r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String())
log.Debug().Msgf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String())
r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s ms shall be greater cachedDuration=%s ms", cacheClearDuration, cachedDuration)
log.Debug().Msgf("noCacheDuration=%f cachedDuration=%f cacheClearDuration=%f", noCacheDuration, cachedDuration, cacheClearDuration)

testListRemoteAllBackups := make([]string, totalCacheCount)
for i := 0; i < totalCacheCount; i++ {
Expand Down

0 comments on commit f1f6e7d

Please sign in to comment.