From d0f5b19876946aca4c03079aee7a6255370f7dc9 Mon Sep 17 00:00:00 2001 From: Slach Date: Mon, 12 Jun 2023 12:08:49 +0400 Subject: [PATCH 01/21] migrate from `apex/log` to `rs/zerolog`, fix RaceConditions, fix https://github.com/Altinity/clickhouse-backup/issues/624,see details https://github.com/apex/log/issues/103 --- ChangeLog.md | 1 + cmd/clickhouse-backup/main.go | 17 ++- go.mod | 6 +- go.sum | 52 ++----- pkg/backup/backuper.go | 4 - pkg/backup/create.go | 91 +++++------ pkg/backup/delete.go | 33 ++-- pkg/backup/download.go | 166 ++++++++++---------- pkg/backup/list.go | 40 ++--- pkg/backup/restore.go | 91 ++++++----- pkg/backup/table_pattern.go | 13 +- pkg/backup/upload.go | 76 ++++----- pkg/backup/watch.go | 19 +-- pkg/clickhouse/clickhouse.go | 78 +++++----- pkg/config/config.go | 11 +- pkg/custom/delete_custom.go | 10 +- pkg/custom/download_custom.go | 16 +- pkg/custom/list_custom.go | 16 +- pkg/custom/upload_custom.go | 16 +- pkg/custom/utils.go | 8 +- pkg/filesystemhelper/filesystemhelper.go | 38 ++--- pkg/log_helper/log_level.go | 22 +++ pkg/logcli/cli.go | 63 -------- pkg/logfmt/logfmt.go | 43 ------ pkg/logfmt/logfmt_test.go | 44 ------ pkg/metadata/load.go | 6 +- pkg/resumable/state.go | 24 +-- pkg/server/metrics/metrics.go | 24 +-- pkg/server/server.go | 149 +++++++++--------- pkg/server/utils.go | 12 +- pkg/status/status.go | 28 ++-- pkg/storage/ftp.go | 22 +-- pkg/storage/gcs.go | 10 +- pkg/storage/general.go | 115 +++++++------- pkg/storage/s3.go | 25 ++- pkg/storage/sftp.go | 14 +- pkg/storage/utils.go | 6 +- pkg/utils/utils.go | 10 +- test/integration/integration_test.go | 186 ++++++++++++----------- 39 files changed, 730 insertions(+), 875 deletions(-) create mode 100644 pkg/log_helper/log_level.go delete mode 100644 pkg/logcli/cli.go delete mode 100644 pkg/logfmt/logfmt.go delete mode 100644 pkg/logfmt/logfmt_test.go diff --git a/ChangeLog.md b/ChangeLog.md index fcaf3502..997e474b 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,6 +2,7 @@ IMPROVEMENTS - migrate to `clickhouse-go/v2`, fix [540](https://github.com/Altinity/clickhouse-backup/issues/540), close [562](https://github.com/Altinity/clickhouse-backup/pull/562) - add documentation for `AWS_ARN_ROLE` and `AWS_WEB_IDENTITY_TOKEN_FILE`, fix [563](https://github.com/Altinity/clickhouse-backup/issues/563) +- migrate from `apex/log` to `rs/zerolog`, fix RaceConditions, fix [624](https://github.com/Altinity/clickhouse-backup/issues/624),see details https://github.com/apex/log/issues/103 BUG FIXES - add `FTP_ADDRESS` and `SFTP_PORT` in Default config Readme.md section fix [668](https://github.com/Altinity/clickhouse-backup/issues/668) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 9d1c1cd5..7b4b5c75 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -3,16 +3,19 @@ package main import ( "context" "fmt" + stdlog "log" "os" + "time" "github.com/Altinity/clickhouse-backup/pkg/config" - "github.com/Altinity/clickhouse-backup/pkg/logcli" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/Altinity/clickhouse-backup/pkg/backup" "github.com/Altinity/clickhouse-backup/pkg/server" - "github.com/apex/log" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/rs/zerolog/pkgerrors" "github.com/urfave/cli" ) @@ -23,7 +26,9 @@ var ( ) func main() { - log.SetHandler(logcli.New(os.Stdout)) + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: time.StampMilli}) + stdlog.SetOutput(log.Logger) + 
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" cliapp.Usage = "Tool for easy backup of ClickHouse with cloud support" @@ -414,11 +419,11 @@ func main() { Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) if c.Args().Get(1) == "" { - log.Errorf("Backup name must be defined") + log.Err(fmt.Errorf("Backup name must be defined")).Send() cli.ShowCommandHelpAndExit(c, c.Command.Name, 1) } if c.Args().Get(0) != "local" && c.Args().Get(0) != "remote" { - log.Errorf("Unknown command '%s'\n", c.Args().Get(0)) + log.Err(fmt.Errorf("Unknown command '%s'\n", c.Args().Get(0))).Send() cli.ShowCommandHelpAndExit(c, c.Command.Name, 1) } return b.Delete(c.Args().Get(0), c.Args().Get(1), c.Int("command-id")) @@ -553,6 +558,6 @@ func main() { }, } if err := cliapp.Run(os.Args); err != nil { - log.Fatal(err.Error()) + log.Fatal().Err(err).Send() } } diff --git a/go.mod b/go.mod index aac3df8a..da007da8 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/Azure/go-autorest/autorest v0.11.28 github.com/Azure/go-autorest/autorest/adal v0.9.21 github.com/ClickHouse/clickhouse-go/v2 v2.10.1 - github.com/apex/log v1.9.0 github.com/aws/aws-sdk-go-v2 v1.17.6 github.com/aws/aws-sdk-go-v2/config v1.18.16 github.com/aws/aws-sdk-go-v2/credentials v1.13.16 @@ -18,7 +17,6 @@ require ( github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 github.com/eapache/go-resiliency v1.3.0 - github.com/go-logfmt/logfmt v0.5.1 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 @@ -32,6 +30,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.5 github.com/prometheus/client_golang v1.14.0 + github.com/rs/zerolog v1.29.1 github.com/stretchr/testify v1.8.4 github.com/tencentyun/cos-go-sdk-v5 v0.7.41 github.com/urfave/cli v1.22.10 @@ -76,6 +75,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect + github.com/fatih/color v1.7.0 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.6.1 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect @@ -92,7 +92,9 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/kr/fs v0.1.0 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-ieproxy v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect diff --git a/go.sum b/go.sum index 29ca6afb..c5b75a0b 100644 --- a/go.sum +++ b/go.sum @@ -77,12 +77,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= -github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= -github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod 
h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= -github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= -github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0= github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= @@ -121,7 +115,6 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.18.6 h1:rIFn5J3yDoeuKCE9sESXqM5POTAh github.com/aws/aws-sdk-go-v2/service/sts v1.18.6/go.mod h1:48WJ9l3dwP0GSHWGc5sFGGlCkuA82Mc2xnw+T6Q8aDw= github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -149,6 +142,7 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -176,7 +170,6 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= @@ -192,9 +185,9 @@ github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus/v5 v5.0.4/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= @@ -292,18 +285,15 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jlaffaye/ftp v0.1.0 h1:DLGExl5nBoSFoNshAUHwXAezXwXBvFdx7/qwhucWNSE= github.com/jlaffaye/ftp v0.1.0/go.mod h1:hhq4G4crv+nW2qXtNYcuzLeOudG92Ps37HEKeg2e3lE= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jolestar/go-commons-pool/v2 v2.1.2 h1:E+XGo58F23t7HtZiC/W6jzO2Ux2IccSH/yx4nD+J1CM= github.com/jolestar/go-commons-pool/v2 v2.1.2/go.mod h1:r4NYccrkS5UqP1YQI1COyTZ9UjPJAAGTUxzcsK1kqhY= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -330,21 +320,18 @@ github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-ieproxy v0.0.1/go.mod 
h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.9 h1:RvVbLiMv/Hbjf1gRaC2AQyzwbdVhdId7D2vPnXIml4k= github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= @@ -352,7 +339,6 @@ github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lL github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrSSDunscGsRM= github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -371,8 +357,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/otiai10/copy v1.9.0 h1:7KFNiCgZ91Ru4qW4CWPf/7jqtxLagGRmIxWldPP9VY4= github.com/otiai10/copy v1.9.0/go.mod h1:hsfX19wcn0UWIHUQ3/4fHuehhk2UyArQ9dVFAn3FczI= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -422,26 +406,24 @@ github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0ua github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.29.1 h1:cO+d60CHkknCbvzEWxP0S9K6KqyTjrCNUy1LdQLCGPc= +github.com/rs/zerolog v1.29.1/go.mod 
h1:Le6ESbR7hc+DP6Lt1THiV8CQSdkkNrd3R0XbEgp3ZBU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= -github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -464,13 +446,6 @@ github.com/tencentyun/cos-go-sdk-v5 v0.7.41/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7y github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= -github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= -github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= -github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= -github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= -github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= -github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -504,7 +479,6 @@ go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQP go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -553,7 +527,6 @@ golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -619,10 +592,8 @@ golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -657,6 +628,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -837,8 +810,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= 
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -849,7 +820,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index f86f42ed..b3e9d26b 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -7,7 +7,6 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/storage" - apexLog "github.com/apex/log" "path" ) @@ -15,7 +14,6 @@ type Backuper struct { cfg *config.Config ch *clickhouse.ClickHouse dst *storage.BackupDestination - log *apexLog.Entry DiskToPathMap map[string]string DefaultDataPath string EmbeddedBackupDataPath string @@ -27,12 +25,10 @@ type Backuper struct { func NewBackuper(cfg *config.Config) *Backuper { ch := &clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Log: apexLog.WithField("logger", "clickhouse"), } return &Backuper{ cfg: cfg, ch: ch, - log: apexLog.WithField("logger", "backuper"), } } diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 25bef361..c492679e 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" + "github.com/rs/zerolog" "os" "path" "path/filepath" @@ -17,9 +18,9 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/filesystemhelper" "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/Altinity/clickhouse-backup/pkg/utils" - apexLog "github.com/apex/log" "github.com/google/uuid" recursiveCopy "github.com/otiai10/copy" + "github.com/rs/zerolog/log" ) const ( @@ -49,7 +50,7 @@ func addTable(tables []clickhouse.Table, table clickhouse.Table) []clickhouse.Ta } func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []clickhouse.Table { - log := apexLog.WithField("logger", "filterTablesByPattern") + logger := log.With().Str("logger", "filterTablesByPattern").Logger() if tablePattern == "" { return tables } @@ -61,7 +62,7 @@ func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []cli if matched, _ := filepath.Match(strings.Trim(pattern, " \t\n\r"), tableName); matched { result = addTable(result, t) } else { - log.Debugf("%s not matched with %s", tableName, pattern) + logger.Debug().Msgf("%s not matched with %s", tableName, pattern) } } } @@ 
-89,10 +90,10 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st backupName = NewBackupName() } backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "create", - }) + }).Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -139,9 +140,9 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st partitionsToBackupMap, partitions := filesystemhelper.CreatePartitionsToBackupMap(ctx, b.ch, tables, nil, partitions) // create if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, log, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, logger, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, log, startBackup) + err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, logger, startBackup) } if err != nil { return err @@ -154,7 +155,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st return nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry, startBackup time.Time) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -171,7 +172,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } if _, err := os.Stat(backupPath); os.IsNotExist(err) { if err = filesystemhelper.Mkdir(backupPath, b.ch, disks); err != nil { - log.Errorf("can't create directory %s: %v", backupPath, err) + logger.Error().Msgf("can't create directory %s: %v", backupPath, err) return err } } @@ -183,24 +184,24 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par case <-ctx.Done(): return ctx.Err() default: - log := log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Name)) + tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Logger() if table.Skip { continue } var realSize map[string]int64 var disksToPartsMap map[string][]metadata.Part if doBackupData { - log.Debug("create data") + 
tableLog.Debug().Msg("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") disksToPartsMap, realSize, err = b.AddTableToBackup(ctx, backupName, shadowBackupUUID, disks, &table, partitionsToBackupMap) if err != nil { - log.Error(err.Error()) + tableLog.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + tableLog.Error().Msg(removeBackupErr.Error()) } // fix corner cases after https://github.com/Altinity/clickhouse-backup/issues/379 if cleanShadowErr := b.Clean(ctx); cleanShadowErr != nil { - log.Error(cleanShadowErr.Error()) + tableLog.Error().Msg(cleanShadowErr.Error()) } return err } @@ -210,19 +211,19 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } } // https://github.com/Altinity/clickhouse-backup/issues/529 - log.Debug("get in progress mutations list") + tableLog.Debug().Msg("get in progress mutations list") inProgressMutations := make([]metadata.MutationMetadata, 0) if b.cfg.ClickHouse.BackupMutations { inProgressMutations, err = b.ch.GetInProgressMutations(ctx, table.Database, table.Name) if err != nil { - log.Error(err.Error()) + tableLog.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + tableLog.Error().Msg(removeBackupErr.Error()) } return err } } - log.Debug("create metadata") + tableLog.Debug().Msg("create metadata") metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ Table: table.Name, @@ -236,7 +237,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par }, disks) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + tableLog.Error().Msg(removeBackupErr.Error()) } return err } @@ -245,35 +246,35 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par Database: table.Database, Table: table.Name, }) - log.Infof("done") + tableLog.Info().Msgf("done") } } backupRBACSize, backupConfigSize := uint64(0), uint64(0) if rbacOnly { if backupRBACSize, err = b.createRBACBackup(ctx, backupPath, disks); err != nil { - log.Errorf("error during do RBAC backup: %v", err) + logger.Error().Msgf("error during do RBAC backup: %v", err) } else { - log.WithField("size", utils.FormatBytes(backupRBACSize)).Info("done createRBACBackup") + logger.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createRBACBackup") } } if configsOnly { if backupConfigSize, err = b.createConfigBackup(ctx, backupPath); err != nil { - log.Errorf("error during do CONFIG backup: %v", err) + logger.Error().Msgf("error during do CONFIG backup: %v", err) } else { - log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createConfigBackup") + logger.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createConfigBackup") } } backupMetaFile := path.Join(defaultPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, 
allDatabases, allFunctions, logger); err != nil { return err } - log.WithField("duration", utils.HumanizeDuration(time.Since(startBackup))).Info("done") + logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, log *apexLog.Entry, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, logger zerolog.Logger, startBackup time.Time, backupVersion string) error { if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } @@ -353,7 +354,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }{Size: 0}) } - log.Debug("calculate parts list from embedded backup disk") + logger.Debug().Msg("calculate parts list from embedded backup disk") for _, table := range tables { select { case <-ctx.Done(): @@ -365,7 +366,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa disksToPartsMap, err := b.getPartsFromBackupDisk(backupPath, table, partitionsToBackupMap) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + logger.Error().Msg(removeBackupErr.Error()) } return err } @@ -380,7 +381,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }, disks) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Error(removeBackupErr.Error()) + logger.Error().Msg(removeBackupErr.Error()) } return err } @@ -388,14 +389,14 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } } backupMetaFile := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, logger); err != nil { return err } - log.WithFields(apexLog.Fields{ + logger.Info().Fields(map[string]interface{}{ "operation": "create_embedded", "duration": utils.HumanizeDuration(time.Since(startBackup)), - }).Info("done") + }).Msg("done") return nil } @@ -437,14 +438,14 @@ func (b *Backuper) getPartsFromBackupDisk(backupPath string, table clickhouse.Ta } func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (uint64, error) { - log := b.log.WithField("logger", "createConfigBackup") + logger := log.With().Str("logger", "createConfigBackup").Logger() select { case 
<-ctx.Done(): return 0, ctx.Err() default: backupConfigSize := uint64(0) configBackupPath := path.Join(backupPath, "configs") - log.Debugf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) + logger.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) copyErr := recursiveCopy.Copy(b.cfg.ClickHouse.ConfigDir, configBackupPath, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { backupConfigSize += uint64(srcinfo.Size()) @@ -456,7 +457,7 @@ func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (u } func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { - log := b.log.WithField("logger", "createRBACBackup") + logger := log.With().Str("logger", "createRBACBackup").Logger() select { case <-ctx.Done(): return 0, ctx.Err() @@ -467,7 +468,7 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk if err != nil { return 0, err } - log.Debugf("copy %s -> %s", accessPath, rbacBackup) + logger.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { rbacDataSize += uint64(srcinfo.Size()) @@ -479,17 +480,17 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk } func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsToBackupMap common.EmptyMap) (map[string][]metadata.Part, map[string]int64, error) { - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "create", "table": fmt.Sprintf("%s.%s", table.Database, table.Name), - }) + }).Logger() if backupName == "" { return nil, nil, fmt.Errorf("backupName is not defined") } if !strings.HasSuffix(table.Engine, "MergeTree") && table.Engine != "MaterializedMySQL" && table.Engine != "MaterializedPostgreSQL" { - log.WithField("engine", table.Engine).Warnf("supports only schema backup") + logger.Warn().Str("engine", table.Engine).Msg("supports only schema backup") return nil, nil, nil } if b.cfg.ClickHouse.CheckPartsColumns { @@ -501,7 +502,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku if err := b.ch.FreezeTable(ctx, table, shadowBackupUUID); err != nil { return nil, nil, err } - log.Debug("frozen") + logger.Debug().Msg("frozen") version, err := b.ch.GetVersion(ctx) if err != nil { return nil, nil, err @@ -530,7 +531,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku } realSize[disk.Name] = size disksToPartsMap[disk.Name] = parts - log.WithField("disk", disk.Name).Debug("shadow moved") + logger.Debug().Str("disk", disk.Name).Msg("shadow moved") // Clean all the files under the shadowPath, cause UNFREEZE unavailable if version < 21004000 { if err := os.RemoveAll(shadowPath); err != nil { @@ -545,11 +546,11 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku return disksToPartsMap, realSize, err } } - log.Debug("done") + logger.Debug().Msg("done") return disksToPartsMap, realSize, nil } -func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas 
[]metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry) error { +func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger) error { select { case <-ctx.Done(): return ctx.Err() @@ -585,7 +586,7 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac return err } if err := filesystemhelper.Chown(backupMetaFile, b.ch, disks, false); err != nil { - log.Warnf("can't chown %s: %v", backupMetaFile, err) + logger.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) } return nil } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 156b22e3..a457a04d 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -13,13 +13,12 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/pkg/storage" - - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" ) // Clean - removed all data in shadow folder func (b *Backuper) Clean(ctx context.Context) error { - log := b.log.WithField("logger", "Clean") + logger := log.With().Str("logger", "Clean").Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -37,7 +36,7 @@ func (b *Backuper) Clean(ctx context.Context) error { if err := b.cleanDir(shadowDir); err != nil { return fmt.Errorf("can't clean '%s': %v", shadowDir, err) } - log.Info(shadowDir) + logger.Info().Msg(shadowDir) } return nil } @@ -96,7 +95,7 @@ func (b *Backuper) RemoveOldBackupsLocal(ctx context.Context, keepLastBackup boo } func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - log := b.log.WithField("logger", "RemoveBackupLocal") + logger := log.With().Str("logger", "RemoveBackupLocal").Logger() var err error start := time.Now() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") @@ -121,17 +120,17 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis if disk.IsBackup { backupPath = path.Join(disk.Path, backupName) } - log.Debugf("remove '%s'", backupPath) + logger.Debug().Msgf("remove '%s'", backupPath) err = os.RemoveAll(backupPath) if err != nil { return err } } - log.WithField("operation", "delete"). - WithField("location", "local"). - WithField("backup", backupName). - WithField("duration", utils.HumanizeDuration(time.Since(start))). - Info("done") + logger.Info().Str("operation", "delete"). + Str("location", "local"). + Str("backup", backupName). + Str("duration", utils.HumanizeDuration(time.Since(start))). 
+ Msg("done") return nil } } @@ -139,12 +138,12 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis } func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) error { - log := b.log.WithField("logger", "RemoveBackupRemote") + logger := log.With().Str("logger", "RemoveBackupRemote").Logger() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") start := time.Now() if b.cfg.General.RemoteStorage == "none" { err := errors.New("aborted: RemoteStorage set to \"none\"") - log.Error(err.Error()) + logger.Error().Msg(err.Error()) return err } if b.cfg.General.RemoteStorage == "custom" { @@ -165,7 +164,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + logger.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -176,15 +175,15 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er for _, backup := range backupList { if backup.BackupName == backupName { if err := bd.RemoveBackup(ctx, backup); err != nil { - log.Warnf("bd.RemoveBackup return error: %v", err) + logger.Warn().Msgf("bd.RemoveBackup return error: %v", err) return err } - log.WithFields(apexLog.Fields{ + logger.Info().Fields(map[string]interface{}{ "backup": backupName, "location": "remote", "operation": "delete", "duration": utils.HumanizeDuration(time.Since(start)), - }).Info("done") + }).Msg("done") return nil } } diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 66bb3efe..538c37eb 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -12,6 +12,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog" "io" "os" "path" @@ -28,8 +29,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/Altinity/clickhouse-backup/pkg/storage" "github.com/Altinity/clickhouse-backup/pkg/utils" - - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" ) var ( @@ -37,10 +37,10 @@ var ( ) func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error { - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "download_legacy", - }) + }).Logger() bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, true, "") if err != nil { return err @@ -50,7 +50,7 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + logger.Warn().Msgf("can't close BackupDestination error: %v", err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -60,7 +60,7 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error if err != nil { return err } - log.Info("done") + logger.Info().Msg("done") return nil } @@ -77,10 +77,10 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer b.ch.Close() - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "download", - }) + }).Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for download, 
change you config or use REMOTE_STORAGE environment variable") } @@ -104,7 +104,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if strings.Contains(localBackups[i].Tags, "embedded") || b.cfg.General.RemoteStorage == "custom" { return ErrBackupIsAlreadyExists } - log.Warnf("%s already exists will try to resume download", backupName) + logger.Warn().Msgf("%s already exists will try to resume download", backupName) } } } @@ -117,7 +117,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer func() { if err := b.dst.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + logger.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -145,7 +145,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if schemaOnly { return fmt.Errorf("'%s' is old format backup and doesn't supports download of schema only", backupName) } - log.Warnf("'%s' is old-format backup", backupName) + logger.Warn().Msgf("'%s' is old-format backup", backupName) return b.legacyDownload(ctx, backupName) } if len(remoteBackup.Tables) == 0 && !b.cfg.General.AllowEmptyBackups { @@ -180,20 +180,20 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ }) } - log.Debugf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) + logger.Debug().Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) downloadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) metadataGroup, metadataCtx := errgroup.WithContext(ctx) for i, t := range tablesForDownload { if err := downloadSemaphore.Acquire(metadataCtx, 1); err != nil { - log.Errorf("can't acquire semaphore during Download metadata: %v", err) + logger.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) break } - log := log.WithField("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)) + tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)).Logger() idx := i tableTitle := t metadataGroup.Go(func() error { defer downloadSemaphore.Release(1) - downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, log, tableTitle, schemaOnly, partitions) + downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableLogger, tableTitle, schemaOnly, partitions) if err != nil { return err } @@ -210,11 +210,11 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ for disk := range t.Parts { if _, diskExists := b.DiskToPathMap[disk]; !diskExists && disk != b.cfg.ClickHouse.EmbeddedBackupDisk { b.DiskToPathMap[disk] = b.DiskToPathMap["default"] - log.Warnf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) + logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) } } } - log.Debugf("prepare table SHADOW concurrent semaphore with concurrency=%d 
len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) + logger.Debug().Msgf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) dataGroup, dataCtx := errgroup.WithContext(ctx) for i, tableMetadata := range tableMetadataAfterDownload { @@ -222,7 +222,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ continue } if err := downloadSemaphore.Acquire(dataCtx, 1); err != nil { - log.Errorf("can't acquire semaphore during Download table data: %v", err) + logger.Error().Msgf("can't acquire semaphore during Download table data: %v", err) break } dataSize += tableMetadata.TotalBytes @@ -233,12 +233,12 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, tableMetadataAfterDownload[idx]); err != nil { return err } - log. - WithField("operation", "download_data"). - WithField("table", fmt.Sprintf("%s.%s", tableMetadataAfterDownload[idx].Database, tableMetadataAfterDownload[idx].Table)). - WithField("duration", utils.HumanizeDuration(time.Since(start))). - WithField("size", utils.FormatBytes(tableMetadataAfterDownload[idx].TotalBytes)). - Info("done") + logger.Info(). + Str("operation", "download_data"). + Str("table", fmt.Sprintf("%s.%s", tableMetadataAfterDownload[idx].Database, tableMetadataAfterDownload[idx].Table)). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("size", utils.FormatBytes(tableMetadataAfterDownload[idx].TotalBytes)). + Msg("done") return nil }) } @@ -297,24 +297,24 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ b.resumableState.Close() } - log. - WithField("duration", utils.HumanizeDuration(time.Since(startDownload))). - WithField("size", utils.FormatBytes(dataSize+metadataSize+rbacSize+configSize)). - Info("done") + logger.Info(). + Str("duration", utils.HumanizeDuration(time.Since(startDownload))). + Str("size", utils.FormatBytes(dataSize+metadataSize+rbacSize+configSize)). 
+ Msg("done") return nil } -func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, log *apexLog.Entry, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { +func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, logger zerolog.Logger, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { metadataLocalFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) tm := &metadata.TableMetadata{} if _, err := tm.Load(metadataLocalFile); err == nil { return tm, nil } - tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, log.WithFields(apexLog.Fields{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}), tableTitle, false, nil) + tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, logger.With().Fields(map[string]interface{}{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}).Logger(), tableTitle, false, nil) return tm, err } -func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, log *apexLog.Entry, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { +func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, logger zerolog.Logger, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { start := time.Now() size := uint64(0) metadataFiles := map[string]string{} @@ -398,10 +398,10 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } - log. - WithField("duration", utils.HumanizeDuration(time.Since(start))). - WithField("size", utils.FormatBytes(size)). - Info("done") + logger.Info(). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("size", utils.FormatBytes(size)). + Msg("done") return &tableMetadata, size, nil } @@ -414,7 +414,7 @@ func (b *Backuper) downloadConfigData(ctx context.Context, remoteBackup storage. 
} func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup storage.Backup, prefix string) (uint64, error) { - log := b.log.WithField("logger", "downloadBackupRelatedDir") + logger := log.With().Str("logger", "downloadBackupRelatedDir").Logger() archiveFile := fmt.Sprintf("%s.%s", prefix, b.cfg.GetArchiveExtension()) remoteFile := path.Join(remoteBackup.BackupName, archiveFile) if b.resume { @@ -425,7 +425,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st localDir := path.Join(b.DefaultDataPath, "backup", remoteBackup.BackupName, prefix) remoteFileInfo, err := b.dst.StatFile(ctx, remoteFile) if err != nil { - log.Debugf("%s not exists on remote storage, skip download", remoteFile) + logger.Debug().Msgf("%s not exists on remote storage, skip download", remoteFile) return 0, nil } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -442,7 +442,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { - log := b.log.WithField("logger", "downloadTableData") + logger := log.With().Str("logger", "downloadTableData").Logger() dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -455,7 +455,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. capacity += len(table.Files[disk]) downloadOffset[disk] = 0 } - log.Debugf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorArchive: for common.SumMapValuesInt(downloadOffset) < capacity { for disk := range table.Files { @@ -464,7 +464,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } archiveFile := table.Files[disk][downloadOffset[disk]] if err := s.Acquire(dataCtx, 1); err != nil { - log.Errorf("can't acquire semaphore %s archive: %v", archiveFile, err) + logger.Error().Msgf("can't acquire semaphore %s archive: %v", archiveFile, err) break breakByErrorArchive } tableLocalDir := b.getLocalBackupDataPathForTable(remoteBackup.BackupName, disk, dbAndTableDir) @@ -472,7 +472,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. tableRemoteFile := path.Join(remoteBackup.BackupName, "shadow", common.TablePathEncode(table.Database), common.TablePathEncode(table.Table), archiveFile) g.Go(func() error { defer s.Release(1) - log.Debugf("start download %s", tableRemoteFile) + logger.Debug().Msgf("start download %s", tableRemoteFile) if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { return nil } @@ -486,7 +486,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(tableRemoteFile, 0) } - log.Debugf("finish download %s", tableRemoteFile) + logger.Debug().Msgf("finish download %s", tableRemoteFile) return nil }) } @@ -496,7 +496,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
for disk := range table.Parts { capacity += len(table.Parts[disk]) } - log.Debugf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorDirectory: for disk, parts := range table.Parts { @@ -512,13 +512,13 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } partRemotePath := path.Join(tableRemotePath, part.Name) if err := s.Acquire(dataCtx, 1); err != nil { - log.Errorf("can't acquire semaphore %s directory: %v", partRemotePath, err) + logger.Error().Msgf("can't acquire semaphore %s directory: %v", partRemotePath, err) break breakByErrorDirectory } partLocalPath := path.Join(tableLocalPath, part.Name) g.Go(func() error { defer s.Release(1) - log.Debugf("start %s -> %s", partRemotePath, partLocalPath) + logger.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) if b.resume && b.resumableState.IsAlreadyProcessedBool(partRemotePath) { return nil } @@ -528,7 +528,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(partRemotePath, 0) } - log.Debugf("finish %s -> %s", partRemotePath, partLocalPath) + logger.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) return nil }) } @@ -549,8 +549,8 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { - log := b.log.WithField("operation", "downloadDiffParts") - log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("start") + logger := log.With().Str("operation", "downloadDiffParts").Logger() + logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("start") start := time.Now() downloadedDiffParts := uint32(0) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -576,14 +576,14 @@ breakByError: } if err != nil && os.IsNotExist(err) { if err := s.Acquire(downloadDiffCtx, 1); err != nil { - log.Errorf("can't acquire semaphore during downloadDiffParts: %v", err) + logger.Error().Msgf("can't acquire semaphore during downloadDiffParts: %v", err) break breakByError } partForDownload := part diskForDownload := disk downloadDiffGroup.Go(func() error { defer s.Release(1) - tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload, log) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload, logger) if err != nil { return err } @@ -630,22 +630,22 @@ breakByError: if err := downloadDiffGroup.Wait(); err != nil { return fmt.Errorf("one of downloadDiffParts go-routine return error: %v", err) } - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).WithField("diff_parts", strconv.Itoa(int(downloadedDiffParts))).Info("done") + logger.Info().Str("duration", utils.HumanizeDuration(time.Since(start))).Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))).Msg("done") return nil } func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLock *sync.Mutex, diffRemoteFilesCache map[string]*sync.Mutex, tableRemoteFile string, tableLocalDir string) error { - log := 
b.log.WithField("logger", "downloadDiffRemoteFile") + logger := log.With().Str("logger", "downloadDiffRemoteFile").Logger() diffRemoteFilesLock.Lock() namedLock, isCached := diffRemoteFilesCache[tableRemoteFile] if isCached { - log.Debugf("wait download begin %s", tableRemoteFile) + logger.Debug().Msgf("wait download begin %s", tableRemoteFile) namedLock.Lock() diffRemoteFilesLock.Unlock() namedLock.Unlock() - log.Debugf("wait download end %s", tableRemoteFile) + logger.Debug().Msgf("wait download end %s", tableRemoteFile) } else { - log.Debugf("start download from %s", tableRemoteFile) + logger.Debug().Msgf("start download from %s", tableRemoteFile) namedLock = &sync.Mutex{} diffRemoteFilesCache[tableRemoteFile] = namedLock namedLock.Lock() @@ -656,18 +656,18 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir) }) if err != nil { - log.Warnf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + logger.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } else { // remoteFile could be a directory if err := b.dst.DownloadPath(ctx, 0, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { - log.Warnf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + logger.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } namedLock.Unlock() - log.Debugf("finish download from %s", tableRemoteFile) + logger.Debug().Msgf("finish download from %s", tableRemoteFile) } return nil } @@ -686,21 +686,21 @@ func (b *Backuper) checkNewPath(newPath string, part metadata.Part) error { return nil } -func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part, log *apexLog.Entry) (map[string]string, error) { +func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part, logger zerolog.Logger) (map[string]string, error) { var requiredTable *metadata.TableMetadata - log.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Debugf("start") + logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") requiredBackup, err := b.ReadBackupMetadataRemote(ctx, backup.RequiredBackup) if err != nil { return nil, err } - requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, log, metadata.TableTitle{Database: table.Database, Table: table.Table}) + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, logger, metadata.TableTitle{Database: table.Database, Table: table.Table}) if err != nil { - log.Warnf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) + logger.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) return nil, err } // recursive find if part in RequiredBackup also Required - tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, log, table, requiredTable, part, disk) + 
tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, logger, table, requiredTable, part, disk) if found { return tableRemoteFiles, nil } @@ -741,18 +741,18 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat return nil, fmt.Errorf("%s.%s %s not found on %s and all required backups sequence", table.Database, table.Table, part.Name, requiredBackup.BackupName) } -func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, log *apexLog.Entry, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { - log.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Debugf("start") +func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, logger zerolog.Logger, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { + logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") found := false for _, requiredParts := range requiredTable.Parts { for _, requiredPart := range requiredParts { if requiredPart.Name == part.Name { found = true if requiredPart.Required { - tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part, log) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part, logger) if err != nil { found = false - log.Warnf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) + logger.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) } return tableRemoteFiles, found, err } @@ -767,8 +767,8 @@ func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metada } func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (map[string]string, error, bool) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}) - log.Debugf("start") + logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Logger() + logger.Debug().Msg("start") tableRemoteFiles := make(map[string]string) // find same disk and part name archive if requiredBackup.DataFormat != "directory" { @@ -787,8 +787,8 @@ func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata } func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}) - log.Debugf("start") + logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}).Logger() + logger.Debug().Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) tableRemotePath := 
path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, remoteDisk, part.Name) tableRemoteFile := path.Join(tableRemotePath, "checksums.txt") @@ -796,8 +796,8 @@ func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup } func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}) - log.Debugf("start") + logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Logger() + logger.Debug().Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) remoteExt := config.ArchiveExtensions[requiredBackup.DataFormat] tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, fmt.Sprintf("%s_%s.%s", remoteDisk, common.TablePathEncode(part.Name), remoteExt)) @@ -806,10 +806,10 @@ func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *m } func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metadata.BackupMetadata, tableRemoteFile string, tableRemotePath string, localDisk string, dbAndTableDir string, part metadata.Part) (string, string, error) { + logger := log.With().Str("logger", "findDiffFileExist").Logger() _, err := b.dst.StatFile(ctx, tableRemoteFile) - log := b.log.WithField("logger", "findDiffFileExist") if err != nil { - log.WithFields(apexLog.Fields{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Debugf("findDiffFileExist not found") + logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") return "", "", err } if tableLocalDir, diskExists := b.DiskToPathMap[localDisk]; !diskExists { @@ -820,7 +820,7 @@ func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metada } else { tableLocalDir = path.Join(tableLocalDir, "backup", requiredBackup.BackupName, "shadow", dbAndTableDir, localDisk) } - log.WithFields(apexLog.Fields{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Debugf("findDiffFileExist found") + logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") return tableRemotePath, tableLocalDir, nil } } @@ -839,14 +839,14 @@ func (b *Backuper) ReadBackupMetadataRemote(ctx context.Context, backupName stri } func (b *Backuper) makePartHardlinks(exists, new string) error { - log := apexLog.WithField("logger", "makePartHardlinks") + logger := log.With().Str("logger", "makePartHardlinks").Logger() ex, err := os.Open(exists) if err != nil { return err } defer func() { if err = ex.Close(); err != nil { - log.Warnf("Can't close %s", exists) + logger.Warn().Msgf("Can't close %s", exists) } }() files, err := ex.Readdirnames(-1) @@ -854,7 +854,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { return err } if err := os.MkdirAll(new, 0750); err != nil { - log.Warnf("MkDirAll(%s) error: %v", new, err) + logger.Warn().Msgf("MkDirAll(%s) error: %v", new, err) return err } for _, f := range files { @@ -864,7 +864,7 @@ 
func (b *Backuper) makePartHardlinks(exists, new string) error { existsFInfo, existsStatErr := os.Stat(existsF) newFInfo, newStatErr := os.Stat(newF) if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) { - log.Warnf("Link %s -> %s error: %v", newF, existsF, err) + logger.Warn().Msgf("Link %s -> %s error: %v", newF, existsF, err) return err } } @@ -876,7 +876,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - log := b.log.WithField("logger", "downloadSingleBackupFile") + logger := log.With().Str("logger", "downloadSingleBackupFile").Logger() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { remoteReader, err := b.dst.GetFileReader(ctx, remoteFile) @@ -886,7 +886,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = remoteReader.Close() if err != nil { - log.Warnf("can't close remoteReader %s", remoteFile) + logger.Warn().Msgf("can't close remoteReader %s", remoteFile) } }() localWriter, err := os.OpenFile(localFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) @@ -897,7 +897,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = localWriter.Close() if err != nil { - log.Warnf("can't close localWriter %s", localFile) + logger.Warn().Msgf("can't close localWriter %s", localFile) } }() diff --git a/pkg/backup/list.go b/pkg/backup/list.go index 6fa4b056..d490a090 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/pkg/custom" "github.com/Altinity/clickhouse-backup/pkg/status" - apexLog "github.com/apex/log" "io" "os" "path" @@ -18,6 +17,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/Altinity/clickhouse-backup/pkg/storage" "github.com/Altinity/clickhouse-backup/pkg/utils" + "github.com/rs/zerolog/log" ) // List - list backups to stdout from command line @@ -35,7 +35,7 @@ func (b *Backuper) List(what, format string) error { return nil } func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) error { - log := apexLog.WithField("logger", "printBackupsRemote") + logger := log.With().Str("logger", "printBackupsRemote").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -73,7 +73,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) size = "???" } if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, uploadDate, "remote", required, description); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } default: @@ -83,7 +83,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) } func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBackup, format string) error { - log := apexLog.WithField("logger", "printBackupsLocal") + logger := log.With().Str("logger", "printBackupsLocal").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -122,7 +122,7 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku size = "???" 
} if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, creationDate, "local", required, description); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } } @@ -134,7 +134,7 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku // PrintLocalBackups - print all backups stored locally func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { - log := apexLog.WithField("logger", "PrintLocalBackups") + logger := log.With().Str("logger", "PrintLocalBackups").Logger() if !b.ch.IsOpen { if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) @@ -144,7 +144,7 @@ func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + logger.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, _, err := b.GetLocalBackups(ctx, nil) @@ -163,7 +163,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } defer b.ch.Close() } - log := b.log.WithField("logger", "GetLocalBackups") + logger := log.With().Str("logger", "GetLocalBackups").Logger() if disks == nil { disks, err = b.ch.GetDisks(ctx) if err != nil { @@ -244,7 +244,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) }) } if closeErr := d.Close(); closeErr != nil { - log.Errorf("can't close %s openError: %v", backupPath, closeErr) + logger.Error().Msgf("can't close %s openError: %v", backupPath, closeErr) } } } @@ -262,10 +262,10 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { } defer b.ch.Close() } - log := b.log.WithField("logger", "PrintAllBackups") + logger := log.With().Str("logger", "PrintAllBackups").Logger() defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + logger.Error().Msgf("can't flush tabular writer error: %v", err) } }() localBackups, _, err := b.GetLocalBackups(ctx, nil) @@ -273,7 +273,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsLocal(ctx, w, localBackups, format); err != nil { - log.Warnf("printBackupsLocal return error: %v", err) + logger.Warn().Msgf("printBackupsLocal return error: %v", err) } if b.cfg.General.RemoteStorage != "none" { @@ -282,7 +282,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsRemote(w, remoteBackups, format); err != nil { - log.Warnf("printBackupsRemote return error: %v", err) + logger.Warn().Msgf("printBackupsRemote return error: %v", err) } } return nil @@ -296,11 +296,11 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error } defer b.ch.Close() } - log := b.log.WithField("logger", "PrintRemoteBackups") + logger := log.With().Str("logger", "PrintRemoteBackups").Logger() w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + logger.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, err := b.GetRemoteBackups(ctx, true) @@ -350,7 +350,7 @@ func (b *Backuper) 
GetRemoteBackups(ctx context.Context, parseMetadata bool) ([] } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() backupList, err := bd.BackupList(ctx, parseMetadata, "") @@ -392,7 +392,7 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { return fmt.Errorf("can't connect to clickhouse: %v", err) } defer b.ch.Close() - log := b.log.WithField("logger", "PrintTables") + logger := log.With().Str("logger", "PrintTables").Logger() allTables, err := b.GetTables(ctx, tablePattern) if err != nil { return err @@ -412,16 +412,16 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { } if table.Skip { if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\tskip\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } continue } if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\t\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + logger.Error().Msgf("can't flush tabular writer error: %v", err) } return nil } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index f20ddb5e..207e0405 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" + "github.com/rs/zerolog" "os" "os/exec" "path" @@ -12,16 +13,14 @@ import ( "strings" "time" - "github.com/Altinity/clickhouse-backup/pkg/common" - - "github.com/mattn/go-shellwords" - "github.com/Altinity/clickhouse-backup/pkg/clickhouse" + "github.com/Altinity/clickhouse-backup/pkg/common" "github.com/Altinity/clickhouse-backup/pkg/filesystemhelper" "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/Altinity/clickhouse-backup/pkg/utils" - apexLog "github.com/apex/log" + "github.com/mattn/go-shellwords" recursiveCopy "github.com/otiai10/copy" + "github.com/rs/zerolog/log" "github.com/yargevad/filepathx" ) @@ -40,10 +39,10 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } - log := apexLog.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "restore", - }) + }).Logger() doRestoreData := !schemaOnly || dataOnly if err := b.ch.Connect(); err != nil { @@ -61,7 +60,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { - log.Warnf("%v", err) + logger.Warn().Msgf("%v", err) return ErrUnknownClickhouseDataPath } backupMetafileLocalPaths := []string{path.Join(defaultDataPath, "backup", backupName, "metadata.json")} @@ -71,7 +70,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err == nil && embeddedBackupPath != "" { backupMetafileLocalPaths = append(backupMetafileLocalPaths, path.Join(embeddedBackupPath, backupName, "metadata.json")) } else if b.cfg.ClickHouse.UseEmbeddedBackupRestore && 
b.cfg.ClickHouse.EmbeddedBackupDisk == "" { - log.Warnf("%v", err) + logger.Warn().Msgf("%v", err) } else if err != nil { return err } @@ -107,7 +106,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } if len(backupMetadata.Tables) == 0 { - log.Warnf("'%s' doesn't contains tables for restore", backupName) + logger.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) if (!rbacOnly) && (!configsOnly) { return nil } @@ -130,13 +129,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if needRestart { - log.Warnf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) + logger.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) cmd, err := shellwords.Parse(b.ch.Config.RestartCommand) if err != nil { return err } ctx, cancel := context.WithTimeout(ctx, 180*time.Second) - log.Infof("run %s", b.ch.Config.RestartCommand) + logger.Info().Msgf("run %s", b.ch.Config.RestartCommand) var out []byte if len(cmd) > 1 { out, err = exec.CommandContext(ctx, cmd[0], cmd[1:]...).CombinedOutput() @@ -144,7 +143,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par out, err = exec.CommandContext(ctx, cmd[0]).CombinedOutput() } cancel() - log.Debug(string(out)) + logger.Debug().Msg(string(out)) return err } @@ -158,7 +157,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } } - log.Info("done") + logger.Info().Msg("done") return nil } @@ -216,14 +215,14 @@ func (b *Backuper) prepareRestoreDatabaseMapping(databaseMapping []string) error // restoreRBAC - copy backup_name>/rbac folder to access_data_path func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - log := b.log.WithField("logger", "restoreRBAC") + logger := log.With().Str("logger", "restoreRBAC").Logger() accessPath, err := b.ch.GetAccessManagementPath(ctx, nil) if err != nil { return err } if err = b.restoreBackupRelatedDir(backupName, "access", accessPath, disks); err == nil { markFile := path.Join(accessPath, "need_rebuild_lists.mark") - log.Infof("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) + logger.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) file, err := os.Create(markFile) if err != nil { return err @@ -231,7 +230,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c _ = file.Close() _ = filesystemhelper.Chown(markFile, b.ch, disks, false) listFilesPattern := path.Join(accessPath, "*.list") - log.Infof("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) + logger.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) if listFiles, err := filepathx.Glob(listFilesPattern); err != nil { return err } else { @@ -258,7 +257,7 @@ func (b *Backuper) restoreConfigs(backupName string, disks []clickhouse.Disk) er } func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinationDir string, disks []clickhouse.Disk) error { - log := b.log.WithField("logger", "restoreBackupRelatedDir") + logger := log.With().Str("logger", "restoreBackupRelatedDir").Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -272,7 +271,7 @@ func (b *Backuper) 
restoreBackupRelatedDir(backupName, backupPrefixDir, destinat if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } - log.Debugf("copy %s -> %s", srcBackupDir, destinationDir) + logger.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) copyOptions := recursiveCopy.Options{OnDirExists: func(src, dest string) recursiveCopy.DirExistsAction { return recursiveCopy.Merge }} @@ -295,10 +294,10 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat // RestoreSchema - restore schemas matched by tablePattern from backupName func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern string, dropTable, ignoreDependencies bool, disks []clickhouse.Disk, isEmbedded bool) error { - log := apexLog.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "restore", - }) + }).Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { @@ -340,14 +339,14 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { + if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, logger); dropErr != nil { return dropErr } var restoreErr error if isEmbedded { restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore) } else { - restoreErr = b.restoreSchemaRegular(tablesForRestore, version, log) + restoreErr = b.restoreSchemaRegular(tablesForRestore, version, logger) } if restoreErr != nil { return restoreErr @@ -361,7 +360,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) } -func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, log *apexLog.Entry) error { +func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, logger zerolog.Logger) error { totalRetries := len(tablesForRestore) restoreRetries := 0 isDatabaseCreated := common.EmptyMap{} @@ -390,7 +389,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i // https://github.com/Altinity/clickhouse-backup/issues/466 if b.cfg.General.RestoreSchemaOnCluster == "" && strings.Contains(schema.Query, "{uuid}") && strings.Contains(schema.Query, "Replicated") { if !strings.Contains(schema.Query, "UUID") { - log.Warnf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") + logger.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") } else { schema.Query = UUIDWithReplicatedMergeTreeRE.ReplaceAllString(schema.Query, "$1$2$3'$4'$5$4$7") } @@ -408,7 +407,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i schema.Database, schema.Table, restoreErr, restoreRetries, ) } else { - log.Warnf( + logger.Warn().Msgf( "can't create table '%s.%s': %v, will try again", schema.Database, schema.Table, restoreErr, ) } @@ -423,7 +422,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i return nil } -func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int, log *apexLog.Entry) error { +func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version 
int, logger zerolog.Logger) error { var dropErr error dropRetries := 0 totalRetries := len(tablesForDrop) @@ -464,7 +463,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci schema.Database, schema.Table, dropErr, dropRetries, ) } else { - log.Warnf( + logger.Warn().Msgf( "can't drop table '%s.%s': %v, will try again", schema.Database, schema.Table, dropErr, ) } @@ -482,10 +481,10 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci // RestoreData - restore data for tables matched by tablePattern from backupName func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePattern string, partitions []string, disks []clickhouse.Disk, isEmbedded bool) error { startRestore := time.Now() - log := apexLog.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "restore", - }) + }).Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -518,16 +517,16 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - log.Debugf("found %d tables with data in backup", len(tablesForRestore)) + logger.Debug().Msgf("found %d tables with data in backup", len(tablesForRestore)) if isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, tablesForRestore, partitions) } else { - err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks, log) + err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks, logger) } if err != nil { return err } - log.WithField("duration", utils.HumanizeDuration(time.Since(startRestore))).Info("done") + logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startRestore))).Msg("done") return nil } @@ -535,7 +534,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, t return b.restoreEmbedded(ctx, backupName, false, tablesForRestore, partitions) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk, logger zerolog.Logger) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreDatabaseMapping(tablePattern) } @@ -543,7 +542,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta if err != nil { return err } - disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, log, disks) + disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, logger, disks) dstTablesMap := b.prepareDstTablesMap(chTables) missingTables := b.checkMissingTables(tablesForRestore, chTables) @@ -560,7 +559,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta tablesForRestore[i].Database = targetDB } } - log := log.WithField("table", fmt.Sprintf("%s.%s", dstDatabase, table.Table)) + tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", dstDatabase, table.Table)).Logger() dstTable, ok := dstTablesMap[metadata.TableTitle{ Database: dstDatabase, Table: table.Table}] @@ -569,41 +568,41 @@ func (b 
*Backuper) restoreDataRegular(ctx context.Context, backupName string, ta } // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, log, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { return err } } else { - if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, log, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { return err } } // https://github.com/Altinity/clickhouse-backup/issues/529 for _, mutation := range table.Mutations { if err := b.ch.ApplyMutation(ctx, tablesForRestore[i], mutation); err != nil { - log.Warnf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) + tableLog.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) } } - log.Info("done") + tableLog.Info().Msg("done") } return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } - log.Debugf("data to 'storage' copied") + logger.Debug().Msg("data to 'storage' copied") if err := b.ch.AttachTable(ctx, tablesForRestore[i]); err != nil { return fmt.Errorf("can't attach table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } return nil } -func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, true); err != nil { return fmt.Errorf("can't copy data to datached '%s.%s': %v", table.Database, table.Table, err) } - log.Debugf("data to 'detached' copied") + logger.Debug().Msg("data to 'detached' copied") if err := b.ch.AttachDataParts(tablesForRestore[i], disks); err != nil { return fmt.Errorf("can't attach data parts for table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } @@ -644,11 +643,11 @@ func (b *Backuper) prepareDstTablesMap(chTables []clickhouse.Table) map[metadata return dstTablesMap } -func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore ListOfTables, diskMap map[string]string, log *apexLog.Entry, disks []clickhouse.Disk) []clickhouse.Disk { +func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore 
ListOfTables, diskMap map[string]string, logger zerolog.Logger, disks []clickhouse.Disk) []clickhouse.Disk { for _, t := range tablesForRestore { for disk := range t.Parts { if _, diskExists := diskMap[disk]; !diskExists { - log.Warnf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) + logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) found := false for _, d := range disks { if d.Name == disk { diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index dee1dba2..26bf188c 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -4,10 +4,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/clickhouse" - "github.com/Altinity/clickhouse-backup/pkg/config" - apexLog "github.com/apex/log" - "github.com/google/uuid" "io" "net/url" "os" @@ -17,10 +13,13 @@ import ( "sort" "strings" + "github.com/Altinity/clickhouse-backup/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/pkg/common" + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/filesystemhelper" - "github.com/Altinity/clickhouse-backup/pkg/metadata" + "github.com/google/uuid" + "github.com/rs/zerolog/log" ) type ListOfTables []metadata.TableMetadata @@ -50,7 +49,7 @@ func addTableToListIfNotExistsOrEnrichQueryAndParts(tables ListOfTables, table m func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, metadataPath string, tablePattern string, dropTable bool, partitions []string) (ListOfTables, error) { result := ListOfTables{} tablePatterns := []string{"*"} - log := apexLog.WithField("logger", "getTableListByPatternLocal") + logger := log.With().Str("logger", "getTableListByPatternLocal").Logger() if tablePattern != "" { tablePatterns = strings.Split(tablePattern, ",") } @@ -111,7 +110,7 @@ func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *cli } dataParts, err := os.ReadDir(dataPartsPath) if err != nil { - log.Warn(err.Error()) + logger.Warn().Msg(err.Error()) } parts := map[string][]metadata.Part{ cfg.ClickHouse.EmbeddedBackupDisk: make([]metadata.Part, len(dataParts)), diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 9f5b8d06..ce7459b1 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -27,7 +27,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/filesystemhelper" "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/Altinity/clickhouse-backup/pkg/utils" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "github.com/yargevad/filepathx" ) @@ -56,10 +56,10 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.cfg.General.RemoteStorage == "custom" { return custom.Upload(ctx, b.cfg, backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly) } - log := apexLog.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "upload", - }) + }).Logger() if _, disks, err = b.getLocalBackup(ctx, backupName, nil); err != nil { return fmt.Errorf("can't find local backup: %v", err) } @@ -68,7 +68,7 @@ func (b *Backuper) 
Upload(backupName, diffFrom, diffFromRemote, tablePattern str } defer func() { if err := b.dst.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + logger.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -81,7 +81,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if !b.resume { return fmt.Errorf("'%s' already exists on remote storage", backupName) } else { - log.Warnf("'%s' already exists on remote, will try to resume upload", backupName) + logger.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) } } } @@ -125,13 +125,13 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str compressedDataSize := int64(0) metadataSize := int64(0) - log.Debugf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) + logger.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) uploadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency)) uploadGroup, uploadCtx := errgroup.WithContext(ctx) for i, table := range tablesForUpload { if err := uploadSemaphore.Acquire(uploadCtx, 1); err != nil { - log.Errorf("can't acquire semaphore during Upload table: %v", err) + logger.Error().Msgf("can't acquire semaphore during Upload table: %v", err) break } start := time.Now() @@ -163,11 +163,11 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return err } atomic.AddInt64(&metadataSize, tableMetadataSize) - log. - WithField("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). - WithField("duration", utils.HumanizeDuration(time.Since(start))). - WithField("size", utils.FormatBytes(uint64(uploadedBytes+tableMetadataSize))). - Info("done") + logger.Info(). + Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("size", utils.FormatBytes(uint64(uploadedBytes+tableMetadataSize))). + Msg("done") return nil }) } @@ -230,10 +230,10 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.resume { b.resumableState.Close() } - log. - WithField("duration", utils.HumanizeDuration(time.Since(startUpload))). - WithField("size", utils.FormatBytes(uint64(compressedDataSize)+uint64(metadataSize)+uint64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)). - Info("done") + logger.Info(). + Str("duration", utils.HumanizeDuration(time.Since(startUpload))). + Str("size", utils.FormatBytes(uint64(compressedDataSize)+uint64(metadataSize)+uint64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)). 
+ Msg("done") // Clean if err = b.dst.RemoveOldBackups(ctx, b.cfg.General.BackupsToKeepRemote); err != nil { @@ -246,14 +246,14 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - log := b.log.WithField("logger", "uploadSingleBackupFile") + logger := log.With().Str("logger", "uploadSingleBackupFile").Logger() f, err := os.Open(localFile) if err != nil { return fmt.Errorf("can't open %s: %v", localFile, err) } defer func() { if err := f.Close(); err != nil { - log.Warnf("can't close %v: %v", f, err) + logger.Warn().Msgf("can't close %v: %v", f, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -348,7 +348,7 @@ func (b *Backuper) getTablesForUploadDiffRemote(ctx context.Context, diffFromRem } func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, diffFrom string, diffFromRemote string) error { - log := b.log.WithField("logger", "validateUploadParams") + logger := log.With().Str("logger", "validateUploadParams").Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for upload, change you config or use REMOTE_STORAGE environment variable") } @@ -369,7 +369,7 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, return fmt.Errorf("%s->`compression_format`=%s incompatible with general->upload_by_part=%v", b.cfg.General.RemoteStorage, b.cfg.GetCompressionFormat(), b.cfg.General.UploadByPart) } if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore { - log.Warnf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") + logger.Warn().Msgf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") } if b.cfg.General.RemoteStorage == "custom" && b.resume { return fmt.Errorf("can't resume for `remote_storage: custom`") @@ -451,8 +451,8 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, table for disk := range table.Parts { capacity += len(table.Parts[disk]) } - log := b.log.WithField("logger", "uploadTableData") - log.Debugf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) + logger := log.With().Str("logger", "uploadTableData").Logger() + logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) s := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency)) g, ctx := errgroup.WithContext(ctx) var uploadedBytes int64 @@ -477,7 +477,7 @@ breakByError: continue } if err := s.Acquire(ctx, 1); err != nil { - log.Errorf("can't acquire semaphore during Upload data parts: %v", err) + logger.Error().Msgf("can't acquire semaphore during Upload data parts: %v", err) break breakByError } backupPath := b.getLocalBackupDataPathForTable(backupName, disk, dbAndTablePath) @@ -497,9 +497,9 @@ breakByError: return nil } } - log.Debugf("start upload %d files to %s", len(partFiles), remotePath) + logger.Debug().Msgf("start upload %d files to %s", len(partFiles), remotePath) if uploadPathBytes, err := b.dst.UploadPath(ctx, 0, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { - log.Errorf("UploadPath return 
error: %v", err) + logger.Error().Msgf("UploadPath return error: %v", err) return fmt.Errorf("can't upload: %v", err) } else { atomic.AddInt64(&uploadedBytes, uploadPathBytes) @@ -507,7 +507,7 @@ breakByError: b.resumableState.AppendToState(remotePathFull, uploadPathBytes) } } - log.Debugf("finish upload %d files to %s", len(partFiles), remotePath) + logger.Debug().Msgf("finish upload %d files to %s", len(partFiles), remotePath) return nil }) } else { @@ -523,13 +523,13 @@ breakByError: return nil } } - log.Debugf("start upload %d files to %s", len(localFiles), remoteDataFile) + logger.Debug().Msgf("start upload %d files to %s", len(localFiles), remoteDataFile) retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile) }) if err != nil { - log.Errorf("UploadCompressedStream return error: %v", err) + logger.Error().Msgf("UploadCompressedStream return error: %v", err) return fmt.Errorf("can't upload: %v", err) } remoteFile, err := b.dst.StatFile(ctx, remoteDataFile) @@ -540,7 +540,7 @@ breakByError: if b.resume { b.resumableState.AppendToState(remoteDataFile, remoteFile.Size()) } - log.Debugf("finish upload to %s", remoteDataFile) + logger.Debug().Msgf("finish upload to %s", remoteDataFile) return nil }) } @@ -549,7 +549,7 @@ breakByError: if err := g.Wait(); err != nil { return nil, 0, fmt.Errorf("one of uploadTableData go-routine return error: %v", err) } - log.Debugf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) + logger.Debug().Msgf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) return uploadedFiles, uploadedBytes, nil } @@ -596,7 +596,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s return processedSize, nil } } - log := b.log.WithField("logger", "uploadTableMetadataEmbedded") + logger := log.With().Str("logger", "uploadTableMetadataEmbedded").Logger() localTableMetaFile := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableMetadata.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableMetadata.Table))) localReader, err := os.Open(localTableMetaFile) if err != nil { @@ -604,7 +604,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } defer func() { if err := localReader.Close(); err != nil { - log.Warnf("can't close %v: %v", localReader, err) + logger.Warn().Msgf("can't close %v: %v", localReader, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -625,7 +625,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTable *metadata.TableMetadata, newTable *metadata.TableMetadata, checkLocal bool) { - log := b.log.WithField("logger", "markDuplicatedParts") + logger := log.With().Str("logger", "markDuplicatedParts").Logger() for disk, newParts := range newTable.Parts { if _, diskExists := existsTable.Parts[disk]; diskExists { if len(existsTable.Parts[disk]) == 0 { @@ -645,7 +645,7 @@ func (b *Backuper) 
markDuplicatedParts(backup *metadata.BackupMetadata, existsTa newPath := path.Join(b.DiskToPathMap[disk], "backup", backup.BackupName, "shadow", dbAndTablePath, disk, newParts[i].Name) if err := filesystemhelper.IsDuplicatedParts(existsPath, newPath); err != nil { - log.Debugf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) + logger.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) continue } } @@ -705,7 +705,7 @@ func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part) ([]met } func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - log := b.log.WithField("logger", "splitFilesByName") + logger := log.With().Str("logger", "splitFilesByName").Logger() result := make([]metadata.SplitPartFiles, 0) for i := range parts { if parts[i].Required { @@ -725,7 +725,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - log.Warnf("filepath.Walk return error: %v", err) + logger.Warn().Msgf("filepath.Walk return error: %v", err) } result = append(result, metadata.SplitPartFiles{ Prefix: parts[i].Name, @@ -736,7 +736,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m } func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - log := b.log.WithField("logger", "splitFilesBySize") + logger := log.With().Str("logger", "splitFilesBySize").Logger() var size int64 var files []string maxSize := b.cfg.General.MaxFileSize @@ -769,7 +769,7 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - log.Warnf("filepath.Walk return error: %v", err) + logger.Warn().Msgf("filepath.Walk return error: %v", err) } } if len(files) > 0 { diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index ecc1ad14..e8c51b45 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -3,14 +3,15 @@ package backup import ( "context" "fmt" + "regexp" + "strings" + "time" + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/server/metrics" "github.com/Altinity/clickhouse-backup/pkg/status" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "github.com/urfave/cli" - "regexp" - "strings" - "time" ) var watchBackupTemplateTimeRE = regexp.MustCompile(`{time:([^}]+)}`) @@ -100,17 +101,17 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t if cfg, err := config.LoadConfig(config.GetConfigPath(cliCtx)); err == nil { b.cfg = cfg } else { - b.log.Warnf("watch config.LoadConfig error: %v", err) + log.Warn().Msgf("watch config.LoadConfig error: %v", err) } if err := b.ValidateWatchParams(watchInterval, fullInterval, watchBackupNameTemplate); err != nil { return err } } backupName, err := b.NewBackupWatchName(ctx, backupType) - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "watch", - }) + }).Logger() if err != nil { return err } @@ -129,14 +130,14 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } else { createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { - log.Errorf("create_remote %s return error: %v", backupName, createRemoteErr) + 
logger.Error().Msgf("create_remote %s return error: %v", backupName, createRemoteErr) createRemoteErrCount += 1 } else { createRemoteErrCount = 0 } deleteLocalErr = b.RemoveBackupLocal(ctx, backupName, nil) if deleteLocalErr != nil { - log.Errorf("delete local %s return error: %v", backupName, deleteLocalErr) + logger.Error().Msgf("delete local %s return error: %v", backupName, deleteLocalErr) deleteLocalErrCount += 1 } else { deleteLocalErrCount = 0 diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index dc9aed8d..af115651 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -7,8 +7,7 @@ import ( "database/sql" "errors" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/common" - "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/rs/zerolog" "os" "path" "path/filepath" @@ -17,16 +16,17 @@ import ( "strings" "time" + "github.com/Altinity/clickhouse-backup/pkg/common" "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/ClickHouse/clickhouse-go/v2" - apexLog "github.com/apex/log" + "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/rs/zerolog/log" ) // ClickHouse - provide type ClickHouse struct { Config *config.ClickHouseConfig - Log *apexLog.Entry conn driver.Conn disks []Disk version int @@ -38,7 +38,7 @@ type ClickHouse struct { func (ch *ClickHouse) Connect() error { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Log.Errorf("close previous connection error: %v", err) + log.Error().Msgf("close previous connection error: %v", err) } } ch.IsOpen = false @@ -78,7 +78,7 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCert != "" || ch.Config.TLSKey != "" { cert, err := tls.LoadX509KeyPair(ch.Config.TLSCert, ch.Config.TLSKey) if err != nil { - ch.Log.Errorf("tls.LoadX509KeyPair error: %v", err) + log.Error().Msgf("tls.LoadX509KeyPair error: %v", err) return err } tlsConfig.Certificates = []tls.Certificate{cert} @@ -86,12 +86,12 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCa != "" { caCert, err := os.ReadFile(ch.Config.TLSCa) if err != nil { - ch.Log.Errorf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) + log.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) return err } caCertPool := x509.NewCertPool() if caCertPool.AppendCertsFromPEM(caCert) != true { - ch.Log.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) + log.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) return fmt.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) } tlsConfig.RootCAs = caCertPool @@ -104,23 +104,23 @@ func (ch *ClickHouse) Connect() error { } if ch.conn, err = clickhouse.Open(opt); err != nil { - ch.Log.Errorf("clickhouse connection: %s, sql.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection: %s, sql.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } - logFunc := ch.Log.Infof + logFunc := log.Info() if !ch.Config.LogSQLQueries { - logFunc = ch.Log.Debugf + logFunc = log.Debug() } - logFunc("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + logFunc.Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) err = ch.conn.Ping(context.Background()) if err != nil { - ch.Log.Errorf("clickhouse connection ping: %s 
return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } else { ch.IsOpen = true } - logFunc("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + logFunc.Msgf("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) return err } @@ -259,13 +259,13 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro func (ch *ClickHouse) Close() { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Log.Warnf("can't close clickhouse connection: %v", err) + log.Warn().Msgf("can't close clickhouse connection: %v", err) } } if ch.Config.LogSQLQueries { - ch.Log.Info("clickhouse connection closed") + log.Info().Msg("clickhouse connection closed") } else { - ch.Log.Debug("clickhouse connection closed") + log.Debug().Msg("clickhouse connection closed") } ch.IsOpen = false } @@ -461,7 +461,7 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl var result string // 19.4 doesn't have /var/lib/clickhouse/metadata/default.sql if err := ch.SelectSingleRow(ctx, &result, showDatabaseSQL); err != nil { - ch.Log.Warnf("can't get create database query: %v", err) + log.Warn().Msgf("can't get create database query: %v", err) allDatabases[i].Query = fmt.Sprintf("CREATE DATABASE `%s` ENGINE = %s", db.Name, db.Engine) } else { // 23.3+ masked secrets https://github.com/Altinity/clickhouse-backup/issues/640 @@ -486,7 +486,7 @@ func (ch *ClickHouse) getTableSizeFromParts(ctx context.Context, table Table) ui } query := fmt.Sprintf("SELECT sum(bytes_on_disk) as size FROM system.parts WHERE active AND database='%s' AND table='%s' GROUP BY database, table", table.Database, table.Name) if err := ch.SelectContext(ctx, &tablesSize, query); err != nil { - ch.Log.Warnf("error parsing tablesSize: %v", err) + log.Warn().Msgf("error parsing tablesSize: %v", err) } if len(tablesSize) > 0 { return tablesSize[0].Size @@ -517,7 +517,7 @@ func (ch *ClickHouse) fixVariousVersions(ctx context.Context, t Table, metadataP if strings.Contains(t.CreateTableQuery, "'[HIDDEN]'") { tableSQLPath := path.Join(metadataPath, common.TablePathEncode(t.Database), common.TablePathEncode(t.Name)+".sql") if attachSQL, err := os.ReadFile(tableSQLPath); err != nil { - ch.Log.Warnf("can't read %s: %v", tableSQLPath, err) + log.Warn().Msgf("can't read %s: %v", tableSQLPath, err) } else { t.CreateTableQuery = strings.Replace(string(attachSQL), "ATTACH", "CREATE", 1) t.CreateTableQuery = strings.Replace(t.CreateTableQuery, " _ ", " `"+t.Database+"`.`"+t.Name+"` ", 1) @@ -536,7 +536,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { var err error query := "SELECT value FROM `system`.`build_options` where name='VERSION_INTEGER'" if err = ch.SelectSingleRow(ctx, &result, query); err != nil { - ch.Log.Warnf("can't get ClickHouse version: %v", err) + log.Warn().Msgf("can't get ClickHouse version: %v", err) return 0, nil } ch.version, err = strconv.Atoi(result) @@ -567,7 +567,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name withNameQuery = fmt.Sprintf("WITH NAME '%s'", name) } for _, item := range partitions { - ch.Log.Debugf(" partition '%v'", item.PartitionID) + log.Debug().Msgf(" partition '%v'", item.PartitionID) query := fmt.Sprintf( "ALTER TABLE `%v`.`%v` FREEZE PARTITION ID '%v' %s;", 
table.Database, @@ -585,7 +585,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name } if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Log.Warnf("can't freeze partition: %v", err) + log.Warn().Msgf("can't freeze partition: %v", err) } else { return fmt.Errorf("can't freeze partition '%s': %w", item.PartitionID, err) } @@ -604,9 +604,9 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string if strings.HasPrefix(table.Engine, "Replicated") && ch.Config.SyncReplicatedTables { query := fmt.Sprintf("SYSTEM SYNC REPLICA `%s`.`%s`;", table.Database, table.Name) if err := ch.QueryContext(ctx, query); err != nil { - ch.Log.Warnf("can't sync replica: %v", err) + log.Warn().Msgf("can't sync replica: %v", err) } else { - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Debugf("replica synced") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") } } if version < 19001005 || ch.Config.FreezeByPart { @@ -619,7 +619,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string query := fmt.Sprintf("ALTER TABLE `%s`.`%s` FREEZE %s;", table.Database, table.Name, withNameQuery) if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Log.Warnf("can't freeze table: %v", err) + log.Warn().Msgf("can't freeze table: %v", err) return nil } return fmt.Errorf("can't freeze table: %v", err) @@ -643,7 +643,7 @@ func (ch *ClickHouse) AttachDataParts(table metadata.TableMetadata, disks []Disk if err := ch.Query(query); err != nil { return err } - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).WithField("disk", disk.Name).WithField("part", part.Name).Debug("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") } } } @@ -656,7 +656,7 @@ var uuidRE = regexp.MustCompile(`UUID '([^']+)'`) // AttachTable - execute ATTACH TABLE command for specific table func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetadata) error { if len(table.Parts) == 0 { - apexLog.Warnf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) + log.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) return nil } canContinue, err := ch.CheckReplicationInProgress(table) @@ -703,7 +703,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad return err } - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") return nil } func (ch *ClickHouse) ShowCreateTable(ctx context.Context, database, name string) string { @@ -834,7 +834,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe if onCluster != "" && distributedRE.MatchString(query) { matches := distributedRE.FindAllStringSubmatch(query, -1) if onCluster != strings.Trim(matches[0][2], "'\" ") { - apexLog.Warnf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) + log.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], 
onCluster) query = distributedRE.ReplaceAllString(query, fmt.Sprintf("${1}(%s,${3})", onCluster)) } } @@ -857,7 +857,7 @@ func (ch *ClickHouse) IsClickhouseShadow(path string) bool { } defer func() { if err := d.Close(); err != nil { - ch.Log.Warnf("can't close directory %v", err) + log.Warn().Msgf("can't close directory %v", err) } }() names, err := d.Readdirnames(-1) @@ -908,16 +908,16 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args } func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { - var logF func(msg string) + var logF *zerolog.Event if !ch.Config.LogSQLQueries { - logF = ch.Log.Debug + logF = log.Debug() } else { - logF = ch.Log.Info + logF = log.Info() } if len(args) > 0 { - logF(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) + logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) } else { - logF(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) + logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) } return query } @@ -1049,10 +1049,10 @@ func (ch *ClickHouse) CheckReplicationInProgress(table metadata.TableMetadata) ( return false, fmt.Errorf("invalid result for check exists replicas: %+v", existsReplicas) } if existsReplicas[0].InProgress > 0 { - ch.Log.Warnf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) + log.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) return false, nil } else { - ch.Log.Infof("replication_in_progress status = %+v", existsReplicas) + log.Info().Msgf("replication_in_progress status = %+v", existsReplicas) } } return true, nil @@ -1089,7 +1089,7 @@ func (ch *ClickHouse) CheckSystemPartsColumns(ctx context.Context, table *Table) } if len(isPartsColumnsInconsistent) > 0 { for i := range isPartsColumnsInconsistent { - ch.Log.Errorf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) + log.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) } return fmt.Errorf("`%s`.`%s` have inconsistent data types for active data part in system.parts_columns", table.Database, table.Name) } diff --git a/pkg/config/config.go b/pkg/config/config.go index 38cc1f35..bc6907ed 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -3,15 +3,16 @@ package config import ( "crypto/tls" "fmt" - s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "math" "os" "runtime" "strings" "time" - "github.com/apex/log" + "github.com/Altinity/clickhouse-backup/pkg/log_helper" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/kelseyhightower/envconfig" + "github.com/rs/zerolog/log" "github.com/urfave/cli" "gopkg.in/yaml.v3" ) @@ -289,7 +290,7 @@ func LoadConfig(configLocation string) (*Config, error) { return nil, err } - //auto tuning upload_concurrency for storage types which not have SDK level concurrency, https://github.com/Altinity/clickhouse-backup/issues/658 + //adjust upload_concurrency for storage types which not have SDK level concurrency, https://github.com/Altinity/clickhouse-backup/issues/658 cfgWithoutDefault 
:= &Config{} if err := yaml.Unmarshal(configYaml, &cfgWithoutDefault); err != nil { return nil, fmt.Errorf("can't parse config file: %v", err) @@ -303,7 +304,7 @@ func LoadConfig(configLocation string) (*Config, error) { cfg.AzureBlob.Path = strings.TrimPrefix(cfg.AzureBlob.Path, "/") cfg.S3.Path = strings.TrimPrefix(cfg.S3.Path, "/") cfg.GCS.Path = strings.TrimPrefix(cfg.GCS.Path, "/") - log.SetLevelFromString(cfg.General.LogLevel) + log_helper.SetLogLevelFromString(cfg.General.LogLevel) return cfg, ValidateConfig(cfg) } @@ -548,7 +549,7 @@ func GetConfigFromCli(ctx *cli.Context) *Config { configPath := GetConfigPath(ctx) cfg, err := LoadConfig(configPath) if err != nil { - log.Fatal(err.Error()) + log.Fatal().Stack().Err(err).Send() } return cfg } diff --git a/pkg/custom/delete_custom.go b/pkg/custom/delete_custom.go index aa59691d..d737865f 100644 --- a/pkg/custom/delete_custom.go +++ b/pkg/custom/delete_custom.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/utils" - "github.com/apex/log" + "github.com/rs/zerolog/log" "time" ) @@ -25,17 +25,17 @@ func DeleteRemote(ctx context.Context, cfg *config.Config, backupName string) er args := ApplyCommandTemplate(cfg.Custom.DeleteCommand, templateData) err := utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) if err == nil { - log.WithFields(log.Fields{ + log.Info().Fields(map[string]interface{}{ "backup": backupName, "operation": "delete_custom", "duration": utils.HumanizeDuration(time.Since(startCustomDelete)), - }).Info("done") + }).Msg("done") return nil } else { - log.WithFields(log.Fields{ + log.Error().Fields(map[string]interface{}{ "backup": backupName, "operation": "delete_custom", - }).Error(err.Error()) + }).Msg(err.Error()) return err } diff --git a/pkg/custom/download_custom.go b/pkg/custom/download_custom.go index cb78b65e..64b16230 100644 --- a/pkg/custom/download_custom.go +++ b/pkg/custom/download_custom.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/utils" - "github.com/apex/log" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog/log" "time" ) @@ -39,15 +39,15 @@ func Download(ctx context.Context, cfg *config.Config, backupName string, tableP return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) }) if err == nil { - log. - WithField("operation", "download_custom"). - WithField("duration", utils.HumanizeDuration(time.Since(startCustomDownload))). - Info("done") + log.Info(). + Str("operation", "download_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomDownload))). + Msg("done") return nil } else { - log. - WithField("operation", "download_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "download_custom"). + Err(err).Send() return err } } diff --git a/pkg/custom/list_custom.go b/pkg/custom/list_custom.go index 8ed02d03..57e28735 100644 --- a/pkg/custom/list_custom.go +++ b/pkg/custom/list_custom.go @@ -7,7 +7,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/storage" "github.com/Altinity/clickhouse-backup/pkg/utils" - "github.com/apex/log" + "github.com/rs/zerolog/log" "strings" "time" ) @@ -32,15 +32,15 @@ func List(ctx context.Context, cfg *config.Config) ([]storage.Backup, error) { } } } - log. - WithField("operation", "list_custom"). 
- WithField("duration", utils.HumanizeDuration(time.Since(startCustomList))). - Info("done") + log.Info(). + Str("operation", "list_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomList))). + Msg("done") return backupList, nil } else { - log. - WithField("operation", "list_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "list_custom"). + Err(err).Send() return nil, err } } diff --git a/pkg/custom/upload_custom.go b/pkg/custom/upload_custom.go index 6f48be42..24bff2a9 100644 --- a/pkg/custom/upload_custom.go +++ b/pkg/custom/upload_custom.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/utils" - "github.com/apex/log" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog/log" "time" ) @@ -45,15 +45,15 @@ func Upload(ctx context.Context, cfg *config.Config, backupName, diffFrom, diffF return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) }) if err == nil { - log. - WithField("operation", "upload_custom"). - WithField("duration", utils.HumanizeDuration(time.Since(startCustomUpload))). - Info("done") + log.Info(). + Str("operation", "upload_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomUpload))). + Msg("done") return nil } else { - log. - WithField("operation", "upload_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "upload_custom"). + Err(err).Send() return err } } diff --git a/pkg/custom/utils.go b/pkg/custom/utils.go index 245dd5f5..498ae3b7 100644 --- a/pkg/custom/utils.go +++ b/pkg/custom/utils.go @@ -2,8 +2,8 @@ package custom import ( "bytes" - "github.com/apex/log" "github.com/google/shlex" + "github.com/rs/zerolog/log" "text/template" ) @@ -11,18 +11,18 @@ func ApplyCommandTemplate(command string, templateData interface{}) []string { var b bytes.Buffer tpl, err := template.New("").Parse(command) if err != nil { - log.Warnf("custom command template.Parse error: %v", err) + log.Warn().Msgf("custom command template.Parse error: %v", err) return []string{command} } err = tpl.Execute(&b, templateData) if err != nil { - log.Warnf("custom command template.Execute error: %v", err) + log.Warn().Msgf("custom command template.Execute error: %v", err) return []string{command} } args, err := shlex.Split(b.String()) if err != nil { - log.Warnf("parse shell command %s error: %v", b.String(), err) + log.Warn().Msgf("parse shell command %s error: %v", b.String(), err) return []string{command} } return args diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index d2940c0c..2321cea5 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -3,8 +3,6 @@ package filesystemhelper import ( "context" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/partition" - "github.com/Altinity/clickhouse-backup/pkg/utils" "os" "path" "path/filepath" @@ -17,7 +15,9 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/pkg/common" "github.com/Altinity/clickhouse-backup/pkg/metadata" - apexLog "github.com/apex/log" + "github.com/Altinity/clickhouse-backup/pkg/partition" + "github.com/Altinity/clickhouse-backup/pkg/utils" + "github.com/rs/zerolog/log" ) var ( @@ -116,15 +116,15 @@ func MkdirAll(path string, ch *clickhouse.ClickHouse, disks []clickhouse.Disk) e return nil } -// HardlinkBackupPartsToStorage - copy partitions for specific table to detached folder +// 
HardlinkBackupPartsToStorage - copy parts for specific table to detached folder func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, tableDataPaths []string, ch *clickhouse.ClickHouse, toDetached bool) error { dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) - log := apexLog.WithFields(apexLog.Fields{"operation": "HardlinkBackupPartsToStorage"}) + logger := log.With().Fields(map[string]interface{}{"operation": "HardlinkBackupPartsToStorage"}).Logger() start := time.Now() for _, backupDisk := range disks { backupDiskName := backupDisk.Name if len(backupTable.Parts[backupDiskName]) == 0 { - log.Debugf("%s disk have no parts", backupDisk.Name) + logger.Debug().Msgf("%s disk have no parts", backupDisk.Name) continue } dstParentDir := dstDataPaths[backupDiskName] @@ -136,9 +136,9 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM info, err := os.Stat(dstPartPath) if err != nil { if os.IsNotExist(err) { - log.Debugf("MkDirAll %s", dstPartPath) + logger.Debug().Msgf("MkDirAll %s", dstPartPath) if mkdirErr := MkdirAll(dstPartPath, ch, disks); mkdirErr != nil { - log.Warnf("error during Mkdir %+v", mkdirErr) + logger.Warn().Msgf("error during Mkdir %+v", mkdirErr) } } else { return err @@ -159,14 +159,14 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM filename := strings.Trim(strings.TrimPrefix(filePath, partPath), "/") dstFilePath := filepath.Join(dstPartPath, filename) if info.IsDir() { - log.Debugf("MkDir %s", dstFilePath) + logger.Debug().Msgf("MkDir %s", dstFilePath) return Mkdir(dstFilePath, ch, disks) } if !info.Mode().IsRegular() { - log.Debugf("'%s' is not a regular file, skipping.", filePath) + logger.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) return nil } - log.Debugf("Link %s -> %s", filePath, dstFilePath) + logger.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) if err := os.Link(filePath, dstFilePath); err != nil { if !os.IsExist(err) { return fmt.Errorf("failed to create hard link '%s' -> '%s': %w", filePath, dstFilePath, err) @@ -178,7 +178,7 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM } } } - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).Debugf("done") + logger.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") return nil } @@ -194,7 +194,7 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa } func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap) ([]metadata.Part, int64, error) { - log := apexLog.WithField("logger", "MoveShadow") + logger := log.With().Str("logger", "MoveShadow").Logger() size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { @@ -221,7 +221,7 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E return os.MkdirAll(dstFilePath, 0750) } if !info.Mode().IsRegular() { - log.Debugf("'%s' is not a regular file, skipping", filePath) + logger.Debug().Msgf("'%s' is not a regular file, skipping", filePath) return nil } size += info.Size() @@ -231,14 +231,14 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E } func IsDuplicatedParts(part1, part2 string) error { - log := apexLog.WithField("logger", "IsDuplicatedParts") + logger := log.With().Str("logger", "IsDuplicatedParts").Logger() p1, err := 
os.Open(part1) if err != nil { return err } defer func() { if err = p1.Close(); err != nil { - log.Warnf("Can't close %s", part1) + logger.Warn().Msgf("Can't close %s", part1) } }() p2, err := os.Open(part2) @@ -247,7 +247,7 @@ func IsDuplicatedParts(part1, part2 string) error { } defer func() { if err = p2.Close(); err != nil { - log.Warnf("Can't close %s", part2) + logger.Warn().Msgf("Can't close %s", part2) } }() pf1, err := p1.Readdirnames(-1) @@ -295,7 +295,7 @@ func CreatePartitionsToBackupMap(ctx context.Context, ch *clickhouse.ClickHouse, for _, partitionTuple := range partitionTupleRE.Split(partitionArg, -1) { for _, item := range tablesFromClickHouse { if err, partitionId := partition.GetPartitionId(ctx, ch, item.Database, item.Name, item.CreateTableQuery, partitionTuple); err != nil { - apexLog.Errorf("partition.GetPartitionId error: %v", err) + log.Error().Msgf("partition.GetPartitionId error: %v", err) return make(common.EmptyMap, 0), partitions } else if partitionId != "" { partitionsMap[partitionId] = struct{}{} @@ -303,7 +303,7 @@ func CreatePartitionsToBackupMap(ctx context.Context, ch *clickhouse.ClickHouse, } for _, item := range tablesFromMetadata { if err, partitionId := partition.GetPartitionId(ctx, ch, item.Database, item.Table, item.Query, partitionTuple); err != nil { - apexLog.Errorf("partition.GetPartitionId error: %v", err) + log.Error().Msgf("partition.GetPartitionId error: %v", err) return make(common.EmptyMap, 0), partitions } else if partitionId != "" { partitionsMap[partitionId] = struct{}{} diff --git a/pkg/log_helper/log_level.go b/pkg/log_helper/log_level.go new file mode 100644 index 00000000..f9fbbaf8 --- /dev/null +++ b/pkg/log_helper/log_level.go @@ -0,0 +1,22 @@ +package log_helper + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func SetLogLevelFromString(logLevel string) { + allLogLevels := map[string]zerolog.Level{ + "error": zerolog.ErrorLevel, + "warning": zerolog.WarnLevel, + "info": zerolog.InfoLevel, + "debug": zerolog.DebugLevel, + } + level := zerolog.InfoLevel + var ok bool + if level, ok = allLogLevels[logLevel]; !ok { + log.Warn().Msgf("unexpected log_level=%v, will apply `info`", logLevel) + level = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(level) +} diff --git a/pkg/logcli/cli.go b/pkg/logcli/cli.go deleted file mode 100644 index bba80cfb..00000000 --- a/pkg/logcli/cli.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package logcli implements a colored text handler suitable for command-line interfaces. -package logcli - -import ( - "fmt" - "github.com/apex/log" - "io" - "os" - "sync" -) - -// Strings mapping. -var Strings = [...]string{ - log.DebugLevel: "debug", - log.InfoLevel: " info", - log.WarnLevel: " warn", - log.ErrorLevel: "error", - log.FatalLevel: "error", -} - -// Handler implementation. -type Handler struct { - mu sync.Mutex - Writer io.Writer - Padding int -} - -// New handler. -func New(w io.Writer) *Handler { - if f, ok := w.(*os.File); ok { - return &Handler{ - Writer: f, - Padding: 3, - } - } - - return &Handler{ - Writer: w, - Padding: 3, - } -} - -// HandleLog implements log.Handler. 
-func (h *Handler) HandleLog(e *log.Entry) error { - level := Strings[e.Level] - names := e.Fields.Names() - - h.mu.Lock() - defer h.mu.Unlock() - - _, _ = fmt.Fprintf(h.Writer, "%s %-5s %-25s", e.Timestamp.Format("2006/01/02 15:04:05.000000"), level, e.Message) - - for _, name := range names { - if name == "source" { - continue - } - _, _ = fmt.Fprintf(h.Writer, " %s=%v", name, e.Fields.Get(name)) - } - - _, _ = fmt.Fprintln(h.Writer) - - return nil -} diff --git a/pkg/logfmt/logfmt.go b/pkg/logfmt/logfmt.go deleted file mode 100644 index e92ddbd2..00000000 --- a/pkg/logfmt/logfmt.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package logfmt implements a "logfmt" format handler. -package logfmt - -import ( - "io" - "sync" - - "github.com/apex/log" - "github.com/go-logfmt/logfmt" -) - -// Handler implementation. -type Handler struct { - mu sync.Mutex - enc *logfmt.Encoder -} - -// New handler. -func New(w io.Writer) *Handler { - return &Handler{ - enc: logfmt.NewEncoder(w), - } -} - -// HandleLog implements log.Handler. -func (h *Handler) HandleLog(e *log.Entry) error { - names := e.Fields.Names() - - h.mu.Lock() - defer h.mu.Unlock() - - _ = h.enc.EncodeKeyval("ts", e.Timestamp) - _ = h.enc.EncodeKeyval("lvl", e.Level.String()) - _ = h.enc.EncodeKeyval("msg", e.Message) - - for _, name := range names { - _ = h.enc.EncodeKeyval(name, e.Fields.Get(name)) - } - - _ = h.enc.EndRecord() - - return nil -} diff --git a/pkg/logfmt/logfmt_test.go b/pkg/logfmt/logfmt_test.go deleted file mode 100644 index 625eb03a..00000000 --- a/pkg/logfmt/logfmt_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package logfmt_test - -import ( - "bytes" - "github.com/Altinity/clickhouse-backup/pkg/logfmt" - "io" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/apex/log" -) - -func init() { - log.Now = func() time.Time { - return time.Unix(0, 0).UTC() - } -} - -func TestLogFmt(t *testing.T) { - var buf bytes.Buffer - - log.SetHandler(logfmt.New(&buf)) - log.WithField("user", "tj").WithField("id", "123").Info("hello") - log.Info("world") - log.Error("boom") - - expected := `ts=1970-01-01T00:00:00Z lvl=info msg=hello id=123 user=tj -ts=1970-01-01T00:00:00Z lvl=info msg=world -ts=1970-01-01T00:00:00Z lvl=error msg=boom -` - - assert.Equal(t, expected, buf.String()) -} - -func Benchmark(b *testing.B) { - log.SetHandler(logfmt.New(io.Discard)) - ctx := log.WithField("user", "tj").WithField("id", "123") - - for i := 0; i < b.N; i++ { - ctx.Info("hello") - } -} diff --git a/pkg/metadata/load.go b/pkg/metadata/load.go index 84dae823..f996da92 100644 --- a/pkg/metadata/load.go +++ b/pkg/metadata/load.go @@ -2,12 +2,12 @@ package metadata import ( "encoding/json" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "os" ) func (tm *TableMetadata) Load(location string) (uint64, error) { - log := apexLog.WithField("logger", "metadata.Load") + logger := log.With().Str("logger", "metadata.Load").Logger() data, err := os.ReadFile(location) if err != nil { return 0, err @@ -15,6 +15,6 @@ func (tm *TableMetadata) Load(location string) (uint64, error) { if err := json.Unmarshal(data, tm); err != nil { return 0, err } - log.Debugf("success %s", location) + logger.Debug().Msgf("success %s", location) return uint64(len(data)), nil } diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index 8c653682..83396c6f 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -3,19 +3,21 @@ package resumable import ( "encoding/json" "fmt" - apexLog "github.com/apex/log" + "github.com/rs/zerolog" "os" "path" 
"strconv" "strings" "sync" + + "github.com/rs/zerolog/log" ) type State struct { stateFile string currentState string params map[string]interface{} - log *apexLog.Entry + logger zerolog.Logger fp *os.File mx *sync.RWMutex } @@ -25,11 +27,11 @@ func NewState(defaultDiskPath, backupName, command string, params map[string]int stateFile: path.Join(defaultDiskPath, "backup", backupName, fmt.Sprintf("%s.state", command)), currentState: "", mx: &sync.RWMutex{}, - log: apexLog.WithField("logger", "resumable"), + logger: log.With().Str("logger", "resumable").Logger(), } fp, err := os.OpenFile(s.stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil { - s.log.Warnf("can't open %s error: %v", s.stateFile, err) + s.logger.Warn().Msgf("can't open %s error: %v", s.stateFile, err) } s.fp = fp s.LoadState() @@ -55,7 +57,7 @@ func (s *State) LoadParams() { //size 0 during write lines[0] = strings.TrimSuffix(lines[0], ":0") if err := json.Unmarshal([]byte(lines[0]), &s.params); err != nil { - apexLog.Errorf("can't parse state file line 0 as []interface{}: %s", lines[0]) + s.logger.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) } } @@ -67,9 +69,9 @@ func (s *State) LoadState() { } else { s.currentState = "" if !os.IsNotExist(err) { - s.log.Warnf("can't read %s error: %v", s.stateFile, err) + s.logger.Warn().Msgf("can't read %s error: %v", s.stateFile, err) } else { - s.log.Warnf("%s empty, will continue from scratch error: %v", s.stateFile, err) + s.logger.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) } } s.mx.Unlock() @@ -81,11 +83,11 @@ func (s *State) AppendToState(path string, size int64) { if s.fp != nil { _, err := s.fp.WriteString(path + "\n") if err != nil { - s.log.Warnf("can't write %s error: %v", s.stateFile, err) + s.logger.Warn().Msgf("can't write %s error: %v", s.stateFile, err) } err = s.fp.Sync() if err != nil { - s.log.Warnf("can't sync %s error: %v", s.stateFile, err) + s.logger.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) } } s.currentState += path + "\n" @@ -102,12 +104,12 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { s.mx.RLock() res := strings.Index(s.currentState, path+":") if res >= 0 { - s.log.Infof("%s already processed", path) + s.logger.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - s.log.Warnf("invalid size %s in upload state: %v", sSize, err) + s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() diff --git a/pkg/server/metrics/metrics.go b/pkg/server/metrics/metrics.go index a72b8b36..062656a6 100644 --- a/pkg/server/metrics/metrics.go +++ b/pkg/server/metrics/metrics.go @@ -2,9 +2,11 @@ package metrics import ( "fmt" - apexLog "github.com/apex/log" - "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog/log" ) type APIMetricsInterface interface { @@ -32,7 +34,7 @@ type APIMetrics struct { NumberBackupsLocalExpected prometheus.Gauge SubCommands map[string][]string - log *apexLog.Entry + logger zerolog.Logger } func NewAPIMetrics() *APIMetrics { @@ -41,7 +43,7 @@ func NewAPIMetrics() *APIMetrics { "create_remote": {"create", "upload"}, "restore_remote": {"download", "restore"}, }, - log: apexLog.WithField("logger", "metrics"), + logger: 
log.With().Str("logger", "metrics").Logger(), } return metrics } @@ -173,7 +175,7 @@ func (m *APIMetrics) Start(command string, startTime time.Time) { } } } else { - m.log.Warnf("%s not found in LastStart metrics", command) + m.logger.Warn().Msgf("%s not found in LastStart metrics", command) } } func (m *APIMetrics) Finish(command string, startTime time.Time) { @@ -189,19 +191,19 @@ func (m *APIMetrics) Finish(command string, startTime time.Time) { } } } else { - m.log.Warnf("%s not found in LastFinish", command) + m.logger.Warn().Msgf("%s not found in LastFinish", command) } } func (m *APIMetrics) Success(command string) { if _, exists := m.SuccessfulCounter[command]; exists { m.SuccessfulCounter[command].Inc() } else { - m.log.Warnf("%s not found in SuccessfulCounter metrics", command) + m.logger.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(1) } else { - m.log.Warnf("%s not found in LastStatus metrics", command) + m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -209,12 +211,12 @@ func (m *APIMetrics) Failure(command string) { if _, exists := m.FailedCounter[command]; exists { m.FailedCounter[command].Inc() } else { - m.log.Warnf("%s not found in FailedCounter metrics", command) + m.logger.Warn().Msgf("%s not found in FailedCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(0) } else { - m.log.Warnf("%s not found in LastStatus metrics", command) + m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -224,7 +226,7 @@ func (m *APIMetrics) ExecuteWithMetrics(command string, errCounter int, f func() err := f() m.Finish(command, startTime) if err != nil { - m.log.Errorf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) + m.logger.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) errCounter += 1 m.Failure(command) } else { diff --git a/pkg/server/server.go b/pkg/server/server.go index f639aa40..d121f22e 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -6,6 +6,7 @@ import ( "encoding/json" "errors" "fmt" + "github.com/rs/zerolog" "io" "net/http" "net/http/pprof" @@ -28,11 +29,10 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/server/metrics" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/Altinity/clickhouse-backup/pkg/utils" - - apexLog "github.com/apex/log" "github.com/google/shlex" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/zerolog/log" "github.com/urfave/cli" ) @@ -44,7 +44,7 @@ type APIServer struct { server *http.Server restart chan struct{} metrics *metrics.APIMetrics - log *apexLog.Entry + log zerolog.Logger routes []string clickhouseBackupVersion string } @@ -55,25 +55,24 @@ var ( // Run - expose CLI commands as REST API func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBackupVersion string) error { - log := apexLog.WithField("logger", "server.Run") + logger := log.With().Str("logger", "server.Run").Logger() var ( cfg *config.Config err error ) - log.Debug("Wait for ClickHouse") + logger.Debug().Msg("Wait for ClickHouse") for { cfg, err = config.LoadConfig(configPath) if err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } ch := clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Log: apexLog.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { - 
log.Error(err.Error()) + logger.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } @@ -88,16 +87,16 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack restart: make(chan struct{}), clickhouseBackupVersion: clickhouseBackupVersion, metrics: metrics.NewAPIMetrics(), - log: apexLog.WithField("logger", "server"), + log: log.With().Str("logger", "server").Logger(), } if cfg.API.CreateIntegrationTables { if err := api.CreateIntegrationTables(); err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() } } api.metrics.RegisterMetrics() - log.Infof("Starting API server on %s", api.config.API.ListenAddr) + logger.Info().Msgf("Starting API server on %s", api.config.API.ListenAddr) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, os.Interrupt, syscall.SIGTERM) sighup := make(chan os.Signal, 1) @@ -108,14 +107,14 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack if api.config.API.CompleteResumableAfterRestart { go func() { if err := api.ResumeOperationsAfterRestart(); err != nil { - log.Errorf("ResumeOperationsAfterRestart return error: %v", err) + logger.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) } }() } go func() { if err := api.UpdateBackupMetrics(context.Background(), false); err != nil { - log.Errorf("UpdateBackupMetrics return error: %v", err) + logger.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() @@ -127,18 +126,18 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack select { case <-api.restart: if err := api.Restart(); err != nil { - log.Errorf("Failed to restarting API server: %v", err) + logger.Error().Msgf("Failed to restarting API server: %v", err) continue } - log.Infof("Reloaded by HTTP") + logger.Info().Msgf("Reloaded by HTTP") case <-sighup: if err := api.Restart(); err != nil { - log.Errorf("Failed to restarting API server: %v", err) + logger.Error().Msgf("Failed to restarting API server: %v", err) continue } - log.Info("Reloaded by SIGHUP") + logger.Info().Msg("Reloaded by SIGHUP") case <-sigterm: - log.Info("Stopping API server") + logger.Info().Msg("Stopping API server") return api.Stop() } } @@ -149,7 +148,7 @@ func (api *APIServer) GetMetrics() *metrics.APIMetrics { } func (api *APIServer) RunWatch(cliCtx *cli.Context) { - api.log.Info("Starting API Server in watch mode") + api.log.Info().Msg("Starting API Server in watch mode") b := backup.NewBackuper(api.config) commandId, _ := status.Current.Start("watch") err := b.Watch( @@ -167,7 +166,7 @@ func (api *APIServer) Stop() error { } func (api *APIServer) Restart() error { - log := apexLog.WithField("logger", "server.Restart") + logger := log.With().Str("logger", "server.Restart").Logger() _, err := api.ReloadConfig(nil, "restart") if err != nil { return err @@ -183,9 +182,9 @@ func (api *APIServer) Restart() error { err = api.server.ListenAndServeTLS(api.config.API.CertificateFile, api.config.API.PrivateKeyFile) if err != nil { if err == http.ErrServerClosed { - log.Warnf("ListenAndServeTLS get signal: %s", err.Error()) + logger.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) } else { - log.Fatalf("ListenAndServeTLS error: %s", err.Error()) + logger.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) } } }() @@ -194,9 +193,9 @@ func (api *APIServer) Restart() error { go func() { if err = api.server.ListenAndServe(); err != nil { if err == http.ErrServerClosed { - log.Warnf("ListenAndServe get signal: %s", err.Error()) + 
logger.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) } else { - log.Fatalf("ListenAndServe error: %s", err.Error()) + logger.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) } } }() @@ -206,7 +205,7 @@ func (api *APIServer) Restart() error { // registerHTTPHandlers - resister API routes func (api *APIServer) registerHTTPHandlers() *http.Server { - log := apexLog.WithField("logger", "registerHTTPHandlers") + logger := log.With().Str("logger", "registerHTTPHandlers").Logger() r := mux.NewRouter() r.Use(api.basicAuthMiddleware) r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -246,7 +245,7 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { routes = append(routes, t) return nil }); err != nil { - log.Errorf("mux.Router.Walk return error: %v", err) + logger.Error().Msgf("mux.Router.Walk return error: %v", err) return nil } @@ -262,9 +261,9 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/metrics" { - api.log.Infof("API call %s %s", r.Method, r.URL.Path) + api.log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) } else { - api.log.Debugf("API call %s %s", r.Method, r.URL.Path) + api.log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) } user, pass, _ := r.BasicAuth() query := r.URL.Query() @@ -275,11 +274,11 @@ func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { pass = p[0] } if (user != api.config.API.Username) || (pass != api.config.API.Password) { - api.log.Warnf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) + api.log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) w.Header().Set("WWW-Authenticate", "Basic realm=\"Provide username and password\"") w.WriteHeader(http.StatusUnauthorized) if _, err := w.Write([]byte("401 Unauthorized\n")); err != nil { - api.log.Errorf("RequestWriter.Write return error: %v", err) + api.log.Error().Msgf("RequestWriter.Write return error: %v", err) } return } @@ -316,7 +315,7 @@ func (api *APIServer) actions(w http.ResponseWriter, r *http.Request) { api.writeError(w, http.StatusBadRequest, string(line), err) return } - api.log.Infof("/backup/actions call: %s", row.Command) + api.log.Info().Msgf("/backup/actions call: %s", row.Command) args, err := shlex.Split(row.Command) if err != nil { api.writeError(w, http.StatusBadRequest, "", err) @@ -373,16 +372,16 @@ func (api *APIServer) actionsDeleteHandler(row status.ActionRow, args []string, if err != nil { return actionsResults, err } - api.log.Info("DELETED") go func() { if err := api.UpdateBackupMetrics(context.Background(), args[1] == "local"); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() actionsResults = append(actionsResults, actionsResultsRow{ Status: "success", Operation: row.Command, }) + api.log.Info().Msg("DELETED") return actionsResults, nil } @@ -397,12 +396,12 @@ func (api *APIServer) actionsAsyncCommandsHandler(command string, args []string, }) status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("API /backup/actions error: %v", err) + api.log.Error().Msgf("API /backup/actions error: %v", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), command == "create" || command == "restore"); err != nil { - 
api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() }() @@ -433,7 +432,7 @@ func (api *APIServer) actionsKillHandler(row status.ActionRow, args []string, ac func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row status.ActionRow, command string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } commandId, ctx := status.Current.Start(command) @@ -445,14 +444,14 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Errorf("Clean remote broken error: %v", err) + api.log.Error().Msgf("Clean remote broken error: %v", err) status.Current.Stop(commandId, err) return actionsResults, err } - api.log.Info("CLEANED") + api.log.Info().Msg("CLEANED") metricsErr := api.UpdateBackupMetrics(ctx, false) if metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } status.Current.Stop(commandId, nil) actionsResults = append(actionsResults, actionsResultsRow{ @@ -464,7 +463,7 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.ActionRow, args []string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if (!api.config.API.AllowParallel && status.Current.InProgress()) || status.Current.CheckCommandInProgress(row.Command) { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } cfg, err := api.ReloadConfig(w, "watch") @@ -542,7 +541,7 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("Watch error: %v", err) + api.log.Error().Msgf("Watch error: %v", err) return } }() @@ -565,7 +564,7 @@ func (api *APIServer) actionsLog(w http.ResponseWriter, r *http.Request) { if q.Get("last") != "" { last, err = strconv.ParseInt(q.Get("last"), 10, 16) if err != nil { - api.log.Warn(err.Error()) + api.log.Warn().Err(err).Send() api.writeError(w, http.StatusInternalServerError, "actions", err) return } @@ -780,7 +779,7 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { // httpCreateHandler - create a backup func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "create", ErrAPILocked) return } @@ -836,7 +835,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + api.log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "create", err) return } @@ -848,13 +847,13 @@ func (api *APIServer) httpCreateHandler(w 
http.ResponseWriter, r *http.Request) return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { - api.log.Errorf("API /backup/create error: %v", err) + api.log.Error().Msgf("API /backup/create error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, true); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -876,7 +875,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) // httpWatchHandler - run watch command go routine, can't run the same watch command twice func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "watch", ErrAPILocked) return } @@ -941,7 +940,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { } if status.Current.CheckCommandInProgress(fullCommand) { - api.log.Warnf("%s error: %v", fullCommand, ErrAPILocked) + api.log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked) api.writeError(w, http.StatusLocked, "watch", ErrAPILocked) return } @@ -952,7 +951,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("Watch error: %v", err) + api.log.Error().Msgf("Watch error: %v", err) return } }() @@ -976,7 +975,7 @@ func (api *APIServer) httpCleanHandler(w http.ResponseWriter, _ *http.Request) { err = b.Clean(ctx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("Clean error: %v", err) + api.log.Error().Msgf("Clean error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean", err) return } @@ -1001,14 +1000,14 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Errorf("Clean remote broken error: %v", err) + api.log.Error().Msgf("Clean remote broken error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err) return } err = api.UpdateBackupMetrics(ctx, false) if err != nil { - api.log.Errorf("Clean remote broken error: %v", err) + api.log.Error().Msgf("Clean remote broken error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err) return } @@ -1025,7 +1024,7 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt // httpUploadHandler - upload a backup to remote storage func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "upload", ErrAPILocked) return } @@ -1073,7 +1072,7 @@ func (api *APIServer) httpUploadHandler(w 
http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + api.log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "upload", err) return } @@ -1085,13 +1084,13 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) return b.Upload(name, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) }) if err != nil { - api.log.Errorf("Upload error: %v", err) + api.log.Error().Msgf("Upload error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, false); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -1119,7 +1118,7 @@ var databaseMappingRE = regexp.MustCompile(`[\w+]:[\w+]`) // httpRestoreHandler - restore a backup from local storage func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "restore", ErrAPILocked) return } @@ -1197,7 +1196,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + api.log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "restore", err) return } @@ -1210,7 +1209,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) }) status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("API /backup/restore error: %v", err) + api.log.Error().Msgf("API /backup/restore error: %v", err) api.errorCallback(r.Context(), err, callback) return } @@ -1230,7 +1229,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) // httpDownloadHandler - download a backup from remote to local storage func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "download", ErrAPILocked) return } @@ -1268,7 +1267,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + api.log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "download", err) return } @@ -1280,13 +1279,13 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request return b.Download(name, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) }) if err != nil { - api.log.Errorf("API /backup/download error: %v", err) + api.log.Error().Msgf("API /backup/download error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, true); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -1308,7 +1307,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request // 
httpDeleteHandler - delete a backup from local or remote storage func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + api.log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "delete", ErrAPILocked) return } @@ -1330,13 +1329,13 @@ func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) } status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("delete backup error: %v", err) + api.log.Error().Msgf("delete backup error: %v", err) api.writeError(w, http.StatusInternalServerError, "delete", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), vars["where"] == "local"); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() api.sendJSONEachRow(w, http.StatusOK, struct { @@ -1368,7 +1367,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e numberBackupsRemote := 0 numberBackupsRemoteBroken := 0 - api.log.Infof("Update backup metrics start (onlyLocal=%v)", onlyLocal) + api.log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal) if !api.config.API.EnableMetrics { return nil } @@ -1430,7 +1429,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e api.metrics.LastFinish["create_remote"].Set(float64(lastBackupUpload.Unix())) } } - api.log.WithFields(apexLog.Fields{ + api.log.Info().Fields(map[string]interface{}{ "duration": utils.HumanizeDuration(time.Since(startTime)), "LastBackupCreateLocal": lastBackupCreateLocal, "LastBackupCreateRemote": lastBackupCreateRemote, @@ -1439,7 +1438,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e "LastBackupSizeLocal": lastSizeLocal, "NumberBackupsLocal": numberBackupsLocal, "NumberBackupsRemote": numberBackupsRemote, - }).Info("Update backup metrics finish") + }).Msg("Update backup metrics finish") return nil } @@ -1469,10 +1468,9 @@ func (api *APIServer) registerMetricsHandlers(r *mux.Router, enableMetrics bool, } func (api *APIServer) CreateIntegrationTables() error { - api.log.Infof("Create integration tables") + api.log.Info().Msgf("Create integration tables") ch := &clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Log: api.log.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %w", err) @@ -1516,14 +1514,14 @@ func (api *APIServer) CreateIntegrationTables() error { func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*config.Config, error) { cfg, err := config.LoadConfig(api.configPath) if err != nil { - api.log.Errorf("config.LoadConfig(%s) return error: %v", api.configPath, err) + api.log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err) if w != nil { api.writeError(w, http.StatusInternalServerError, command, err) } return nil, err } api.config = cfg - api.log = apexLog.WithField("logger", "server") + api.log = log.With().Str("logger", "server").Logger() api.metrics.NumberBackupsRemoteExpected.Set(float64(cfg.General.BackupsToKeepRemote)) api.metrics.NumberBackupsLocalExpected.Set(float64(cfg.General.BackupsToKeepLocal)) return cfg, nil @@ -1532,14 +1530,13 @@ func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*conf func (api *APIServer) ResumeOperationsAfterRestart() error { ch := 
clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Log: apexLog.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { return err } defer func() { if err := ch.GetConn().Close(); err != nil { - api.log.Errorf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) + api.log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) } }() disks, err := ch.GetDisks(context.Background()) @@ -1598,7 +1595,7 @@ func (api *APIServer) ResumeOperationsAfterRestart() error { } args = append(args, "--resumable=1", backupName) fullCommand := strings.Join(args, " ") - api.log.WithField("operation", "ResumeOperationsAfterRestart").Info(fullCommand) + api.log.Info().Str("operation", "ResumeOperationsAfterRestart").Send() commandId, _ := status.Current.Start(fullCommand) err, _ = api.metrics.ExecuteWithMetrics(command, 0, func() error { return api.cliApp.Run(append([]string{"clickhouse-backup", "-c", api.configPath, "--command-id", strconv.FormatInt(int64(commandId), 10)}, args...)) diff --git a/pkg/server/utils.go b/pkg/server/utils.go index d32fe1d3..489b9f38 100644 --- a/pkg/server/utils.go +++ b/pkg/server/utils.go @@ -10,12 +10,12 @@ import ( func (api *APIServer) flushOutput(w http.ResponseWriter, out string) { if _, err := fmt.Fprintln(w, out); err != nil { - api.log.Warnf("can't write to http.ResponseWriter: %v", err) + api.log.Warn().Msgf("can't write to http.ResponseWriter: %v", err) } } func (api *APIServer) writeError(w http.ResponseWriter, statusCode int, operation string, err error) { - api.log.Errorf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) + api.log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) w.WriteHeader(statusCode) w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate") @@ -45,7 +45,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warnf("sendJSONEachRow json.Marshal error: %v", err) + api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } default: @@ -53,7 +53,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warnf("sendJSONEachRow json.Marshal error: %v", err) + api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } } @@ -71,7 +71,7 @@ func (api *APIServer) errorCallback(ctx context.Context, err error, callback cal Error: err.Error(), } for _, e := range callback(ctx, payload) { - api.log.Error(e.Error()) + api.log.Error().Err(e).Send() } } @@ -82,6 +82,6 @@ func (api *APIServer) successCallback(ctx context.Context, callback callbackFn) Error: "", } for _, e := range callback(ctx, payload) { - api.log.Error(e.Error()) + api.log.Error().Err(e).Send() } } diff --git a/pkg/status/status.go b/pkg/status/status.go index 70ee12e1..366632ad 100644 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -3,11 +3,13 @@ package status import ( "context" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/common" - apexLog "github.com/apex/log" + "github.com/rs/zerolog" "strings" "sync" "time" + + "github.com/Altinity/clickhouse-backup/pkg/common" + "github.com/rs/zerolog/log" ) const ( @@ -18,14 +20,14 @@ const ( ) var Current = &AsyncStatus{ - log: apexLog.WithField("logger", "status"), + logger: 
log.With().Str("logger", "status").Logger(), } const NotFromAPI = int(-1) type AsyncStatus struct { commands []ActionRow - log *apexLog.Entry + logger zerolog.Logger sync.RWMutex } @@ -57,7 +59,7 @@ func (status *AsyncStatus) Start(command string) (int, context.Context) { Cancel: cancel, }) lastCommandId := len(status.commands) - 1 - status.log.Debugf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId]) + status.logger.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId]) return lastCommandId, ctx } @@ -77,10 +79,10 @@ func (status *AsyncStatus) InProgress() bool { defer status.RUnlock() n := len(status.commands) - 1 if n < 0 { - status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) + status.logger.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) return false } - status.log.Debugf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus) + status.logger.Debug().Msgf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus) return status.commands[n].Status == InProgressStatus } @@ -116,7 +118,7 @@ func (status *AsyncStatus) Stop(commandId int, err error) { status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) status.commands[commandId].Ctx = nil status.commands[commandId].Cancel = nil - status.log.Debugf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + status.logger.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } func (status *AsyncStatus) Cancel(command string, err error) error { @@ -124,7 +126,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { defer status.Unlock() if len(status.commands) == 0 { err = fmt.Errorf("empty command list") - status.log.Warnf(err.Error()) + status.logger.Warn().Err(err).Send() return err } commandId := -1 @@ -145,11 +147,11 @@ func (status *AsyncStatus) Cancel(command string, err error) error { } if commandId == -1 { err = fmt.Errorf("command `%s` not found", command) - status.log.Warnf(err.Error()) + status.logger.Warn().Err(err).Send() return err } if status.commands[commandId].Status != InProgressStatus { - status.log.Warnf("found `%s` with status=%s", command, status.commands[commandId].Status) + status.logger.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status) } if status.commands[commandId].Ctx != nil { status.commands[commandId].Cancel() @@ -159,7 +161,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { status.commands[commandId].Error = err.Error() status.commands[commandId].Status = CancelStatus status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - status.log.Debugf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) return nil } @@ -175,7 +177,7 @@ func (status *AsyncStatus) CancelAll(cancelMsg string) { status.commands[commandId].Status = CancelStatus status.commands[commandId].Error = cancelMsg status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - 
status.log.Debugf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } } diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go index 85893f19..ee9dd14f 100644 --- a/pkg/storage/ftp.go +++ b/pkg/storage/ftp.go @@ -4,8 +4,6 @@ import ( "context" "crypto/tls" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/config" - apexLog "github.com/apex/log" "io" "os" "path" @@ -13,14 +11,16 @@ import ( "sync" "time" + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/jlaffaye/ftp" "github.com/jolestar/go-commons-pool/v2" + "github.com/rs/zerolog" ) type FTP struct { clients *pool.ObjectPool Config *config.FTPConfig - Log *apexLog.Entry + Logger zerolog.Logger dirCache map[string]bool dirCacheMutex sync.RWMutex } @@ -64,21 +64,21 @@ func (f *FTP) Close(ctx context.Context) error { // getConnectionFromPool *ftp.ServerConn is not thread-safe, so we need implements connection pool func (f *FTP) getConnectionFromPool(ctx context.Context, where string) (*ftp.ServerConn, error) { - f.Log.Debugf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + f.Logger.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) client, err := f.clients.BorrowObject(ctx) if err != nil { - f.Log.Errorf("can't BorrowObject from FTP Connection Pool: %v", err) + f.Logger.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err) return nil, err } return client.(*ftp.ServerConn), nil } func (f *FTP) returnConnectionToPool(ctx context.Context, where string, client *ftp.ServerConn) { - f.Log.Debugf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + f.Logger.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) if client != nil { err := f.clients.ReturnObject(ctx, client) if err != nil { - f.Log.Errorf("can't ReturnObject to FTP Connection Pool: %v", err) + f.Logger.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err) } } } @@ -175,7 +175,7 @@ func (f *FTP) Walk(ctx context.Context, ftpPath string, recursive bool, process } func (f *FTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - f.Log.Debugf("GetFileReader key=%s", key) + f.Logger.Debug().Msgf("GetFileReader key=%s", key) client, err := f.getConnectionFromPool(ctx, "GetFileReader") if err != nil { return nil, err @@ -194,7 +194,7 @@ func (f *FTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io } func (f *FTP) PutFile(ctx context.Context, key string, r io.ReadCloser) error { - f.Log.Debugf("PutFile key=%s", key) + f.Logger.Debug().Msgf("PutFile key=%s", key) client, err := f.getConnectionFromPool(ctx, "PutFile") defer f.returnConnectionToPool(ctx, "PutFile", client) if err != nil { @@ -239,7 +239,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.RLock() if _, exists := f.dirCache[d]; exists { f.dirCacheMutex.RUnlock() - f.Log.Debugf("MkdirAll %s exists in dirCache", d) + f.Logger.Debug().Msgf("MkdirAll %s exists in dirCache", d) continue } f.dirCacheMutex.RUnlock() @@ -247,7 +247,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.Lock() err = client.MakeDir(d) if err != nil { - f.Log.Warnf("MkdirAll MakeDir(%s) return error: %v", d, err) + 
f.Logger.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err) } else { f.dirCache[d] = true } diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 50b933d6..31bb133f 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -14,7 +14,7 @@ import ( "google.golang.org/api/option/internaloption" "cloud.google.com/go/storage" - "github.com/apex/log" + "github.com/rs/zerolog/log" "google.golang.org/api/iterator" "google.golang.org/api/option" googleHTTPTransport "google.golang.org/api/transport/http" @@ -37,11 +37,11 @@ func (w debugGCSTransport) RoundTrip(r *http.Request) (*http.Response, error) { logMsg += fmt.Sprintf("%v: %v\n", h, v) } } - log.Info(logMsg) + log.Info().Msg(logMsg) resp, err := w.base.RoundTrip(r) if err != nil { - log.Errorf("GCS_ERROR: %v", err) + log.Error().Msgf("GCS_ERROR: %v", err) return resp, err } logMsg = fmt.Sprintf("<<< [GCS_RESPONSE] <<< %v %v\n", r.Method, r.URL.String()) @@ -50,7 +50,7 @@ func (w debugGCSTransport) RoundTrip(r *http.Request) (*http.Response, error) { logMsg += fmt.Sprintf("%v: %v\n", h, v) } } - log.Info(logMsg) + log.Info().Msg(logMsg) return resp, err } @@ -166,7 +166,7 @@ func (gcs *GCS) PutFile(ctx context.Context, key string, r io.ReadCloser) error } defer func() { if err := writer.Close(); err != nil { - log.Warnf("can't close writer: %+v", err) + log.Warn().Msgf("can't close writer: %+v", err) } }() buffer := make([]byte, 512*1024) diff --git a/pkg/storage/general.go b/pkg/storage/general.go index 0260e3db..2899139c 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -5,11 +5,7 @@ import ( "context" "encoding/json" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/clickhouse" - "github.com/Altinity/clickhouse-backup/pkg/config" - "github.com/Altinity/clickhouse-backup/pkg/progressbar" - "github.com/Altinity/clickhouse-backup/pkg/utils" - "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog" "io" "os" "path" @@ -19,13 +15,17 @@ import ( "sync" "time" + "github.com/Altinity/clickhouse-backup/pkg/clickhouse" + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/metadata" - "golang.org/x/sync/errgroup" - - apexLog "github.com/apex/log" + "github.com/Altinity/clickhouse-backup/pkg/progressbar" + "github.com/Altinity/clickhouse-backup/pkg/utils" "github.com/djherbis/buffer" "github.com/djherbis/nio/v3" + "github.com/eapache/go-resiliency/retrier" "github.com/mholt/archiver/v4" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" ) const ( @@ -49,7 +49,7 @@ type Backup struct { type BackupDestination struct { RemoteStorage - Log *apexLog.Entry + Logger zerolog.Logger compressionFormat string compressionLevel int disableProgressBar bool @@ -67,23 +67,26 @@ func (bd *BackupDestination) RemoveOldBackups(ctx context.Context, keep int) err return err } backupsToDelete := GetBackupsToDelete(backupList, keep) - bd.Log.WithFields(apexLog.Fields{ + bd.Logger.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackups", "duration": utils.HumanizeDuration(time.Since(start)), - }).Info("calculate backup list for delete") + }).Msg("calculate backup list for delete") for _, backupToDelete := range backupsToDelete { startDelete := time.Now() if err := bd.RemoveBackup(ctx, backupToDelete); err != nil { - bd.Log.Warnf("can't delete %s return error : %v", backupToDelete.BackupName, err) + bd.Logger.Warn().Msgf("can't delete %s return error : %v", backupToDelete.BackupName, err) } - bd.Log.WithFields(apexLog.Fields{ + 
bd.Logger.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackups", "location": "remote", "backup": backupToDelete.BackupName, "duration": utils.HumanizeDuration(time.Since(startDelete)), - }).Info("done") + }).Msg("done") } - bd.Log.WithFields(apexLog.Fields{"operation": "RemoveOldBackups", "duration": utils.HumanizeDuration(time.Since(start))}).Info("done") + bd.Logger.Info().Fields(map[string]interface{}{ + "operation": "RemoveOldBackups", + "duration": utils.HumanizeDuration(time.Since(start)), + }).Msg("done") return nil } @@ -120,17 +123,17 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) listCache := map[string]Backup{} if info, err := os.Stat(listCacheFile); os.IsNotExist(err) || info.IsDir() { - bd.Log.Debugf("%s not found, load %d elements", listCacheFile, len(listCache)) + bd.Logger.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache)) return listCache, nil } f, err := os.Open(listCacheFile) if err != nil { - bd.Log.Warnf("can't open %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return listCache, nil } defer func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() select { @@ -139,15 +142,15 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] default: body, err := io.ReadAll(f) if err != nil { - bd.Log.Warnf("can't read %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't read %s return error %v", listCacheFile, err) return listCache, nil } if string(body) != "" { if err := json.Unmarshal(body, &listCache); err != nil { - bd.Log.Fatalf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) + bd.Logger.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) } } - bd.Log.Debugf("%s load %d elements", listCacheFile, len(listCache)) + bd.Logger.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache)) return listCache, nil } } @@ -156,12 +159,12 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) f, err := os.OpenFile(listCacheFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - bd.Log.Warnf("can't open %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return nil } defer func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() for backupName := range listCache { @@ -187,15 +190,15 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma default: body, err := json.MarshalIndent(&listCache, "", "\t") if err != nil { - bd.Log.Warnf("can't json marshal %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err) return nil } _, err = f.Write(body) if err != nil { - bd.Log.Warnf("can't write to %s return error %v", listCacheFile, err) + bd.Logger.Warn().Msgf("can't write to %s return error %v", listCacheFile, err) return 
nil } - bd.Log.Debugf("%s save %d elements", listCacheFile, len(listCache)) + bd.Logger.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache)) return nil } } @@ -308,7 +311,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, return nil }) if err != nil { - bd.Log.Warnf("BackupList bd.Walk return error: %v", err) + bd.Logger.Warn().Msgf("BackupList bd.Walk return error: %v", err) } // sort by name for the same not parsed metadata.json sort.SliceStable(result, func(i, j int) bool { @@ -340,13 +343,13 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot } defer func() { if err := reader.Close(); err != nil { - bd.Log.Warnf("can't close GetFileReader descriptor %v", reader) + bd.Logger.Warn().Msgf("can't close GetFileReader descriptor %v", reader) } switch reader.(type) { case *os.File: fileName := reader.(*os.File).Name() if err := os.Remove(fileName); err != nil { - bd.Log.Warnf("can't remove %s", fileName) + bd.Logger.Warn().Msgf("can't remove %s", fileName) } } }() @@ -358,7 +361,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot proxyReader := bar.NewProxyReader(bufReader) compressionFormat := bd.compressionFormat if !checkArchiveExtension(path.Ext(remotePath), compressionFormat) { - bd.Log.Warnf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) + bd.Logger.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) compressionFormat = strings.Replace(path.Ext(remotePath), ".", "", -1) } z, err := getArchiveReader(compressionFormat) @@ -399,7 +402,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot if err := f.Close(); err != nil { return err } - //bd.Log.Debugf("extract %s", extractFile) + //bd.Logger.Debug().Msgf("extract %s", extractFile) return nil }); err != nil { return err @@ -434,11 +437,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if writerErr != nil { if err := w.CloseWithError(writerErr); err != nil { - bd.Log.Errorf("can't close after error %v pipe writer error: %v", writerErr, err) + bd.Logger.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err) } } else { if err := w.Close(); err != nil { - bd.Log.Errorf("can't close pipe writer: %v", err) + bd.Logger.Error().Msgf("can't close pipe writer: %v", err) } } }() @@ -465,7 +468,7 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc }, } archiveFiles = append(archiveFiles, file) - //bd.Log.Debugf("add %s to archive %s", filePath, remotePath) + //bd.Logger.Debug().Msgf("add %s to archive %s", filePath, remotePath) } if writerErr = z.Archive(ctx, w, archiveFiles); writerErr != nil { return writerErr @@ -476,11 +479,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if readerErr != nil { if err := body.CloseWithError(readerErr); err != nil { - bd.Log.Errorf("can't close after error %v pipe reader error: %v", writerErr, err) + bd.Logger.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err) } } else { if err := body.Close(); err != nil { - bd.Log.Errorf("can't close pipe reader: %v", err) + bd.Logger.Error().Msgf("can't close pipe reader: %v", err) } } }() @@ -505,10 +508,10 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot bar = progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes) defer 
bar.Finish() } - log := bd.Log.WithFields(apexLog.Fields{ + logger := bd.Logger.With().Fields(map[string]interface{}{ "path": remotePath, "operation": "download", - }) + }).Logger() return bd.Walk(ctx, remotePath, true, func(ctx context.Context, f RemoteFile) error { if bd.Kind() == "SFTP" && (f.Name() == "." || f.Name() == "..") { return nil @@ -517,30 +520,30 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot err := retry.RunCtx(ctx, func(ctx context.Context) error { r, err := bd.GetFileReader(ctx, path.Join(remotePath, f.Name())) if err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } dstFilePath := path.Join(localPath, f.Name()) dstDirPath, _ := path.Split(dstFilePath) if err := os.MkdirAll(dstDirPath, 0750); err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } dst, err := os.Create(dstFilePath) if err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } if _, err := io.CopyBuffer(dst, r, nil); err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } if err := dst.Close(); err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } if err := r.Close(); err != nil { - log.Error(err.Error()) + logger.Error().Err(err).Send() return err } return nil @@ -581,7 +584,7 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc } closeFile := func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close UploadPath file descriptor %v: %v", f, err) + bd.Logger.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err) } } retry := retrier.New(retrier.ConstantBackoff(RetriesOnFailure, RetriesDuration), nil) @@ -606,7 +609,7 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc } func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, calcMaxSize bool, backupName string) (*BackupDestination, error) { - log := apexLog.WithField("logger", "NewBackupDestination") + logger := log.With().Str("logger", "NewBackupDestination").Logger() var err error // https://github.com/Altinity/clickhouse-backup/issues/404 if calcMaxSize { @@ -615,7 +618,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous return nil, err } if cfg.General.MaxFileSize > 0 && cfg.General.MaxFileSize < maxFileSize { - log.Warnf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) + logger.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) } if cfg.General.MaxFileSize <= 0 || cfg.General.MaxFileSize < maxFileSize { cfg.General.MaxFileSize = maxFileSize @@ -643,7 +646,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous azblobStorage.Config.BufferSize = bufferSize return &BackupDestination{ azblobStorage, - log.WithField("logger", "azure"), + logger.With().Str("logger", "azure").Logger(), cfg.AzureBlob.CompressionFormat, cfg.AzureBlob.CompressionLevel, cfg.General.DisableProgressBar, @@ -664,7 +667,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous Concurrency: cfg.S3.Concurrency, BufferSize: 512 * 1024, PartSize: partSize, - Log: log.WithField("logger", "S3"), + Logger: log.With().Str("logger", "S3").Logger(), } s3Storage.Config.Path, err = ch.ApplyMacros(ctx, 
s3Storage.Config.Path) if err != nil { @@ -681,7 +684,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ s3Storage, - log.WithField("logger", "s3"), + log.With().Str("logger", "s3").Logger(), cfg.S3.CompressionFormat, cfg.S3.CompressionLevel, cfg.General.DisableProgressBar, @@ -703,7 +706,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ googleCloudStorage, - log.WithField("logger", "gcs"), + log.With().Str("logger", "gcs").Logger(), cfg.GCS.CompressionFormat, cfg.GCS.CompressionLevel, cfg.General.DisableProgressBar, @@ -716,7 +719,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ tencentStorage, - log.WithField("logger", "cos"), + log.With().Str("logger", "cos").Logger(), cfg.COS.CompressionFormat, cfg.COS.CompressionLevel, cfg.General.DisableProgressBar, @@ -724,7 +727,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous case "ftp": ftpStorage := &FTP{ Config: &cfg.FTP, - Log: log.WithField("logger", "FTP"), + Logger: log.With().Str("logger", "FTP").Logger(), } ftpStorage.Config.Path, err = ch.ApplyMacros(ctx, ftpStorage.Config.Path) if err != nil { @@ -732,7 +735,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ ftpStorage, - log.WithField("logger", "FTP"), + log.With().Str("logger", "FTP").Logger(), cfg.FTP.CompressionFormat, cfg.FTP.CompressionLevel, cfg.General.DisableProgressBar, @@ -747,7 +750,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ sftpStorage, - log.WithField("logger", "SFTP"), + log.With().Str("logger", "SFTP").Logger(), cfg.SFTP.CompressionFormat, cfg.SFTP.CompressionLevel, cfg.General.DisableProgressBar, @@ -757,7 +760,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } } -// https://github.com/Altinity/clickhouse-backup/issues/588 +// ApplyMacrosToObjectLabels - https://github.com/Altinity/clickhouse-backup/issues/588 func ApplyMacrosToObjectLabels(ctx context.Context, objectLabels map[string]string, ch *clickhouse.ClickHouse, backupName string) (map[string]string, error) { var err error for k, v := range objectLabels { diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 4efd76f5..31d5faab 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -7,6 +7,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/aws/smithy-go" awsV2http "github.com/aws/smithy-go/transport/http" + "github.com/rs/zerolog" "io" "net/http" "os" @@ -16,36 +17,34 @@ import ( "golang.org/x/sync/errgroup" - apexLog "github.com/apex/log" "github.com/aws/aws-sdk-go-v2/aws" awsV2Config "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" "github.com/aws/aws-sdk-go-v2/credentials/stscreds" - "github.com/aws/aws-sdk-go-v2/service/sts" - s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/aws-sdk-go-v2/service/sts" awsV2Logging "github.com/aws/smithy-go/logging" "github.com/pkg/errors" ) -type S3LogToApexLogAdapter struct { - apexLog *apexLog.Logger +type S3LogToZeroLogAdapter struct { + logger zerolog.Logger } -func newS3Logger(log *apexLog.Entry) S3LogToApexLogAdapter { - return S3LogToApexLogAdapter{ - apexLog: log.Logger, +func 
newS3Logger(log zerolog.Logger) S3LogToZeroLogAdapter { + return S3LogToZeroLogAdapter{ + logger: log, } } -func (S3LogToApexLogAdapter S3LogToApexLogAdapter) Logf(severity awsV2Logging.Classification, msg string, args ...interface{}) { +func (S3LogToApexLogAdapter S3LogToZeroLogAdapter) Logf(severity awsV2Logging.Classification, msg string, args ...interface{}) { msg = fmt.Sprintf("[s3:%s] %s", severity, msg) if len(args) > 0 { - S3LogToApexLogAdapter.apexLog.Infof(msg, args...) + S3LogToApexLogAdapter.logger.Info().Msgf(msg, args...) } else { - S3LogToApexLogAdapter.apexLog.Info(msg) + S3LogToApexLogAdapter.logger.Info().Msg(msg) } } @@ -55,7 +54,7 @@ type S3 struct { uploader *s3manager.Uploader downloader *s3manager.Downloader Config *config.S3Config - Log *apexLog.Entry + Logger zerolog.Logger PartSize int64 Concurrency int BufferSize int @@ -109,7 +108,7 @@ func (s *S3) Connect(ctx context.Context) error { } if s.Config.Debug { - awsConfig.Logger = newS3Logger(s.Log) + awsConfig.Logger = newS3Logger(s.Logger) awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse } diff --git a/pkg/storage/sftp.go b/pkg/storage/sftp.go index 9e5d1b54..6c3fc42c 100644 --- a/pkg/storage/sftp.go +++ b/pkg/storage/sftp.go @@ -13,8 +13,8 @@ import ( "syscall" "time" - "github.com/apex/log" libSFTP "github.com/pkg/sftp" + "github.com/rs/zerolog/log" "golang.org/x/crypto/ssh" ) @@ -27,7 +27,7 @@ type SFTP struct { func (sftp *SFTP) Debug(msg string, v ...interface{}) { if sftp.Config.Debug { - log.Infof(msg, v...) + log.Info().Msgf(msg, v...) } } @@ -140,7 +140,7 @@ func (sftp *SFTP) DeleteDirectory(ctx context.Context, dirPath string) error { sftp.Debug("[SFTP_DEBUG] DeleteDirectory %s", dirPath) defer func() { if err := sftp.sftpClient.RemoveDirectory(dirPath); err != nil { - log.Warnf("RemoveDirectory err=%v", err) + log.Warn().Msgf("RemoveDirectory err=%v", err) } }() @@ -153,11 +153,11 @@ func (sftp *SFTP) DeleteDirectory(ctx context.Context, dirPath string) error { filePath := path.Join(dirPath, file.Name()) if file.IsDir() { if err := sftp.DeleteDirectory(ctx, filePath); err != nil { - log.Warnf("sftp.DeleteDirectory(%s) err=%v", filePath, err) + log.Warn().Msgf("sftp.DeleteDirectory(%s) err=%v", filePath, err) } } else { if err := sftp.sftpClient.Remove(filePath); err != nil { - log.Warnf("sftp.Remove(%s) err=%v", filePath, err) + log.Warn().Msgf("sftp.Remove(%s) err=%v", filePath, err) } } } @@ -221,7 +221,7 @@ func (sftp *SFTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) func (sftp *SFTP) PutFile(ctx context.Context, key string, localFile io.ReadCloser) error { filePath := path.Join(sftp.Config.Path, key) if err := sftp.sftpClient.MkdirAll(path.Dir(filePath)); err != nil { - log.Warnf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(filePath), err) + log.Warn().Msgf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(filePath), err) } remoteFile, err := sftp.sftpClient.Create(filePath) if err != nil { @@ -229,7 +229,7 @@ func (sftp *SFTP) PutFile(ctx context.Context, key string, localFile io.ReadClos } defer func() { if err := remoteFile.Close(); err != nil { - log.Warnf("can't close %s err=%v", filePath, err) + log.Warn().Msgf("can't close %s err=%v", filePath, err) } }() if _, err = remoteFile.ReadFrom(localFile); err != nil { diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go index 02517815..f609daf2 100644 --- a/pkg/storage/utils.go +++ b/pkg/storage/utils.go @@ -2,9 +2,9 @@ package storage import ( "fmt" - "github.com/apex/log" 
"github.com/klauspost/compress/zstd" "github.com/mholt/archiver/v4" + "github.com/rs/zerolog/log" "sort" "strings" "time" @@ -23,12 +23,12 @@ func GetBackupsToDelete(backups []Backup, keep int) []Backup { deletedBackups := make([]Backup, len(backups)-keep) copied := copy(deletedBackups, backups[keep:]) if copied != len(backups)-keep { - log.Warnf("copied wrong items from backup list expected=%d, actual=%d", len(backups)-keep, copied) + log.Warn().Msgf("copied wrong items from backup list expected=%d, actual=%d", len(backups)-keep, copied) } keepBackups := make([]Backup, keep) copied = copy(keepBackups, backups[:keep]) if copied != keep { - log.Warnf("copied wrong items from backup list expected=%d, actual=%d", keep, copied) + log.Warn().Msgf("copied wrong items from backup list expected=%d, actual=%d", keep, copied) } var findRequiredBackup func(b Backup) findRequiredBackup = func(b Backup) { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index e41eb5ca..9720a402 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -3,7 +3,7 @@ package utils import ( "context" "fmt" - "github.com/apex/log" + "github.com/rs/zerolog/log" "os/exec" "regexp" "strings" @@ -47,27 +47,27 @@ func HumanizeDuration(d time.Duration) string { if d >= year { years := d / year if _, err := fmt.Fprintf(&b, "%dy", years); err != nil { - log.Warnf("HumanizeDuration error: %v", err) + log.Warn().Msgf("HumanizeDuration error: %v", err) } d -= years * year } days := d / day d -= days * day if _, err := fmt.Fprintf(&b, "%dd%s", days, d); err != nil { - log.Warnf("HumanizeDuration error: %v", err) + log.Warn().Msgf("HumanizeDuration error: %v", err) } return b.String() } func ExecCmd(ctx context.Context, timeout time.Duration, cmd string, args ...string) error { out, err := ExecCmdOut(ctx, timeout, cmd, args...) 
- log.Info(out) + log.Info().Msg(out) return err } func ExecCmdOut(ctx context.Context, timeout time.Duration, cmd string, args ...string) (string, error) { ctx, cancel := context.WithTimeout(ctx, timeout) - log.Infof("%s %s", cmd, strings.Join(args, " ")) + log.Info().Msgf("%s %s", cmd, strings.Join(args, " ")) out, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput() cancel() return string(out), err diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 55f9280f..3a13162e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -6,11 +6,14 @@ import ( "context" "fmt" "github.com/Altinity/clickhouse-backup/pkg/config" - "github.com/Altinity/clickhouse-backup/pkg/logcli" + "github.com/Altinity/clickhouse-backup/pkg/log_helper" "github.com/Altinity/clickhouse-backup/pkg/partition" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/Altinity/clickhouse-backup/pkg/utils" "github.com/google/uuid" + "github.com/rs/zerolog" + "github.com/rs/zerolog/pkgerrors" + stdlog "log" "math/rand" "os" "os/exec" @@ -22,7 +25,7 @@ import ( "time" "github.com/Altinity/clickhouse-backup/pkg/clickhouse" - "github.com/apex/log" + "github.com/rs/zerolog/log" "golang.org/x/mod/semver" _ "github.com/ClickHouse/clickhouse-go/v2" @@ -57,16 +60,16 @@ var testData = []TestDataStruct{ { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", Name: ".inner.table1", - Schema: "(Date Date, TimeStamp DateTime, Log String) ENGINE = MergeTree(Date, (TimeStamp, Log), 8192)", + Schema: "(Date Date, TimeStamp DateTime, Logger String) ENGINE = MergeTree(Date, (TimeStamp, Logger), 8192)", Rows: []map[string]interface{}{ - {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:14"), "Log": "One"}, - {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:15"), "Log": "Two"}, - {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:16"), "Log": "Three"}, - {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:17"), "Log": "Four"}, - {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:18"), "Log": "Five"}, - {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:19"), "Log": "Six"}, + {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:14"), "Logger": "One"}, + {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:15"), "Logger": "Two"}, + {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:16"), "Logger": "Three"}, + {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:17"), "Logger": "Four"}, + {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:18"), "Logger": "Five"}, + {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:19"), "Logger": "Six"}, }, - Fields: []string{"Date", "TimeStamp", "Log"}, + Fields: []string{"Date", "TimeStamp", "Logger"}, OrderBy: "TimeStamp", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -304,11 +307,11 @@ var incrementData = []TestDataStruct{ { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", Name: ".inner.table1", - Schema: "(Date Date, TimeStamp DateTime, Log String) ENGINE = MergeTree(Date, (TimeStamp, Log), 8192)", + Schema: "(Date Date, TimeStamp DateTime, Logger String) ENGINE = MergeTree(Date, (TimeStamp, Logger), 8192)", Rows: []map[string]interface{}{ - {"Date": toDate("2019-10-26"), "TimeStamp": toTS("2019-01-26 07:37:19"), "Log": "Seven"}, + {"Date": toDate("2019-10-26"), "TimeStamp": toTS("2019-01-26 07:37:19"), 
"Logger": "Seven"}, }, - Fields: []string{"Date", "TimeStamp", "Log"}, + Fields: []string{"Date", "TimeStamp", "Logger"}, OrderBy: "TimeStamp", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -392,12 +395,14 @@ var incrementData = []TestDataStruct{ } func init() { - log.SetHandler(logcli.New(os.Stdout)) + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: time.StampMilli}) + stdlog.SetOutput(log.Logger) + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack logLevel := "info" if os.Getenv("LOG_LEVEL") != "" { logLevel = os.Getenv("LOG_LEVEL") } - log.SetLevelFromString(logLevel) + log_helper.SetLogLevelFromString(logLevel) } func TestDoRestoreRBAC(t *testing.T) { @@ -419,7 +424,7 @@ func TestDoRestoreRBAC(t *testing.T) { ch.queryWithNoError(r, "DROP ROLE IF EXISTS test_rbac") ch.queryWithNoError(r, "DROP USER IF EXISTS test_rbac") - log.Info("create RBAC related objects") + log.Info().Msg("create RBAC related objects") ch.queryWithNoError(r, "CREATE SETTINGS PROFILE test_rbac SETTINGS max_execution_time=60") ch.queryWithNoError(r, "CREATE ROLE test_rbac SETTINGS PROFILE 'test_rbac'") ch.queryWithNoError(r, "CREATE USER test_rbac IDENTIFIED BY 'test_rbac' DEFAULT ROLE test_rbac") @@ -431,7 +436,7 @@ func TestDoRestoreRBAC(t *testing.T) { r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", "test_rbac_backup")) r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) - log.Info("drop all RBAC related objects after backup") + log.Info().Msg("drop all RBAC related objects after backup") ch.queryWithNoError(r, "DROP SETTINGS PROFILE test_rbac") ch.queryWithNoError(r, "DROP QUOTA test_rbac") ch.queryWithNoError(r, "DROP ROW POLICY test_rbac ON default.test_rbac") @@ -442,7 +447,7 @@ func TestDoRestoreRBAC(t *testing.T) { r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker-compose", "-f", os.Getenv("COMPOSE_FILE"), "restart", "clickhouse")) ch.connectWithWait(r, 2*time.Second, 10*time.Second) - log.Info("download+restore RBAC") + log.Info().Msg("download+restore RBAC") r.NoError(dockerExec("clickhouse", "ls", "-lah", "/var/lib/clickhouse/access")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "download", "test_rbac_backup")) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", "--rbac", "test_rbac_backup")) @@ -680,7 +685,7 @@ func TestLongListRemote(t *testing.T) { cacheClearDuration := time.Since(startCacheClear) r.Greater(cacheClearDuration, cashedDuration) - log.Infof("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) + log.Info().Msgf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cashedDuration.String(), cacheClearDuration.String()) testListRemoteAllBackups := make([]string, totalCacheCount) for i := 0; i < totalCacheCount; i++ { @@ -720,20 +725,20 @@ func TestRestoreDatabaseMapping(t *testing.T) { ch.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") - log.Info("Create backup") + log.Info().Msg("Create backup") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", testBackupName)) - log.Info("Restore schema") + log.Info().Msg("Restore schema") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", "--rm", "--restore-database-mapping", 
"database1:database2", "--tables", "database1.*", testBackupName)) - log.Info("Check result database1") + log.Info().Msg("Check result database1") ch.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") checkRecordset(1, 20, "SELECT count() FROM database1.t1") checkRecordset(1, 20, "SELECT count() FROM database1.d1") checkRecordset(1, 20, "SELECT count() FROM database1.mv1") checkRecordset(1, 20, "SELECT count() FROM database1.v1") - log.Info("Drop database1") + log.Info().Msg("Drop database1") isAtomic, err := ch.chbackend.IsAtomic("database1") r.NoError(err) if isAtomic { @@ -742,16 +747,16 @@ func TestRestoreDatabaseMapping(t *testing.T) { ch.queryWithNoError(r, "DROP DATABASE database1") } - log.Info("Restore data") + log.Info().Msg("Restore data") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--data", "--restore-database-mapping", "database1:database2", "--tables", "database1.*", testBackupName)) - log.Info("Check result database2") + log.Info().Msg("Check result database2") checkRecordset(1, 10, "SELECT count() FROM database2.t1") checkRecordset(1, 10, "SELECT count() FROM database2.d1") checkRecordset(1, 10, "SELECT count() FROM database2.mv1") checkRecordset(1, 10, "SELECT count() FROM database2.v1") - log.Info("Check database1 not exists") + log.Info().Msg("Check database1 not exists") checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1'") fullCleanup(r, ch, []string{testBackupName}, []string{"local"}, databaseList, true) @@ -847,21 +852,21 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { incrementBackupName := fmt.Sprintf("increment_%d", rand.Int()) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} - log.Info("Clean before start") + log.Info().Msg("Clean before start") fullCleanup(r, ch, []string{testBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false) r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) generateTestData(ch, r) r.NoError(dockerExec("minio", "mc", "ls", "local/clickhouse/disk_s3")) - log.Info("Create backup") + log.Info().Msg("Create backup") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", testBackupName)) generateIncrementTestData(ch, r) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create", incrementBackupName)) - log.Info("Upload") + log.Info().Msg("Upload") uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd clickhouse-backup upload --resume %s", remoteStorageType, testBackupName) checkResumeAlreadyProcessed(uploadCmd, testBackupName, "upload", r, remoteStorageType) @@ -877,7 +882,7 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir) r.NoError(err) r.Equal(5, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") - log.Info("Delete backup") + log.Info().Msg("Delete backup") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", incrementBackupName)) out, err = dockerExecOut("clickhouse", "ls", "-lha", backupDir) @@ -886,20 +891,20 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { dropDatabasesFromTestDataDataSet(r, ch, databaseList) - log.Info("Download") + log.Info().Msg("Download") downloadCmd := 
fmt.Sprintf("clickhouse-backup download --resume %s", testBackupName) checkResumeAlreadyProcessed(downloadCmd, testBackupName, "download", r, remoteStorageType) - log.Info("Restore schema") + log.Info().Msg("Restore schema") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", testBackupName)) - log.Info("Restore data") + log.Info().Msg("Restore data") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--data", testBackupName)) - log.Info("Full restore with rm") + log.Info().Msg("Full restore with rm") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--rm", testBackupName)) - log.Info("Check data") + log.Info().Msg("Check data") for i := range testData { if testData[i].CheckDatabaseOnly { r.NoError(ch.checkDatabaseEngine(t, testData[i])) @@ -913,17 +918,17 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { // test increment dropDatabasesFromTestDataDataSet(r, ch, databaseList) - log.Info("Delete backup") + log.Info().Msg("Delete backup") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "delete", "local", testBackupName)) - log.Info("Download increment") + log.Info().Msg("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup download --resume %s", incrementBackupName) checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) - log.Info("Restore") + log.Info().Msg("Restore") r.NoError(dockerExec("clickhouse", "clickhouse-backup", "restore", "--schema", "--data", incrementBackupName)) - log.Info("Check increment data") + log.Info().Msg("Check increment data") for i := range testData { testDataItem := testData[i] if isTableSkip(ch, testDataItem, true) || testDataItem.IsDictionary { @@ -943,7 +948,7 @@ func runMainIntegrationScenario(t *testing.T, remoteStorageType string) { } // test end - log.Info("Clean after finish") + log.Info().Msg("Clean after finish") if remoteStorageType == "CUSTOM" { fullCleanup(r, ch, []string{}, []string{}, databaseList, true) } else { @@ -959,7 +964,7 @@ func checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) } out, err := dockerExecOut("clickhouse", "bash", "-xce", backupCmd) - log.Info(out) + log.Info().Msg(out) r.NoError(err) if strings.Contains(backupCmd, "--resume") { r.Contains(out, "already processed") @@ -991,7 +996,7 @@ func fullCleanup(r *require.Assertions, ch *TestClickHouse, backupNames, backupT } func generateTestData(ch *TestClickHouse, r *require.Assertions) { - log.Info("Generate test data") + log.Info().Msg("Generate test data") generateTestDataWithDifferentStoragePolicy() for _, data := range testData { if isTableSkip(ch, data, false) { @@ -1056,7 +1061,7 @@ func generateTestDataWithDifferentStoragePolicy() { } func generateIncrementTestData(ch *TestClickHouse, r *require.Assertions) { - log.Info("Generate increment test data") + log.Info().Msg("Generate increment test data") for _, data := range incrementData { if isTableSkip(ch, data, false) { continue @@ -1066,7 +1071,7 @@ func generateIncrementTestData(ch *TestClickHouse, r *require.Assertions) { } func dropDatabasesFromTestDataDataSet(r *require.Assertions, ch *TestClickHouse, databaseList []string) { - log.Info("Drop all databases") + log.Info().Msg("Drop all databases") for _, db := range databaseList { r.NoError(ch.dropDatabase(db)) } @@ -1133,7 +1138,7 @@ func TestSkipNotExistsTable(t 
*testing.T) { ch.connectWithWait(r, 0*time.Second, 1*time.Second) defer ch.chbackend.Close() - log.Info("Check skip not exist errors") + log.Info().Msg("Check skip not exist errors") ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS default.if_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" ifNotExistsInsertSQL := "INSERT INTO default.if_not_exists SELECT number FROM numbers(1000)" chVersion, err := ch.chbackend.GetVersion(context.Background()) @@ -1162,7 +1167,7 @@ func TestSkipNotExistsTable(t *testing.T) { startTime := time.Now() out, err := dockerExecOut("clickhouse", "bash", "-ce", "LOG_LEVEL=debug clickhouse-backup create --table default.if_not_exists "+testBackupName) pause = time.Since(startTime).Nanoseconds() * pausePercent / 100 - log.Info(out) + log.Info().Msg(out) if err != nil { if !strings.Contains(out, "no tables for backup") { assert.NoError(t, err) @@ -1193,7 +1198,7 @@ func TestSkipNotExistsTable(t *testing.T) { for pause := range pauseChannel { if pause > 0 { time.Sleep(time.Duration(pause) * time.Nanosecond) - log.Infof("pause=%s", time.Duration(pause).String()) + log.Info().Msgf("pause=%s", time.Duration(pause).String()) err = ch.chbackend.DropTable(clickhouse.Table{Database: "default", Name: "if_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion) r.NoError(err) } @@ -1541,7 +1546,7 @@ func TestServerAPI(t *testing.T) { randFields := 10 fillDatabaseForAPIServer(maxTables, minFields, randFields, ch, r, fieldTypes) - log.Info("Run `clickhouse-backup server --watch` in background") + log.Info().Msg("Run `clickhouse-backup server --watch` in background") r.NoError(dockerExec("-d", "clickhouse", "bash", "-ce", "clickhouse-backup server --watch &>>/tmp/clickhouse-backup-server.log")) time.Sleep(1 * time.Second) @@ -1549,7 +1554,7 @@ func TestServerAPI(t *testing.T) { testAPIBackupTables(r) - log.Info("Check /backup/actions") + log.Info().Msg("Check /backup/actions") ch.queryWithNoError(r, "SELECT count() FROM system.backup_actions") testAPIBackupUpload(r) @@ -1574,7 +1579,7 @@ func TestServerAPI(t *testing.T) { func testAPIRestart(r *require.Assertions, ch *TestClickHouse) { out, err := dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "acknowledged") @@ -1590,7 +1595,7 @@ func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { runClickHouseClientInsertSystemBackupActions := func(commands []string, needWait bool) { sql := "INSERT INTO system.backup_actions(command) " + "VALUES ('" + strings.Join(commands, "'),('") + "')" out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse client --echo -mn -q \"%s\"", sql)) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) if needWait { for _, command := range commands { @@ -1638,10 +1643,10 @@ func testAPIBackupActions(r *require.Assertions, ch *TestClickHouse) { } func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { - log.Info("Check /backup/watch + /backup/kill") + log.Info().Msg("Check /backup/watch + /backup/kill") runKillCommand := func(command string) { out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) } checkWatchBackup := func(expectedCount uint64) { @@ -1668,7 +1673,7 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { checkCanceledCommand(1) out, err := 
dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) time.Sleep(7 * time.Second) @@ -1678,15 +1683,15 @@ func testAPIWatchAndKill(r *require.Assertions, ch *TestClickHouse) { } func testAPIBackupDelete(r *require.Assertions) { - log.Info("Check /backup/delete/{where}/{name}") + log.Info().Msg("Check /backup/delete/{where}/{name}") for i := 1; i <= apiBackupNumber; i++ { out, err := dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) - log.Infof(out) + log.Info().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") out, err = dockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) - log.Infof(out) + log.Info().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -1697,7 +1702,7 @@ func testAPIBackupDelete(r *require.Assertions) { } func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { - log.Info("Check /metrics clickhouse_backup_last_backup_size_remote") + log.Info().Msg("Check /metrics clickhouse_backup_last_backup_size_remote") var lastRemoteSize int64 r.NoError(ch.chbackend.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) @@ -1711,11 +1716,11 @@ func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { r.Greater(uint64(lastRemoteSize), realTotalBytes) out, err := dockerExecOut("clickhouse", "curl", "-sL", "http://localhost:7171/metrics") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) - log.Info("Check /metrics clickhouse_backup_number_backups_*") + log.Info().Msg("Check /metrics clickhouse_backup_number_backups_*") r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) // +1 watch backup r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_remote %d", apiBackupNumber+1)) @@ -1724,13 +1729,13 @@ func testAPIMetrics(r *require.Assertions, ch *TestClickHouse) { } func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { - log.Info("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") + log.Info().Msg("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") out, err := dockerExecOut( "clickhouse", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -1743,27 +1748,27 @@ func testAPIDeleteLocalDownloadRestore(r *require.Assertions) { } func testAPIBackupList(t *testing.T, r *require.Assertions) { - log.Info("Check /backup/list") + log.Info().Msg("Check /backup/list") out, err := dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= 
apiBackupNumber; i++ { r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Info("Check /backup/list/local") + log.Info().Msg("Check /backup/list/local") out, err = dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Info("Check /backup/list/remote") + log.Info().Msg("Check /backup/list/remote") out, err = dockerExecOut("clickhouse", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) @@ -1772,13 +1777,13 @@ func testAPIBackupList(t *testing.T, r *require.Assertions) { } func testAPIBackupUpload(r *require.Assertions) { - log.Info("Check /backup/upload") + log.Info().Msg("Check /backup/upload") out, err := dockerExecOut( "clickhouse", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "\"status\":\"error\"") r.NotContains(out, "another operation is currently running") @@ -1788,12 +1793,12 @@ func testAPIBackupUpload(r *require.Assertions) { } func testAPIBackupTables(r *require.Assertions) { - log.Info("Check /backup/tables") + log.Info().Msg("Check /backup/tables") out, err := dockerExecOut( "clickhouse", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"", ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "long_schema") r.NotContains(out, "Connection refused") @@ -1803,12 +1808,12 @@ func testAPIBackupTables(r *require.Assertions) { r.NotContains(out, "INFORMATION_SCHEMA") r.NotContains(out, "information_schema") - log.Info("Check /backup/tables/all") + log.Info().Msg("Check /backup/tables/all") out, err = dockerExecOut( "clickhouse", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "long_schema") r.Contains(out, "system") @@ -1822,13 +1827,13 @@ func testAPIBackupTables(r *require.Assertions) { } func testAPIBackupCreate(r *require.Assertions) { - log.Info("Check /backup/create") + log.Info().Msg("Check /backup/create") out, err := dockerExecOut( "clickhouse", "bash", "-xe", "-c", 
fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "Connection refused") r.NotContains(out, "another operation is currently running") @@ -1840,7 +1845,7 @@ func testAPIBackupCreate(r *require.Assertions) { } func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestClickHouse, r *require.Assertions, fieldTypes []string) { - log.Infof("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) + log.Info().Msgf("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") for i := 0; i < maxTables; i++ { sql := fmt.Sprintf("CREATE TABLE long_schema.t%d (id UInt64", i) @@ -1854,7 +1859,7 @@ func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch * sql = fmt.Sprintf("INSERT INTO long_schema.t%d(id) SELECT number FROM numbers(100)", i) ch.queryWithNoError(r, sql) } - log.Info("...DONE") + log.Info().Msg("...DONE") } type TestClickHouse struct { @@ -1869,16 +1874,16 @@ func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, ti r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "logs", "clickhouse")) out, dockerErr := dockerExecOut("clickhouse", "clickhouse-client", "--echo", "-q", "'SELECT version()'") r.NoError(dockerErr) - ch.chbackend.Log.Warnf(out) + log.Warn().Msg(out) r.NoError(err) } if err != nil { - log.Warnf("clickhouse not ready %v, wait %d seconds", err, i*2) + log.Warn().Msgf("clickhouse not ready %v, wait %d seconds", err, i*2) r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "ps", "-a")) if out, dockerErr := dockerExecOut("clickhouse", "clickhouse-client", "--echo", "-q", "SELECT version()"); dockerErr == nil { - log.Warnf(out) + log.Warn().Msg(out) } else { - log.Info(out) + log.Info().Msg(out) } time.Sleep(time.Second * time.Duration(i*2)) } else { @@ -1888,7 +1893,7 @@ func (ch *TestClickHouse) connectWithWait(r *require.Assertions, sleepBefore, ti if err == nil { break } else { - log.Warnf("mysql not ready %v, wait %d seconds", err, i) + log.Warn().Msgf("mysql not ready %v, wait %d seconds", err, i) time.Sleep(time.Second * time.Duration(i)) } } else { @@ -1905,7 +1910,6 @@ func (ch *TestClickHouse) connect(timeOut string) error { Port: 9000, Timeout: timeOut, }, - Log: log.WithField("logger", "integration-test"), } var err error for i := 0; i < 3; i++ { @@ -2025,7 +2029,7 @@ func (ch *TestClickHouse) dropDatabase(database string) (err error) { func (ch *TestClickHouse) checkData(t *testing.T, data TestDataStruct, r *require.Assertions) error { assert.NotNil(t, data.Rows) - log.Infof("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) + log.Info().Msgf("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) selectSQL := fmt.Sprintf("SELECT * FROM `%s`.`%s` ORDER BY `%s`", data.Database, data.Name, data.OrderBy) if data.IsFunction && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") == -1 { @@ -2034,7 +2038,7 @@ func (ch *TestClickHouse) checkData(t *testing.T, data TestDataStruct, r *requir if data.IsFunction { selectSQL = fmt.Sprintf("SELECT %s(number, number+1) AS test_result FROM numbers(3)", data.Name) } - log.Debug(selectSQL) + 
log.Debug().Msg(selectSQL) rows, err := ch.chbackend.GetConn().Query(context.TODO(), selectSQL) if err != nil { return err @@ -2106,7 +2110,7 @@ func (ch *TestClickHouse) queryWithNoError(r *require.Assertions, query string, func dockerExec(container string, cmd ...string) error { out, err := dockerExecOut(container, cmd...) - log.Info(out) + log.Info().Msg(out) return err } @@ -2119,9 +2123,9 @@ func dockerExecOut(container string, cmd ...string) (string, error) { func dockerCP(src, dst string) error { ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second) dcmd := []string{"cp", src, dst} - log.Infof("docker %s", strings.Join(dcmd, " ")) + log.Info().Msgf("docker %s", strings.Join(dcmd, " ")) out, err := exec.CommandContext(ctx, "docker", dcmd...).CombinedOutput() - log.Info(string(out)) + log.Info().Msg(string(out)) cancel() return err } @@ -2189,7 +2193,7 @@ func installDebIfNotExists(r *require.Assertions, container, pkg string) { } func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, remoteStorageType string) { - log.Info("testBackupSpecifiedPartitions started") + log.Info().Msg("testBackupSpecifiedPartitions started") var err error var out string var result, expectedCount uint64 @@ -2285,5 +2289,5 @@ func testBackupSpecifiedPartitions(r *require.Assertions, ch *TestClickHouse, re ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t1") ch.queryWithNoError(r, "DROP TABLE IF EXISTS default.t2") - log.Info("testBackupSpecifiedPartitions finish") + log.Info().Msg("testBackupSpecifiedPartitions finish") } From 0562dd09d3b9fca567a5e43abb17bc3abc80c183 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 13:34:12 +0400 Subject: [PATCH 02/21] fix thread-safe zerolog usage, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- cmd/clickhouse-backup/main.go | 16 ++++--- pkg/backup/backuper.go | 2 + pkg/backup/download.go | 2 +- pkg/backup/upload.go | 6 ++- pkg/clickhouse/clickhouse.go | 72 ++++++++++++++-------------- pkg/resumable/state.go | 5 +- pkg/server/server.go | 3 ++ test/integration/integration_test.go | 8 ++-- 8 files changed, 64 insertions(+), 50 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 7b4b5c75..9a73cdb4 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -3,16 +3,15 @@ package main import ( "context" "fmt" + "github.com/rs/zerolog/diode" stdlog "log" "os" "time" - "github.com/Altinity/clickhouse-backup/pkg/config" - "github.com/Altinity/clickhouse-backup/pkg/status" - "github.com/Altinity/clickhouse-backup/pkg/backup" + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/server" - + "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/rs/zerolog/pkgerrors" @@ -26,9 +25,14 @@ var ( ) func main() { - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: time.StampMilli}) - stdlog.SetOutput(log.Logger) + zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { + fmt.Printf("Logger Dropped %d messages", missed) + }) + log.Logger = zerolog.New(diodeWriter).With().Timestamp().Logger() + 
stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" cliapp.Usage = "Tool for easy backup of ClickHouse with cloud support" diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index b3e9d26b..ee67b2e1 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -7,6 +7,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/storage" + "github.com/rs/zerolog/log" "path" ) @@ -25,6 +26,7 @@ type Backuper struct { func NewBackuper(cfg *config.Config) *Backuper { ch := &clickhouse.ClickHouse{ Config: &cfg.ClickHouse, + Logger: log.With().Str("logger", "clickhouse").Logger(), } return &Backuper{ cfg: cfg, diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 538c37eb..e8318592 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -188,10 +188,10 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ logger.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) break } - tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)).Logger() idx := i tableTitle := t metadataGroup.Go(func() error { + tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)).Logger() defer downloadSemaphore.Release(1) downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableLogger, tableTitle, schemaOnly, partitions) if err != nil { diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index ce7459b1..8553f351 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -147,6 +147,9 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str idx := i uploadGroup.Go(func() error { defer uploadSemaphore.Release(1) + uploadLogger := logger.With(). + Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). + Logger() var uploadedBytes int64 if !schemaOnly { var files map[string][]string @@ -163,8 +166,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return err } atomic.AddInt64(&metadataSize, tableMetadataSize) - logger.Info(). - Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). + uploadLogger.Info(). Str("duration", utils.HumanizeDuration(time.Since(start))). Str("size", utils.FormatBytes(uint64(uploadedBytes+tableMetadataSize))). 
Msg("done") diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index af115651..bcce072d 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -21,7 +21,6 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" - "github.com/rs/zerolog/log" ) // ClickHouse - provide @@ -29,6 +28,7 @@ type ClickHouse struct { Config *config.ClickHouseConfig conn driver.Conn disks []Disk + Logger zerolog.Logger version int isPartsColumnPresent int8 IsOpen bool @@ -38,7 +38,7 @@ type ClickHouse struct { func (ch *ClickHouse) Connect() error { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - log.Error().Msgf("close previous connection error: %v", err) + ch.Logger.Error().Msgf("close previous connection error: %v", err) } } ch.IsOpen = false @@ -55,9 +55,9 @@ func (ch *ClickHouse) Connect() error { Password: ch.Config.Password, }, Settings: clickhouse.Settings{ - "connect_timeout": int(timeout.Seconds()), - "receive_timeout": int(timeout.Seconds()), - "send_timeout": int(timeout.Seconds()), + // "connect_timeout": int(timeout.Seconds()), + // "receive_timeout": int(timeout.Seconds()), + // "send_timeout": int(timeout.Seconds()), }, MaxOpenConns: 1, ConnMaxLifetime: 0, @@ -78,7 +78,7 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCert != "" || ch.Config.TLSKey != "" { cert, err := tls.LoadX509KeyPair(ch.Config.TLSCert, ch.Config.TLSKey) if err != nil { - log.Error().Msgf("tls.LoadX509KeyPair error: %v", err) + ch.Logger.Error().Msgf("tls.LoadX509KeyPair error: %v", err) return err } tlsConfig.Certificates = []tls.Certificate{cert} @@ -86,12 +86,12 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCa != "" { caCert, err := os.ReadFile(ch.Config.TLSCa) if err != nil { - log.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) + ch.Logger.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) return err } caCertPool := x509.NewCertPool() if caCertPool.AppendCertsFromPEM(caCert) != true { - log.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) + ch.Logger.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) return fmt.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) } tlsConfig.RootCAs = caCertPool @@ -104,23 +104,23 @@ func (ch *ClickHouse) Connect() error { } if ch.conn, err = clickhouse.Open(opt); err != nil { - log.Error().Msgf("clickhouse connection: %s, sql.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + ch.Logger.Error().Msgf("clickhouse connection: %s, clickhouse.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } - logFunc := log.Info() + logFunc := ch.Logger.Info() if !ch.Config.LogSQLQueries { - logFunc = log.Debug() + logFunc = ch.Logger.Debug() } - logFunc.Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + logFunc.Stack().Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v?timeout=%v", ch.Config.Host, ch.Config.Port, ch.Config.Timeout)) err = ch.conn.Ping(context.Background()) if err != nil { - log.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + ch.Logger.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), 
err) return err } else { ch.IsOpen = true } - logFunc.Msgf("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + logFunc.Stack().Msgf("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) return err } @@ -259,13 +259,13 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro func (ch *ClickHouse) Close() { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - log.Warn().Msgf("can't close clickhouse connection: %v", err) + ch.Logger.Warn().Msgf("can't close clickhouse connection: %v", err) } } if ch.Config.LogSQLQueries { - log.Info().Msg("clickhouse connection closed") + ch.Logger.Info().Msg("clickhouse connection closed") } else { - log.Debug().Msg("clickhouse connection closed") + ch.Logger.Debug().Msg("clickhouse connection closed") } ch.IsOpen = false } @@ -461,7 +461,7 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl var result string // 19.4 doesn't have /var/lib/clickhouse/metadata/default.sql if err := ch.SelectSingleRow(ctx, &result, showDatabaseSQL); err != nil { - log.Warn().Msgf("can't get create database query: %v", err) + ch.Logger.Warn().Msgf("can't get create database query: %v", err) allDatabases[i].Query = fmt.Sprintf("CREATE DATABASE `%s` ENGINE = %s", db.Name, db.Engine) } else { // 23.3+ masked secrets https://github.com/Altinity/clickhouse-backup/issues/640 @@ -486,7 +486,7 @@ func (ch *ClickHouse) getTableSizeFromParts(ctx context.Context, table Table) ui } query := fmt.Sprintf("SELECT sum(bytes_on_disk) as size FROM system.parts WHERE active AND database='%s' AND table='%s' GROUP BY database, table", table.Database, table.Name) if err := ch.SelectContext(ctx, &tablesSize, query); err != nil { - log.Warn().Msgf("error parsing tablesSize: %v", err) + ch.Logger.Warn().Msgf("error parsing tablesSize: %v", err) } if len(tablesSize) > 0 { return tablesSize[0].Size @@ -517,7 +517,7 @@ func (ch *ClickHouse) fixVariousVersions(ctx context.Context, t Table, metadataP if strings.Contains(t.CreateTableQuery, "'[HIDDEN]'") { tableSQLPath := path.Join(metadataPath, common.TablePathEncode(t.Database), common.TablePathEncode(t.Name)+".sql") if attachSQL, err := os.ReadFile(tableSQLPath); err != nil { - log.Warn().Msgf("can't read %s: %v", tableSQLPath, err) + ch.Logger.Warn().Msgf("can't read %s: %v", tableSQLPath, err) } else { t.CreateTableQuery = strings.Replace(string(attachSQL), "ATTACH", "CREATE", 1) t.CreateTableQuery = strings.Replace(t.CreateTableQuery, " _ ", " `"+t.Database+"`.`"+t.Name+"` ", 1) @@ -536,7 +536,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { var err error query := "SELECT value FROM `system`.`build_options` where name='VERSION_INTEGER'" if err = ch.SelectSingleRow(ctx, &result, query); err != nil { - log.Warn().Msgf("can't get ClickHouse version: %v", err) + ch.Logger.Warn().Msgf("can't get ClickHouse version: %v", err) return 0, nil } ch.version, err = strconv.Atoi(result) @@ -567,7 +567,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name withNameQuery = fmt.Sprintf("WITH NAME '%s'", name) } for _, item := range partitions { - log.Debug().Msgf(" partition '%v'", item.PartitionID) + ch.Logger.Debug().Msgf(" partition '%v'", item.PartitionID) query := fmt.Sprintf( "ALTER TABLE `%v`.`%v` FREEZE PARTITION ID '%v' %s;", table.Database, @@ -585,7 +585,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name } if err := 
ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - log.Warn().Msgf("can't freeze partition: %v", err) + ch.Logger.Warn().Msgf("can't freeze partition: %v", err) } else { return fmt.Errorf("can't freeze partition '%s': %w", item.PartitionID, err) } @@ -604,9 +604,9 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string if strings.HasPrefix(table.Engine, "Replicated") && ch.Config.SyncReplicatedTables { query := fmt.Sprintf("SYSTEM SYNC REPLICA `%s`.`%s`;", table.Database, table.Name) if err := ch.QueryContext(ctx, query); err != nil { - log.Warn().Msgf("can't sync replica: %v", err) + ch.Logger.Warn().Msgf("can't sync replica: %v", err) } else { - log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") + ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") } } if version < 19001005 || ch.Config.FreezeByPart { @@ -619,7 +619,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string query := fmt.Sprintf("ALTER TABLE `%s`.`%s` FREEZE %s;", table.Database, table.Name, withNameQuery) if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - log.Warn().Msgf("can't freeze table: %v", err) + ch.Logger.Warn().Msgf("can't freeze table: %v", err) return nil } return fmt.Errorf("can't freeze table: %v", err) @@ -643,7 +643,7 @@ func (ch *ClickHouse) AttachDataParts(table metadata.TableMetadata, disks []Disk if err := ch.Query(query); err != nil { return err } - log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") + ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") } } } @@ -656,7 +656,7 @@ var uuidRE = regexp.MustCompile(`UUID '([^']+)'`) // AttachTable - execute ATTACH TABLE command for specific table func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetadata) error { if len(table.Parts) == 0 { - log.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) + ch.Logger.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) return nil } canContinue, err := ch.CheckReplicationInProgress(table) @@ -703,7 +703,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad return err } - log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") + ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") return nil } func (ch *ClickHouse) ShowCreateTable(ctx context.Context, database, name string) string { @@ -834,7 +834,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe if onCluster != "" && distributedRE.MatchString(query) { matches := distributedRE.FindAllStringSubmatch(query, -1) if onCluster != strings.Trim(matches[0][2], "'\" ") { - log.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) + ch.Logger.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) query = distributedRE.ReplaceAllString(query, fmt.Sprintf("${1}(%s,${3})", 
onCluster)) } } @@ -857,7 +857,7 @@ func (ch *ClickHouse) IsClickhouseShadow(path string) bool { } defer func() { if err := d.Close(); err != nil { - log.Warn().Msgf("can't close directory %v", err) + ch.Logger.Warn().Msgf("can't close directory %v", err) } }() names, err := d.Readdirnames(-1) @@ -910,9 +910,9 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { var logF *zerolog.Event if !ch.Config.LogSQLQueries { - logF = log.Debug() + logF = ch.Logger.Debug() } else { - logF = log.Info() + logF = ch.Logger.Info() } if len(args) > 0 { logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) @@ -1049,10 +1049,10 @@ func (ch *ClickHouse) CheckReplicationInProgress(table metadata.TableMetadata) ( return false, fmt.Errorf("invalid result for check exists replicas: %+v", existsReplicas) } if existsReplicas[0].InProgress > 0 { - log.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) + ch.Logger.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) return false, nil } else { - log.Info().Msgf("replication_in_progress status = %+v", existsReplicas) + ch.Logger.Info().Msgf("replication_in_progress status = %+v", existsReplicas) } } return true, nil @@ -1089,7 +1089,7 @@ func (ch *ClickHouse) CheckSystemPartsColumns(ctx context.Context, table *Table) } if len(isPartsColumnsInconsistent) > 0 { for i := range isPartsColumnsInconsistent { - log.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) + ch.Logger.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) } return fmt.Errorf("`%s`.`%s` have inconsistent data types for active data part in system.parts_columns", table.Database, table.Name) } diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index 83396c6f..d69819a5 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -3,13 +3,13 @@ package resumable import ( "encoding/json" "fmt" - "github.com/rs/zerolog" "os" "path" "strconv" "strings" "sync" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -27,7 +27,7 @@ func NewState(defaultDiskPath, backupName, command string, params map[string]int stateFile: path.Join(defaultDiskPath, "backup", backupName, fmt.Sprintf("%s.state", command)), currentState: "", mx: &sync.RWMutex{}, - logger: log.With().Str("logger", "resumable").Logger(), + logger: log.Logger.With().Str("logger", "resumable").Logger(), } fp, err := os.OpenFile(s.stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil { @@ -104,6 +104,7 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { s.mx.RLock() res := strings.Index(s.currentState, path+":") if res >= 0 { + // s.logger is non thread-safe https://github.com/rs/zerolog/issues/242 s.logger.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] diff --git a/pkg/server/server.go b/pkg/server/server.go index d121f22e..bf455402 100644 --- a/pkg/server/server.go +++ 
b/pkg/server/server.go @@ -70,6 +70,7 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack } ch := clickhouse.ClickHouse{ Config: &cfg.ClickHouse, + Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { logger.Error().Err(err).Send() @@ -1471,6 +1472,7 @@ func (api *APIServer) CreateIntegrationTables() error { api.log.Info().Msgf("Create integration tables") ch := &clickhouse.ClickHouse{ Config: &api.config.ClickHouse, + Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %w", err) @@ -1530,6 +1532,7 @@ func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*conf func (api *APIServer) ResumeOperationsAfterRestart() error { ch := clickhouse.ClickHouse{ Config: &api.config.ClickHouse, + Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { return err diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 3a13162e..a5016b8a 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -395,7 +395,8 @@ var incrementData = []TestDataStruct{ } func init() { - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: time.StampMilli}) + zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}) stdlog.SetOutput(log.Logger) zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack logLevel := "info" @@ -1080,7 +1081,7 @@ func dropDatabasesFromTestDataDataSet(r *require.Assertions, ch *TestClickHouse, func TestTablePatterns(t *testing.T) { ch := &TestClickHouse{} r := require.New(t) - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 5*time.Second) defer ch.chbackend.Close() testBackupName := "test_backup_patterns" @@ -1245,7 +1246,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) { } r := require.New(t) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 2*time.Second) backupNames := make([]string, 5) for i := 0; i < 5; i++ { backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i) @@ -1910,6 +1911,7 @@ func (ch *TestClickHouse) connect(timeOut string) error { Port: 9000, Timeout: timeOut, }, + Logger: log.With().Str("logger", "clickhouse").Logger(), } var err error for i := 0; i < 3; i++ { From e1ca361d9999cbeb1afd2ed486c8f78d163d9cc0 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 19:31:01 +0400 Subject: [PATCH 03/21] fix thread-safe zerolog usage, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- pkg/backup/download.go | 1 + pkg/resumable/state.go | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/backup/download.go b/pkg/backup/download.go index e8318592..c44fab89 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -398,6 +398,7 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } + logger = logger.With().Logger() logger.Info(). Str("duration", utils.HumanizeDuration(time.Since(start))). Str("size", utils.FormatBytes(size)). 
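The workaround applied in download.go above — deriving a fresh logger value with With().Logger() inside each goroutine instead of sharing one sub-logger across the errgroup — follows the advice in https://github.com/rs/zerolog/issues/242. A minimal stand-alone sketch of that pattern (the package, the "worker" field and the writer choice are illustrative, not taken from this patch):

package main

import (
	"os"
	"sync"

	"github.com/rs/zerolog"
)

func main() {
	base := zerolog.New(zerolog.SyncWriter(os.Stdout)).With().Timestamp().Logger()

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			// With() returns a new Context and Logger() a new Logger value,
			// so every goroutine writes through its own copy instead of
			// mutating or sharing a single sub-logger.
			logger := base.With().Int("worker", worker).Logger()
			logger.Info().Msg("processing")
		}(i)
	}
	wg.Wait()
}
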
diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index d69819a5..e269a55a 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -105,12 +105,13 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { res := strings.Index(s.currentState, path+":") if res >= 0 { // s.logger is non thread-safe https://github.com/rs/zerolog/issues/242 - s.logger.Info().Msgf("%s already processed", path) + logger := s.logger.With().Logger() + logger.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) + logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() From 8b96079c3ed2330b3cc2c046f93109620269f032 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 19:40:06 +0400 Subject: [PATCH 04/21] fix thread-safe zerolog usage, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- pkg/clickhouse/clickhouse.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index bcce072d..9809e1b6 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -909,10 +909,12 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { var logF *zerolog.Event + // zerolog is not thread-safe https://github.com/rs/zerolog/issues/242 ;( + logger := ch.Logger.With().Logger() if !ch.Config.LogSQLQueries { - logF = ch.Logger.Debug() + logF = logger.Debug() } else { - logF = ch.Logger.Info() + logF = logger.Info() } if len(args) > 0 { logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) From daa39d3772e46ff96fd85c4913fd929b4262f038 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 20:13:23 +0400 Subject: [PATCH 05/21] fix thread-safe zerolog usage, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- cmd/clickhouse-backup/main.go | 2 +- pkg/backup/download.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 9a73cdb4..c0fcbb1e 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -3,7 +3,6 @@ package main import ( "context" "fmt" - "github.com/rs/zerolog/diode" stdlog "log" "os" "time" @@ -13,6 +12,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/server" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/rs/zerolog" + "github.com/rs/zerolog/diode" "github.com/rs/zerolog/log" "github.com/rs/zerolog/pkgerrors" "github.com/urfave/cli" diff --git a/pkg/backup/download.go b/pkg/backup/download.go index c44fab89..a133a6b2 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -191,8 +191,8 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ idx := i tableTitle := t metadataGroup.Go(func() error { - tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)).Logger() defer downloadSemaphore.Release(1) + tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", tableTitle.Database, 
tableTitle.Table)).Logger() downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableLogger, tableTitle, schemaOnly, partitions) if err != nil { return err From afc11110de6a95e1650db1b734c3b9b2f1c18d74 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 22:21:36 +0400 Subject: [PATCH 06/21] fix thread-safe zerolog usage, try to remove diode, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- ReadMe.md | 4 ++-- cmd/clickhouse-backup/main.go | 15 ++++++--------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/ReadMe.md b/ReadMe.md index 0133d191..0b4b76e4 100644 --- a/ReadMe.md +++ b/ReadMe.md @@ -1,10 +1,10 @@ # clickhouse-backup -[![Build](https://github.com/Altinity/clickhouse-backup/actions/workflows/build.yaml/badge.svg?branch=dev)](https://github.com/Altinity/clickhouse-backup/actions/workflows/build.yaml) +[![Build](https://github.com/Altinity/clickhouse-backup/actions/workflows/build.yaml/badge.svg?branch=master)](https://github.com/Altinity/clickhouse-backup/actions/workflows/build.yaml) [![GoDoc](https://godoc.org/github.com/Altinity/clickhouse-backup?status.svg)](http://godoc.org/github.com/Altinity/clickhouse-backup) [![Telegram](https://img.shields.io/badge/telegram-join%20chat-3796cd.svg)](https://t.me/clickhousebackup) -[![Docker Image](https://img.shields.io/docker/pulls/Altinity/clickhouse-backup.svg)](https://hub.docker.com/r/Altinity/clickhouse-backup) +[![Docker Image](https://img.shields.io/docker/pulls/altinity/clickhouse-backup.svg)](https://hub.docker.com/r/altinity/clickhouse-backup) A tool for easy ClickHouse backup and restore with support for many cloud and non-cloud storage types. To make data backup `clickhouse-backup` requires access to the same files as `clickhouse-server` in `/var/lib/clickhouse` folders. 
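The main.go changes in this commit and the next few iterate on which writer backs the global logger: the non-blocking diode writer added earlier (fast, but it can drop messages under load), a bare ConsoleWriter, and finally a ConsoleWriter wrapped in zerolog.SyncWriter so that concurrent goroutines serialize their writes through a mutex. A rough sketch of the two ends of that trade-off, assuming stdout as the sink (illustration only, not code from the patch):

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/diode"
)

func main() {
	console := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}

	// Non-blocking: callers never wait on I/O, but messages can be dropped
	// when the ring buffer fills; the callback reports how many were missed.
	nonBlocking := diode.NewWriter(console, 4096, 10*time.Millisecond, func(missed int) {
		fmt.Printf("logger dropped %d messages\n", missed)
	})
	asyncLogger := zerolog.New(nonBlocking).With().Timestamp().Logger()
	asyncLogger.Info().Msg("via diode writer")

	// Mutex-protected: writes from concurrent goroutines are serialized,
	// at the cost of blocking on the underlying console writer.
	syncLogger := zerolog.New(zerolog.SyncWriter(console)).With().Timestamp().Logger()
	syncLogger.Info().Msg("via sync writer")
}
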
diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index c0fcbb1e..1c93f297 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -3,19 +3,16 @@ package main import ( "context" "fmt" - stdlog "log" - "os" - "time" - "github.com/Altinity/clickhouse-backup/pkg/backup" "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/server" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/rs/zerolog" - "github.com/rs/zerolog/diode" "github.com/rs/zerolog/log" "github.com/rs/zerolog/pkgerrors" "github.com/urfave/cli" + stdlog "log" + "os" ) var ( @@ -28,10 +25,10 @@ func main() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} - diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { - fmt.Printf("Logger Dropped %d messages", missed) - }) - log.Logger = zerolog.New(diodeWriter).With().Timestamp().Logger() + //diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { + // fmt.Printf("Logger Dropped %d messages", missed) + //}) + log.Logger = zerolog.New(consoleWriter).With().Timestamp().Caller().Logger() stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" From 79910c1859f7a4e23752b5f2daaaa42a3f32577b Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 22:44:10 +0400 Subject: [PATCH 07/21] fix thread-safe zerolog usage, try to remove consoleWriter, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- cmd/clickhouse-backup/main.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 1c93f297..55ef0e1f 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -24,11 +24,12 @@ var ( func main() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack - consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + // consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} //diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { // fmt.Printf("Logger Dropped %d messages", missed) //}) - log.Logger = zerolog.New(consoleWriter).With().Timestamp().Caller().Logger() + // log.Logger = zerolog.New(consoleWriter).With().Timestamp().Caller().Logger() + log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger() stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" From b1507c08f1073af882b3f3fa73ab441b3e73e6e6 Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 23:02:56 +0400 Subject: [PATCH 08/21] fix thread-safe zerolog usage, try to add zerolog.SyncWriter, details https://github.com/rs/zerolog/issues/242 and https://github.com/rs/zerolog/blob/master/README.md?plain=1#L431 --- cmd/clickhouse-backup/main.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 55ef0e1f..840d314b 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -24,12 +24,12 @@ var ( func main() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs 
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack - // consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + //consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} //diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { // fmt.Printf("Logger Dropped %d messages", missed) //}) - // log.Logger = zerolog.New(consoleWriter).With().Timestamp().Caller().Logger() - log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger() + log.Logger = zerolog.New(zerolog.SyncWriter(os.Stdout)).With().Timestamp().Caller().Logger() + //log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger() stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" From 907377cf533eb4165bc1d383898bb0485c59284f Mon Sep 17 00:00:00 2001 From: Slach Date: Tue, 13 Jun 2023 23:40:29 +0400 Subject: [PATCH 09/21] debug thread-safe zerolog usage, https://github.com/rs/zerolog/issues/555, try to apply force zerolog.Disabled --- pkg/log_helper/log_level.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/log_helper/log_level.go b/pkg/log_helper/log_level.go index f9fbbaf8..4429d87e 100644 --- a/pkg/log_helper/log_level.go +++ b/pkg/log_helper/log_level.go @@ -19,4 +19,5 @@ func SetLogLevelFromString(logLevel string) { level = zerolog.InfoLevel } zerolog.SetGlobalLevel(level) + zerolog.SetGlobalLevel(zerolog.Disabled) } From 733e6894a52f3278dfb84f8ac72db6d34c3e1945 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 14 Jun 2023 10:00:29 +0400 Subject: [PATCH 10/21] debug thread-safe zerolog usage, https://github.com/rs/zerolog/issues/555, try to SyncWriter -> ConsoleWriter, increase some clickhouse timeout in test config, cause clickhouse-go/v2 got some timeouts during execution --- cmd/clickhouse-backup/main.go | 5 +- pkg/clickhouse/clickhouse.go | 6 +- pkg/log_helper/log_level.go | 1 - pkg/resumable/state.go | 5 +- test/integration/config-custom-kopia.yml | 54 +++++++-------- test/integration/config-custom-restic.yml | 52 +++++++-------- test/integration/config-custom-rsync.yml | 52 +++++++-------- test/integration/config-database-mapping.yml | 64 +++++++++--------- test/integration/config-s3-nodelete.yml | 70 ++++++++++---------- test/integration/config-s3.yml | 2 +- test/integration/integration_test.go | 7 +- 11 files changed, 158 insertions(+), 160 deletions(-) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 840d314b..93ca0acc 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -24,11 +24,12 @@ var ( func main() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack - //consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} //diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { // fmt.Printf("Logger Dropped %d messages", missed) //}) - log.Logger = zerolog.New(zerolog.SyncWriter(os.Stdout)).With().Timestamp().Caller().Logger() + log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Caller().Logger() + //zerolog.SetGlobalLevel(zerolog.Disabled) //log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger() stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() diff --git 
a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 9809e1b6..bcce072d 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -909,12 +909,10 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { var logF *zerolog.Event - // zerolog is not thread-safe https://github.com/rs/zerolog/issues/242 ;( - logger := ch.Logger.With().Logger() if !ch.Config.LogSQLQueries { - logF = logger.Debug() + logF = ch.Logger.Debug() } else { - logF = logger.Info() + logF = ch.Logger.Info() } if len(args) > 0 { logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) diff --git a/pkg/log_helper/log_level.go b/pkg/log_helper/log_level.go index 4429d87e..f9fbbaf8 100644 --- a/pkg/log_helper/log_level.go +++ b/pkg/log_helper/log_level.go @@ -19,5 +19,4 @@ func SetLogLevelFromString(logLevel string) { level = zerolog.InfoLevel } zerolog.SetGlobalLevel(level) - zerolog.SetGlobalLevel(zerolog.Disabled) } diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index e269a55a..d69819a5 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -105,13 +105,12 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { res := strings.Index(s.currentState, path+":") if res >= 0 { // s.logger is non thread-safe https://github.com/rs/zerolog/issues/242 - logger := s.logger.With().Logger() - logger.Info().Msgf("%s already processed", path) + s.logger.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) + s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() diff --git a/test/integration/config-custom-kopia.yml b/test/integration/config-custom-kopia.yml index 7c87410d..e5e9bfe2 100644 --- a/test/integration/config-custom-kopia.yml +++ b/test/integration/config-custom-kopia.yml @@ -1,27 +1,27 @@ -general: - disable_progress_bar: true - remote_storage: custom - upload_concurrency: 4 - download_concurrency: 4 - skip_tables: - - " system.*" - - "INFORMATION_SCHEMA.*" - - "information_schema.*" - restore_schema_on_cluster: "{cluster}" - use_resumable_state: false -clickhouse: - host: 127.0.0.1 - port: 9440 - username: backup - password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true - sync_replicated_tables: true - timeout: 1s - restart_command: bash -c 'echo "FAKE RESTART"' -custom: - # all `kopia` uploads are incremental - upload_command: /custom/kopia/upload.sh {{ .backupName }} - download_command: /custom/kopia/download.sh {{ .backupName }} - delete_command: /custom/kopia/delete.sh {{ .backupName }} - list_command: /custom/kopia/list.sh +general: + disable_progress_bar: true + remote_storage: custom + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + restore_schema_on_cluster: "{cluster}" + use_resumable_state: false +clickhouse: + host: 127.0.0.1 + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 2s + restart_command: bash -c 'echo "FAKE RESTART"' +custom: + # all `kopia` uploads are incremental + upload_command: /custom/kopia/upload.sh {{ .backupName }} 
+ download_command: /custom/kopia/download.sh {{ .backupName }} + delete_command: /custom/kopia/delete.sh {{ .backupName }} + list_command: /custom/kopia/list.sh diff --git a/test/integration/config-custom-restic.yml b/test/integration/config-custom-restic.yml index 22e30d17..d87379e1 100644 --- a/test/integration/config-custom-restic.yml +++ b/test/integration/config-custom-restic.yml @@ -1,26 +1,26 @@ -general: - disable_progress_bar: true - remote_storage: custom - upload_concurrency: 4 - download_concurrency: 4 - skip_tables: - - " system.*" - - "INFORMATION_SCHEMA.*" - - "information_schema.*" - restore_schema_on_cluster: "{cluster}" - use_resumable_state: false -clickhouse: - host: 127.0.0.1 - port: 9440 - username: backup - password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true - sync_replicated_tables: true - timeout: 1s - restart_command: bash -c 'echo "FAKE RESTART"' -custom: - upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }} - download_command: /custom/restic/download.sh {{ .backupName }} - delete_command: /custom/restic/delete.sh {{ .backupName }} - list_command: /custom/restic/list.sh +general: + disable_progress_bar: true + remote_storage: custom + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + restore_schema_on_cluster: "{cluster}" + use_resumable_state: false +clickhouse: + host: 127.0.0.1 + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 2s + restart_command: bash -c 'echo "FAKE RESTART"' +custom: + upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }} + download_command: /custom/restic/download.sh {{ .backupName }} + delete_command: /custom/restic/delete.sh {{ .backupName }} + list_command: /custom/restic/list.sh diff --git a/test/integration/config-custom-rsync.yml b/test/integration/config-custom-rsync.yml index 3443cbbf..72cc7fa7 100644 --- a/test/integration/config-custom-rsync.yml +++ b/test/integration/config-custom-rsync.yml @@ -1,26 +1,26 @@ -general: - disable_progress_bar: true - remote_storage: custom - upload_concurrency: 4 - download_concurrency: 4 - skip_tables: - - " system.*" - - "INFORMATION_SCHEMA.*" - - "information_schema.*" - restore_schema_on_cluster: "{cluster}" - use_resumable_state: false -clickhouse: - host: 127.0.0.1 - port: 9440 - username: backup - password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true - sync_replicated_tables: true - timeout: 1s - restart_command: bash -c 'echo "FAKE RESTART"' -custom: - upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }} - download_command: /custom/rsync/download.sh {{ .backupName }} - delete_command: /custom/rsync/delete.sh {{ .backupName }} - list_command: /custom/rsync/list.sh +general: + disable_progress_bar: true + remote_storage: custom + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + restore_schema_on_cluster: "{cluster}" + use_resumable_state: false +clickhouse: + host: 127.0.0.1 + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 2s + restart_command: bash -c 'echo "FAKE RESTART"' +custom: + upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }} + download_command: /custom/rsync/download.sh {{ .backupName }} + 
delete_command: /custom/rsync/delete.sh {{ .backupName }} + list_command: /custom/rsync/list.sh diff --git a/test/integration/config-database-mapping.yml b/test/integration/config-database-mapping.yml index 0c025aa7..4714541b 100644 --- a/test/integration/config-database-mapping.yml +++ b/test/integration/config-database-mapping.yml @@ -1,32 +1,32 @@ -general: - disable_progress_bar: true - remote_storage: s3 - upload_concurrency: 4 - download_concurrency: 4 - restore_schema_on_cluster: "{cluster}" - restore_database_mapping: - database1: default -clickhouse: - host: 127.0.0.1 - port: 9440 - username: backup - password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true - sync_replicated_tables: true - timeout: 1s - restart_command: bash -c 'echo "FAKE RESTART"' -s3: - access_key: access-key - secret_key: it-is-my-super-secret-key - bucket: clickhouse - endpoint: http://minio:9000 - acl: private - force_path_style: true - path: backup/{cluster}/{shard} - disable_ssl: true - compression_format: tar -api: - listen: :7171 - create_integration_tables: true - allow_parallel: true +general: + disable_progress_bar: true + remote_storage: s3 + upload_concurrency: 4 + download_concurrency: 4 + restore_schema_on_cluster: "{cluster}" + restore_database_mapping: + database1: default +clickhouse: + host: 127.0.0.1 + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 2s + restart_command: bash -c 'echo "FAKE RESTART"' +s3: + access_key: access-key + secret_key: it-is-my-super-secret-key + bucket: clickhouse + endpoint: http://minio:9000 + acl: private + force_path_style: true + path: backup/{cluster}/{shard} + disable_ssl: true + compression_format: tar +api: + listen: :7171 + create_integration_tables: true + allow_parallel: true diff --git a/test/integration/config-s3-nodelete.yml b/test/integration/config-s3-nodelete.yml index 72b58809..a4a483ad 100644 --- a/test/integration/config-s3-nodelete.yml +++ b/test/integration/config-s3-nodelete.yml @@ -1,35 +1,35 @@ -general: - disable_progress_bar: true - remote_storage: s3 - upload_concurrency: 4 - download_concurrency: 4 - skip_tables: - - " system.*" - - "INFORMATION_SCHEMA.*" - - "information_schema.*" - - "_temporary_and_external_tables.*" - restore_schema_on_cluster: "{cluster}" -clickhouse: - host: 127.0.0.1 - port: 9440 - username: backup - password: meow=& 123?*%# МЯУ - secure: true - skip_verify: true - sync_replicated_tables: true - timeout: 1s - restart_command: bash -c 'echo "FAKE RESTART"' -s3: - access_key: nodelete - secret_key: nodelete_password - bucket: clickhouse - endpoint: http://minio:9000 - acl: private - force_path_style: true - path: backup - disable_ssl: true - compression_format: tar -api: - listen: :7171 - create_integration_tables: true - allow_parallel: true +general: + disable_progress_bar: true + remote_storage: s3 + upload_concurrency: 4 + download_concurrency: 4 + skip_tables: + - " system.*" + - "INFORMATION_SCHEMA.*" + - "information_schema.*" + - "_temporary_and_external_tables.*" + restore_schema_on_cluster: "{cluster}" +clickhouse: + host: 127.0.0.1 + port: 9440 + username: backup + password: meow=& 123?*%# МЯУ + secure: true + skip_verify: true + sync_replicated_tables: true + timeout: 2s + restart_command: bash -c 'echo "FAKE RESTART"' +s3: + access_key: nodelete + secret_key: nodelete_password + bucket: clickhouse + endpoint: http://minio:9000 + acl: private + force_path_style: true + path: backup + disable_ssl: true + 
compression_format: tar +api: + listen: :7171 + create_integration_tables: true + allow_parallel: true diff --git a/test/integration/config-s3.yml b/test/integration/config-s3.yml index e58eb944..8e44ed15 100644 --- a/test/integration/config-s3.yml +++ b/test/integration/config-s3.yml @@ -17,7 +17,7 @@ clickhouse: secure: true skip_verify: true sync_replicated_tables: true - timeout: 1s + timeout: 2s restart_command: bash -c 'echo "FAKE RESTART"' backup_mutations: true s3: diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a5016b8a..63de1a68 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -396,9 +396,10 @@ var incrementData = []TestDataStruct{ func init() { zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs - log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}) - stdlog.SetOutput(log.Logger) zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Logger() + stdlog.SetOutput(log.Logger) logLevel := "info" if os.Getenv("LOG_LEVEL") != "" { logLevel = os.Getenv("LOG_LEVEL") @@ -1289,7 +1290,7 @@ func TestS3NoDeletePermission(t *testing.T) { r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 5*time.Second) defer ch.chbackend.Close() generateTestData(ch, r) r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", "no_delete_backup")) From fd90f25f8bbe6a96e3487ac971483da03b542175 Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 14 Jun 2023 10:19:34 +0400 Subject: [PATCH 11/21] debug thread-safe zerolog usage, https://github.com/rs/zerolog/issues/555, try to remove local logger in upload.go --- pkg/backup/upload.go | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 8553f351..0168229f 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -453,8 +453,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, table for disk := range table.Parts { capacity += len(table.Parts[disk]) } - logger := log.With().Str("logger", "uploadTableData").Logger() - logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) s := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency)) g, ctx := errgroup.WithContext(ctx) var uploadedBytes int64 @@ -479,7 +478,7 @@ breakByError: continue } if err := s.Acquire(ctx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Upload data parts: %v", err) + log.Error().Msgf("can't acquire semaphore during Upload data parts: %v", err) break breakByError } backupPath := b.getLocalBackupDataPathForTable(backupName, disk, dbAndTablePath) @@ -499,9 +498,9 @@ breakByError: return nil } } - logger.Debug().Msgf("start upload %d files to %s", len(partFiles), remotePath) + log.Debug().Msgf("start upload %d files to %s", len(partFiles), remotePath) if uploadPathBytes, err := 
b.dst.UploadPath(ctx, 0, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { - logger.Error().Msgf("UploadPath return error: %v", err) + log.Error().Msgf("UploadPath return error: %v", err) return fmt.Errorf("can't upload: %v", err) } else { atomic.AddInt64(&uploadedBytes, uploadPathBytes) @@ -509,7 +508,7 @@ breakByError: b.resumableState.AppendToState(remotePathFull, uploadPathBytes) } } - logger.Debug().Msgf("finish upload %d files to %s", len(partFiles), remotePath) + log.Debug().Msgf("finish upload %d files to %s", len(partFiles), remotePath) return nil }) } else { @@ -525,13 +524,13 @@ breakByError: return nil } } - logger.Debug().Msgf("start upload %d files to %s", len(localFiles), remoteDataFile) + log.Debug().Msgf("start upload %d files to %s", len(localFiles), remoteDataFile) retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile) }) if err != nil { - logger.Error().Msgf("UploadCompressedStream return error: %v", err) + log.Error().Msgf("UploadCompressedStream return error: %v", err) return fmt.Errorf("can't upload: %v", err) } remoteFile, err := b.dst.StatFile(ctx, remoteDataFile) @@ -542,7 +541,7 @@ breakByError: if b.resume { b.resumableState.AppendToState(remoteDataFile, remoteFile.Size()) } - logger.Debug().Msgf("finish upload to %s", remoteDataFile) + log.Debug().Msgf("finish upload to %s", remoteDataFile) return nil }) } @@ -551,7 +550,7 @@ breakByError: if err := g.Wait(); err != nil { return nil, 0, fmt.Errorf("one of uploadTableData go-routine return error: %v", err) } - logger.Debug().Msgf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) + log.Debug().Msgf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) return uploadedFiles, uploadedBytes, nil } From eea4f62e3396b099cc65eca54076cc5506219faa Mon Sep 17 00:00:00 2001 From: Slach Date: Wed, 14 Jun 2023 12:35:53 +0400 Subject: [PATCH 12/21] zerolog completelly useless for multiple go-routines ;( https://github.com/rs/zerolog/issues/555, try to remove all local loggers with context upload.go --- pkg/backup/backuper.go | 2 - pkg/backup/create.go | 88 ++++++-------- pkg/backup/delete.go | 17 ++- pkg/backup/download.go | 147 +++++++++++------------ pkg/backup/list.go | 29 ++--- pkg/backup/restore.go | 88 ++++++-------- pkg/backup/table_pattern.go | 3 +- pkg/backup/upload.go | 43 +++---- pkg/backup/watch.go | 14 ++- pkg/clickhouse/clickhouse.go | 62 +++++----- pkg/filesystemhelper/filesystemhelper.go | 23 ++-- pkg/metadata/load.go | 3 +- pkg/resumable/state.go | 19 ++- pkg/server/metrics/metrics.go | 17 ++- pkg/server/server.go | 138 ++++++++++----------- pkg/server/utils.go | 13 +- pkg/status/status.go | 26 ++-- pkg/storage/ftp.go | 19 ++- pkg/storage/general.go | 81 +++++-------- pkg/storage/s3.go | 19 ++- test/integration/integration_test.go | 1 - 21 files changed, 383 insertions(+), 469 deletions(-) diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index ee67b2e1..b3e9d26b 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -7,7 +7,6 @@ import ( 
"github.com/Altinity/clickhouse-backup/pkg/config" "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/storage" - "github.com/rs/zerolog/log" "path" ) @@ -26,7 +25,6 @@ type Backuper struct { func NewBackuper(cfg *config.Config) *Backuper { ch := &clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } return &Backuper{ cfg: cfg, diff --git a/pkg/backup/create.go b/pkg/backup/create.go index c492679e..9c5b2a94 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -6,7 +6,6 @@ import ( "errors" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" - "github.com/rs/zerolog" "os" "path" "path/filepath" @@ -50,7 +49,6 @@ func addTable(tables []clickhouse.Table, table clickhouse.Table) []clickhouse.Ta } func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []clickhouse.Table { - logger := log.With().Str("logger", "filterTablesByPattern").Logger() if tablePattern == "" { return tables } @@ -62,7 +60,7 @@ func filterTablesByPattern(tables []clickhouse.Table, tablePattern string) []cli if matched, _ := filepath.Match(strings.Trim(pattern, " \t\n\r"), tableName); matched { result = addTable(result, t) } else { - logger.Debug().Msgf("%s not matched with %s", tableName, pattern) + log.Debug().Msgf("%s not matched with %s", tableName, pattern) } } } @@ -90,10 +88,6 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st backupName = NewBackupName() } backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "create", - }).Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -140,9 +134,9 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st partitionsToBackupMap, partitions := filesystemhelper.CreatePartitionsToBackupMap(ctx, b.ch, tables, nil, partitions) // create if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, logger, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, tablePattern, partitions, partitionsToBackupMap, schemaOnly, rbacOnly, configsOnly, tables, allDatabases, allFunctions, disks, diskMap, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, logger, startBackup) + err = b.createBackupLocal(ctx, backupName, partitionsToBackupMap, tables, doBackupData, schemaOnly, rbacOnly, configsOnly, version, disks, diskMap, allDatabases, allFunctions, startBackup) } if err != nil { return err @@ -155,7 +149,7 @@ func (b *Backuper) CreateBackup(backupName, tablePattern string, partitions []st return nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger, startBackup time.Time) error { +func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, partitionsToBackupMap 
common.EmptyMap, tables []clickhouse.Table, doBackupData bool, schemaOnly bool, rbacOnly bool, configsOnly bool, version string, disks []clickhouse.Disk, diskMap map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, startBackup time.Time) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -172,7 +166,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } if _, err := os.Stat(backupPath); os.IsNotExist(err) { if err = filesystemhelper.Mkdir(backupPath, b.ch, disks); err != nil { - logger.Error().Msgf("can't create directory %s: %v", backupPath, err) + log.Error().Msgf("can't create directory %s: %v", backupPath, err) return err } } @@ -184,24 +178,23 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par case <-ctx.Done(): return ctx.Err() default: - tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Logger() if table.Skip { continue } var realSize map[string]int64 var disksToPartsMap map[string][]metadata.Part if doBackupData { - tableLog.Debug().Msg("create data") + log.Debug().Msg("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") disksToPartsMap, realSize, err = b.AddTableToBackup(ctx, backupName, shadowBackupUUID, disks, &table, partitionsToBackupMap) if err != nil { - tableLog.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } // fix corner cases after https://github.com/Altinity/clickhouse-backup/issues/379 if cleanShadowErr := b.Clean(ctx); cleanShadowErr != nil { - tableLog.Error().Msg(cleanShadowErr.Error()) + log.Error().Msg(cleanShadowErr.Error()) } return err } @@ -211,19 +204,19 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par } } // https://github.com/Altinity/clickhouse-backup/issues/529 - tableLog.Debug().Msg("get in progress mutations list") + log.Debug().Msg("get in progress mutations list") inProgressMutations := make([]metadata.MutationMetadata, 0) if b.cfg.ClickHouse.BackupMutations { inProgressMutations, err = b.ch.GetInProgressMutations(ctx, table.Database, table.Name) if err != nil { - tableLog.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } } - tableLog.Debug().Msg("create metadata") + log.Debug().Msg("create metadata") metadataSize, err := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ Table: table.Name, @@ -237,7 +230,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par }, disks) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - tableLog.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -246,35 +239,35 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName string, par Database: table.Database, Table: table.Name, }) - tableLog.Info().Msgf("done") + log.Info().Str("database", table.Database).Str("table", table.Name).Msgf("done") } } backupRBACSize, 
backupConfigSize := uint64(0), uint64(0) if rbacOnly { if backupRBACSize, err = b.createRBACBackup(ctx, backupPath, disks); err != nil { - logger.Error().Msgf("error during do RBAC backup: %v", err) + log.Error().Msgf("error during do RBAC backup: %v", err) } else { - logger.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createRBACBackup") + log.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createRBACBackup") } } if configsOnly { if backupConfigSize, err = b.createConfigBackup(ctx, backupPath); err != nil { - logger.Error().Msgf("error during do CONFIG backup: %v", err) + log.Error().Msgf("error during do CONFIG backup: %v", err) } else { - logger.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createConfigBackup") + log.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createConfigBackup") } } backupMetaFile := path.Join(defaultPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, logger); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, version, "regular", diskMap, disks, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions); err != nil { return err } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") + log.Info().Str("operation", "create").Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, logger zerolog.Logger, startBackup time.Time, backupVersion string) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePattern string, partitions []string, partitionsToBackupMap common.EmptyMap, schemaOnly, rbacOnly, configsOnly bool, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap map[string]string, startBackup time.Time, backupVersion string) error { if _, isBackupDiskExists := diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk]; !isBackupDiskExists { return fmt.Errorf("backup disk `%s` not exists in system.disks", b.cfg.ClickHouse.EmbeddedBackupDisk) } @@ -354,7 +347,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }{Size: 0}) } - logger.Debug().Msg("calculate parts list from embedded backup disk") + log.Debug().Msg("calculate parts list from embedded backup disk") for _, table := range tables { select { case <-ctx.Done(): @@ -366,7 +359,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa disksToPartsMap, err := b.getPartsFromBackupDisk(backupPath, table, partitionsToBackupMap) if err != nil { if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - logger.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -381,7 +374,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa }, disks) if err != nil { if removeBackupErr := 
b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - logger.Error().Msg(removeBackupErr.Error()) + log.Error().Msg(removeBackupErr.Error()) } return err } @@ -389,11 +382,11 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, tablePa } } backupMetaFile := path.Join(diskMap[b.cfg.ClickHouse.EmbeddedBackupDisk], backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions, logger); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, backupVersion, "embedded", diskMap, disks, backupDataSize[0].Size, backupMetadataSize, 0, 0, tableMetas, allDatabases, allFunctions); err != nil { return err } - logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "operation": "create_embedded", "duration": utils.HumanizeDuration(time.Since(startBackup)), }).Msg("done") @@ -438,14 +431,13 @@ func (b *Backuper) getPartsFromBackupDisk(backupPath string, table clickhouse.Ta } func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (uint64, error) { - logger := log.With().Str("logger", "createConfigBackup").Logger() select { case <-ctx.Done(): return 0, ctx.Err() default: backupConfigSize := uint64(0) configBackupPath := path.Join(backupPath, "configs") - logger.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) + log.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) copyErr := recursiveCopy.Copy(b.cfg.ClickHouse.ConfigDir, configBackupPath, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { backupConfigSize += uint64(srcinfo.Size()) @@ -457,7 +449,6 @@ func (b *Backuper) createConfigBackup(ctx context.Context, backupPath string) (u } func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { - logger := log.With().Str("logger", "createRBACBackup").Logger() select { case <-ctx.Done(): return 0, ctx.Err() @@ -468,7 +459,7 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk if err != nil { return 0, err } - logger.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) + log.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { rbacDataSize += uint64(srcinfo.Size()) @@ -480,17 +471,12 @@ func (b *Backuper) createRBACBackup(ctx context.Context, backupPath string, disk } func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsToBackupMap common.EmptyMap) (map[string][]metadata.Part, map[string]int64, error) { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "create", - "table": fmt.Sprintf("%s.%s", table.Database, table.Name), - }).Logger() if backupName == "" { return nil, nil, fmt.Errorf("backupName is not defined") } if !strings.HasSuffix(table.Engine, "MergeTree") && table.Engine != "MaterializedMySQL" && table.Engine != "MaterializedPostgreSQL" { - logger.Warn().Str("engine", table.Engine).Msg("supports only schema backup") + log.Warn().Str("engine", table.Engine).Msg("supports only schema backup") return nil, nil, nil } if b.cfg.ClickHouse.CheckPartsColumns { @@ 
-502,7 +488,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku if err := b.ch.FreezeTable(ctx, table, shadowBackupUUID); err != nil { return nil, nil, err } - logger.Debug().Msg("frozen") + log.Debug().Msg("frozen") version, err := b.ch.GetVersion(ctx) if err != nil { return nil, nil, err @@ -531,7 +517,7 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku } realSize[disk.Name] = size disksToPartsMap[disk.Name] = parts - logger.Debug().Str("disk", disk.Name).Msg("shadow moved") + log.Debug().Str("disk", disk.Name).Msg("shadow moved") // Clean all the files under the shadowPath, cause UNFREEZE unavailable if version < 21004000 { if err := os.RemoveAll(shadowPath); err != nil { @@ -546,11 +532,15 @@ func (b *Backuper) AddTableToBackup(ctx context.Context, backupName, shadowBacku return disksToPartsMap, realSize, err } } - logger.Debug().Msg("done") + log.Debug().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "create", + "table": fmt.Sprintf("%s.%s", table.Database, table.Name), + }).Msg("done") return disksToPartsMap, realSize, nil } -func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, logger zerolog.Logger) error { +func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, version, tags string, diskMap map[string]string, disks []clickhouse.Disk, backupDataSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function) error { select { case <-ctx.Done(): return ctx.Err() @@ -586,7 +576,7 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac return err } if err := filesystemhelper.Chown(backupMetaFile, b.ch, disks, false); err != nil { - logger.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) + log.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) } return nil } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index a457a04d..1355ff80 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -18,7 +18,6 @@ import ( // Clean - removed all data in shadow folder func (b *Backuper) Clean(ctx context.Context) error { - logger := log.With().Str("logger", "Clean").Logger() if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -36,7 +35,7 @@ func (b *Backuper) Clean(ctx context.Context) error { if err := b.cleanDir(shadowDir); err != nil { return fmt.Errorf("can't clean '%s': %v", shadowDir, err) } - logger.Info().Msg(shadowDir) + log.Info().Msg(shadowDir) } return nil } @@ -95,7 +94,6 @@ func (b *Backuper) RemoveOldBackupsLocal(ctx context.Context, keepLastBackup boo } func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "RemoveBackupLocal").Logger() var err error start := time.Now() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") @@ -120,13 +118,13 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis if disk.IsBackup { backupPath = path.Join(disk.Path, backupName) } - logger.Debug().Msgf("remove '%s'", backupPath) + 
log.Debug().Msgf("remove '%s'", backupPath) err = os.RemoveAll(backupPath) if err != nil { return err } } - logger.Info().Str("operation", "delete"). + log.Info().Str("operation", "delete"). Str("location", "local"). Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(start))). @@ -138,12 +136,11 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis } func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) error { - logger := log.With().Str("logger", "RemoveBackupRemote").Logger() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") start := time.Now() if b.cfg.General.RemoteStorage == "none" { err := errors.New("aborted: RemoteStorage set to \"none\"") - logger.Error().Msg(err.Error()) + log.Error().Msg(err.Error()) return err } if b.cfg.General.RemoteStorage == "custom" { @@ -164,7 +161,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } defer func() { if err := bd.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -175,10 +172,10 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er for _, backup := range backupList { if backup.BackupName == backupName { if err := bd.RemoveBackup(ctx, backup); err != nil { - logger.Warn().Msgf("bd.RemoveBackup return error: %v", err) + log.Warn().Msgf("bd.RemoveBackup return error: %v", err) return err } - logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "backup": backupName, "location": "remote", "operation": "delete", diff --git a/pkg/backup/download.go b/pkg/backup/download.go index a133a6b2..f5dca9f1 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -12,7 +12,6 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/resumable" "github.com/Altinity/clickhouse-backup/pkg/status" "github.com/eapache/go-resiliency/retrier" - "github.com/rs/zerolog" "io" "os" "path" @@ -37,10 +36,6 @@ var ( ) func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "download_legacy", - }).Logger() bd, err := storage.NewBackupDestination(ctx, b.cfg, b.ch, true, "") if err != nil { return err @@ -50,7 +45,7 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error } defer func() { if err := bd.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -60,7 +55,10 @@ func (b *Backuper) legacyDownload(ctx context.Context, backupName string) error if err != nil { return err } - logger.Info().Msg("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "download_legacy", + }).Msg("done") return nil } @@ -77,10 +75,6 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer b.ch.Close() - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "download", - }).Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for download, change you config or use REMOTE_STORAGE environment variable") } @@ -104,7 +98,7 @@ func (b 
*Backuper) Download(backupName string, tablePattern string, partitions [ if strings.Contains(localBackups[i].Tags, "embedded") || b.cfg.General.RemoteStorage == "custom" { return ErrBackupIsAlreadyExists } - logger.Warn().Msgf("%s already exists will try to resume download", backupName) + log.Warn().Msgf("%s already exists will try to resume download", backupName) } } } @@ -117,7 +111,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer func() { if err := b.dst.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -145,7 +139,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if schemaOnly { return fmt.Errorf("'%s' is old format backup and doesn't supports download of schema only", backupName) } - logger.Warn().Msgf("'%s' is old-format backup", backupName) + log.Warn().Msgf("'%s' is old-format backup", backupName) return b.legacyDownload(ctx, backupName) } if len(remoteBackup.Tables) == 0 && !b.cfg.General.AllowEmptyBackups { @@ -180,20 +174,19 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ }) } - logger.Debug().Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) + log.Debug().Str("backup", backupName).Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) downloadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) metadataGroup, metadataCtx := errgroup.WithContext(ctx) for i, t := range tablesForDownload { if err := downloadSemaphore.Acquire(metadataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) + log.Error().Msgf("can't acquire semaphore during Download metadata: %v", err) break } idx := i tableTitle := t metadataGroup.Go(func() error { defer downloadSemaphore.Release(1) - tableLogger := logger.With().Str("table_metadata", fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)).Logger() - downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableLogger, tableTitle, schemaOnly, partitions) + downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableTitle, schemaOnly, partitions) if err != nil { return err } @@ -210,11 +203,11 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ for disk := range t.Parts { if _, diskExists := b.DiskToPathMap[disk]; !diskExists && disk != b.cfg.ClickHouse.EmbeddedBackupDisk { b.DiskToPathMap[disk] = b.DiskToPathMap["default"] - logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) + log.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %s", t.Database, t.Table, disk, b.DiskToPathMap["default"]) } } } - logger.Debug().Msgf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) + 
log.Debug().Str("backupName", backupName).Msgf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) dataGroup, dataCtx := errgroup.WithContext(ctx) for i, tableMetadata := range tableMetadataAfterDownload { @@ -222,7 +215,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ continue } if err := downloadSemaphore.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Download table data: %v", err) + log.Error().Msgf("can't acquire semaphore during Download table data: %v", err) break } dataSize += tableMetadata.TotalBytes @@ -233,7 +226,8 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, tableMetadataAfterDownload[idx]); err != nil { return err } - logger.Info(). + log.Info(). + Str("backup_name", backupName). Str("operation", "download_data"). Str("table", fmt.Sprintf("%s.%s", tableMetadataAfterDownload[idx].Database, tableMetadataAfterDownload[idx].Table)). Str("duration", utils.HumanizeDuration(time.Since(start))). @@ -297,24 +291,26 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ b.resumableState.Close() } - logger.Info(). + log.Info(). + Str("operation", "download"). + Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(startDownload))). Str("size", utils.FormatBytes(dataSize+metadataSize+rbacSize+configSize)). Msg("done") return nil } -func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, logger zerolog.Logger, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { +func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { metadataLocalFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) tm := &metadata.TableMetadata{} if _, err := tm.Load(metadataLocalFile); err == nil { return tm, nil } - tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, logger.With().Fields(map[string]interface{}{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}).Logger(), tableTitle, false, nil) + tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, tableTitle, false, nil) return tm, err } -func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, logger zerolog.Logger, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { +func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string) (*metadata.TableMetadata, uint64, error) { start := time.Now() size := uint64(0) metadataFiles := map[string]string{} @@ -398,8 +394,9 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } - logger = logger.With().Logger() - logger.Info(). + log.Info(). + Str("operation", "download_metadata"). + Str("backup", backupName). Str("duration", utils.HumanizeDuration(time.Since(start))). 
Str("size", utils.FormatBytes(size)). Msg("done") @@ -415,7 +412,6 @@ func (b *Backuper) downloadConfigData(ctx context.Context, remoteBackup storage. } func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup storage.Backup, prefix string) (uint64, error) { - logger := log.With().Str("logger", "downloadBackupRelatedDir").Logger() archiveFile := fmt.Sprintf("%s.%s", prefix, b.cfg.GetArchiveExtension()) remoteFile := path.Join(remoteBackup.BackupName, archiveFile) if b.resume { @@ -426,7 +422,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st localDir := path.Join(b.DefaultDataPath, "backup", remoteBackup.BackupName, prefix) remoteFileInfo, err := b.dst.StatFile(ctx, remoteFile) if err != nil { - logger.Debug().Msgf("%s not exists on remote storage, skip download", remoteFile) + log.Debug().Msgf("%s not exists on remote storage, skip download", remoteFile) return 0, nil } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -443,7 +439,6 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { - logger := log.With().Str("logger", "downloadTableData").Logger() dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -456,7 +451,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. capacity += len(table.Files[disk]) downloadOffset[disk] = 0 } - logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorArchive: for common.SumMapValuesInt(downloadOffset) < capacity { for disk := range table.Files { @@ -465,7 +460,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } archiveFile := table.Files[disk][downloadOffset[disk]] if err := s.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore %s archive: %v", archiveFile, err) + log.Error().Msgf("can't acquire semaphore %s archive: %v", archiveFile, err) break breakByErrorArchive } tableLocalDir := b.getLocalBackupDataPathForTable(remoteBackup.BackupName, disk, dbAndTableDir) @@ -473,7 +468,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. tableRemoteFile := path.Join(remoteBackup.BackupName, "shadow", common.TablePathEncode(table.Database), common.TablePathEncode(table.Table), archiveFile) g.Go(func() error { defer s.Release(1) - logger.Debug().Msgf("start download %s", tableRemoteFile) + log.Debug().Msgf("start download %s", tableRemoteFile) if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { return nil } @@ -487,7 +482,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(tableRemoteFile, 0) } - logger.Debug().Msgf("finish download %s", tableRemoteFile) + log.Debug().Msgf("finish download %s", tableRemoteFile) return nil }) } @@ -497,7 +492,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
for disk := range table.Parts { capacity += len(table.Parts[disk]) } - logger.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) breakByErrorDirectory: for disk, parts := range table.Parts { @@ -513,13 +508,13 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } partRemotePath := path.Join(tableRemotePath, part.Name) if err := s.Acquire(dataCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore %s directory: %v", partRemotePath, err) + log.Error().Msgf("can't acquire semaphore %s directory: %v", partRemotePath, err) break breakByErrorDirectory } partLocalPath := path.Join(tableLocalPath, part.Name) g.Go(func() error { defer s.Release(1) - logger.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) if b.resume && b.resumableState.IsAlreadyProcessedBool(partRemotePath) { return nil } @@ -529,7 +524,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(partRemotePath, 0) } - logger.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) return nil }) } @@ -550,8 +545,10 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { - logger := log.With().Str("operation", "downloadDiffParts").Logger() - logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("start") + log.Debug(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Msg("start") start := time.Now() downloadedDiffParts := uint32(0) s := semaphore.NewWeighted(int64(b.cfg.General.DownloadConcurrency)) @@ -577,14 +574,14 @@ breakByError: } if err != nil && os.IsNotExist(err) { if err := s.Acquire(downloadDiffCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during downloadDiffParts: %v", err) + log.Error().Msgf("can't acquire semaphore during downloadDiffParts: %v", err) break breakByError } partForDownload := part diskForDownload := disk downloadDiffGroup.Go(func() error { defer s.Release(1) - tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload, logger) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload) if err != nil { return err } @@ -631,22 +628,26 @@ breakByError: if err := downloadDiffGroup.Wait(); err != nil { return fmt.Errorf("one of downloadDiffParts go-routine return error: %v", err) } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(start))).Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))).Msg("done") + log.Info(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))). 
+ Msg("done") return nil } func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLock *sync.Mutex, diffRemoteFilesCache map[string]*sync.Mutex, tableRemoteFile string, tableLocalDir string) error { - logger := log.With().Str("logger", "downloadDiffRemoteFile").Logger() diffRemoteFilesLock.Lock() namedLock, isCached := diffRemoteFilesCache[tableRemoteFile] if isCached { - logger.Debug().Msgf("wait download begin %s", tableRemoteFile) + log.Debug().Msgf("wait download begin %s", tableRemoteFile) namedLock.Lock() diffRemoteFilesLock.Unlock() namedLock.Unlock() - logger.Debug().Msgf("wait download end %s", tableRemoteFile) + log.Debug().Msgf("wait download end %s", tableRemoteFile) } else { - logger.Debug().Msgf("start download from %s", tableRemoteFile) + log.Debug().Msgf("start download from %s", tableRemoteFile) namedLock = &sync.Mutex{} diffRemoteFilesCache[tableRemoteFile] = namedLock namedLock.Lock() @@ -657,18 +658,18 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir) }) if err != nil { - logger.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } else { // remoteFile could be a directory if err := b.dst.DownloadPath(ctx, 0, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration); err != nil { - logger.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } namedLock.Unlock() - logger.Debug().Msgf("finish download from %s", tableRemoteFile) + log.Debug().Str("tableRemoteFile", tableRemoteFile).Msgf("finish download") } return nil } @@ -687,21 +688,21 @@ func (b *Backuper) checkNewPath(newPath string, part metadata.Part) error { return nil } -func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part, logger zerolog.Logger) (map[string]string, error) { +func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part) (map[string]string, error) { var requiredTable *metadata.TableMetadata - logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") requiredBackup, err := b.ReadBackupMetadataRemote(ctx, backup.RequiredBackup) if err != nil { return nil, err } - requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, logger, metadata.TableTitle{Database: table.Database, Table: table.Table}) + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, metadata.TableTitle{Database: table.Database, Table: table.Table}) if err != nil { - logger.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) + log.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, 
table.Table) return nil, err } // recursive find if part in RequiredBackup also Required - tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, logger, table, requiredTable, part, disk) + tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, table, requiredTable, part, disk) if found { return tableRemoteFiles, nil } @@ -742,18 +743,18 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat return nil, fmt.Errorf("%s.%s %s not found on %s and all required backups sequence", table.Database, table.Table, part.Name, requiredBackup.BackupName) } -func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, logger zerolog.Logger, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { - logger.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") +func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") found := false for _, requiredParts := range requiredTable.Parts { for _, requiredPart := range requiredParts { if requiredPart.Name == part.Name { found = true if requiredPart.Required { - tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part, logger) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part) if err != nil { found = false - logger.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) + log.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) } return tableRemoteFiles, found, err } @@ -768,8 +769,7 @@ func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metada } func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (map[string]string, error, bool) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Msg("start") tableRemoteFiles := make(map[string]string) // find same disk and part name archive if requiredBackup.DataFormat != "directory" { @@ -788,8 +788,7 @@ func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata } func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": 
"findDiffOnePartDirectory"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, remoteDisk, part.Name) tableRemoteFile := path.Join(tableRemotePath, "checksums.txt") @@ -797,8 +796,7 @@ func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup } func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - logger := log.With().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Logger() - logger.Debug().Msg("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) remoteExt := config.ArchiveExtensions[requiredBackup.DataFormat] tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, fmt.Sprintf("%s_%s.%s", remoteDisk, common.TablePathEncode(part.Name), remoteExt)) @@ -807,10 +805,9 @@ func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *m } func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metadata.BackupMetadata, tableRemoteFile string, tableRemotePath string, localDisk string, dbAndTableDir string, part metadata.Part) (string, string, error) { - logger := log.With().Str("logger", "findDiffFileExist").Logger() _, err := b.dst.StatFile(ctx, tableRemoteFile) if err != nil { - logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") return "", "", err } if tableLocalDir, diskExists := b.DiskToPathMap[localDisk]; !diskExists { @@ -821,7 +818,7 @@ func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metada } else { tableLocalDir = path.Join(tableLocalDir, "backup", requiredBackup.BackupName, "shadow", dbAndTableDir, localDisk) } - logger.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") return tableRemotePath, tableLocalDir, nil } } @@ -840,14 +837,13 @@ func (b *Backuper) ReadBackupMetadataRemote(ctx context.Context, backupName stri } func (b *Backuper) makePartHardlinks(exists, new string) error { - logger := log.With().Str("logger", "makePartHardlinks").Logger() ex, err := os.Open(exists) if err != nil { return err } defer func() { if err = ex.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", exists) + log.Warn().Msgf("Can't close %s", exists) } }() files, err := ex.Readdirnames(-1) @@ -855,7 +851,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { return err } if err := os.MkdirAll(new, 0750); err != nil { - logger.Warn().Msgf("MkDirAll(%s) error: %v", new, err) + log.Warn().Msgf("MkDirAll(%s) error: %v", 
new, err) return err } for _, f := range files { @@ -865,7 +861,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { existsFInfo, existsStatErr := os.Stat(existsF) newFInfo, newStatErr := os.Stat(newF) if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) { - logger.Warn().Msgf("Link %s -> %s error: %v", newF, existsF, err) + log.Warn().Msgf("Link %s -> %s error: %v", newF, existsF, err) return err } } @@ -877,7 +873,6 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - logger := log.With().Str("logger", "downloadSingleBackupFile").Logger() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { remoteReader, err := b.dst.GetFileReader(ctx, remoteFile) @@ -887,7 +882,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = remoteReader.Close() if err != nil { - logger.Warn().Msgf("can't close remoteReader %s", remoteFile) + log.Warn().Msgf("can't close remoteReader %s", remoteFile) } }() localWriter, err := os.OpenFile(localFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) @@ -898,7 +893,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = localWriter.Close() if err != nil { - logger.Warn().Msgf("can't close localWriter %s", localFile) + log.Warn().Msgf("can't close localWriter %s", localFile) } }() diff --git a/pkg/backup/list.go b/pkg/backup/list.go index d490a090..4caa5dab 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -35,7 +35,6 @@ func (b *Backuper) List(what, format string) error { return nil } func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) error { - logger := log.With().Str("logger", "printBackupsRemote").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -73,7 +72,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) size = "???" } if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, uploadDate, "remote", required, description); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } default: @@ -83,7 +82,6 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) } func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBackup, format string) error { - logger := log.With().Str("logger", "printBackupsLocal").Logger() switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -122,7 +120,7 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku size = "???" 
} if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, creationDate, "local", required, description); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } } @@ -134,7 +132,6 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku // PrintLocalBackups - print all backups stored locally func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { - logger := log.With().Str("logger", "PrintLocalBackups").Logger() if !b.ch.IsOpen { if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) @@ -144,7 +141,7 @@ func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, _, err := b.GetLocalBackups(ctx, nil) @@ -163,7 +160,6 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } defer b.ch.Close() } - logger := log.With().Str("logger", "GetLocalBackups").Logger() if disks == nil { disks, err = b.ch.GetDisks(ctx) if err != nil { @@ -244,7 +240,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) }) } if closeErr := d.Close(); closeErr != nil { - logger.Error().Msgf("can't close %s openError: %v", backupPath, closeErr) + log.Error().Msgf("can't close %s openError: %v", backupPath, closeErr) } } } @@ -262,10 +258,9 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { } defer b.ch.Close() } - logger := log.With().Str("logger", "PrintAllBackups").Logger() defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() localBackups, _, err := b.GetLocalBackups(ctx, nil) @@ -273,7 +268,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsLocal(ctx, w, localBackups, format); err != nil { - logger.Warn().Msgf("printBackupsLocal return error: %v", err) + log.Warn().Msgf("printBackupsLocal return error: %v", err) } if b.cfg.General.RemoteStorage != "none" { @@ -282,7 +277,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsRemote(w, remoteBackups, format); err != nil { - logger.Warn().Msgf("printBackupsRemote return error: %v", err) + log.Warn().Msgf("printBackupsRemote return error: %v", err) } } return nil @@ -296,11 +291,10 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error } defer b.ch.Close() } - logger := log.With().Str("logger", "PrintRemoteBackups").Logger() w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, err := b.GetRemoteBackups(ctx, true) @@ -392,7 +386,6 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { return fmt.Errorf("can't connect to clickhouse: %v", err) } defer b.ch.Close() - logger := log.With().Str("logger", "PrintTables").Logger() 
allTables, err := b.GetTables(ctx, tablePattern) if err != nil { return err @@ -412,16 +405,16 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern string) error { } if table.Skip { if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\tskip\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } continue } if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\t\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } if err := w.Flush(); err != nil { - logger.Error().Msgf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } return nil } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 207e0405..73233488 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "github.com/Altinity/clickhouse-backup/pkg/status" - "github.com/rs/zerolog" "os" "os/exec" "path" @@ -39,10 +38,6 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() doRestoreData := !schemaOnly || dataOnly if err := b.ch.Connect(); err != nil { @@ -60,7 +55,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { - logger.Warn().Msgf("%v", err) + log.Warn().Msgf("%v", err) return ErrUnknownClickhouseDataPath } backupMetafileLocalPaths := []string{path.Join(defaultDataPath, "backup", backupName, "metadata.json")} @@ -70,7 +65,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par if err == nil && embeddedBackupPath != "" { backupMetafileLocalPaths = append(backupMetafileLocalPaths, path.Join(embeddedBackupPath, backupName, "metadata.json")) } else if b.cfg.ClickHouse.UseEmbeddedBackupRestore && b.cfg.ClickHouse.EmbeddedBackupDisk == "" { - logger.Warn().Msgf("%v", err) + log.Warn().Msgf("%v", err) } else if err != nil { return err } @@ -106,7 +101,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } } if len(backupMetadata.Tables) == 0 { - logger.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) + log.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) if (!rbacOnly) && (!configsOnly) { return nil } @@ -129,13 +124,13 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par } if needRestart { - logger.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) + log.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) cmd, err := shellwords.Parse(b.ch.Config.RestartCommand) if err != nil { return err } ctx, cancel := context.WithTimeout(ctx, 180*time.Second) - logger.Info().Msgf("run %s", b.ch.Config.RestartCommand) + log.Info().Msgf("run %s", b.ch.Config.RestartCommand) var out []byte if len(cmd) > 1 { out, err = exec.CommandContext(ctx, cmd[0], cmd[1:]...).CombinedOutput() @@ 
-143,7 +138,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par out, err = exec.CommandContext(ctx, cmd[0]).CombinedOutput() } cancel() - logger.Debug().Msg(string(out)) + log.Debug().Msg(string(out)) return err } @@ -157,7 +152,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, par return err } } - logger.Info().Msg("done") + log.Info().Msg("done") return nil } @@ -215,14 +210,13 @@ func (b *Backuper) prepareRestoreDatabaseMapping(databaseMapping []string) error // restoreRBAC - copy backup_name>/rbac folder to access_data_path func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "restoreRBAC").Logger() accessPath, err := b.ch.GetAccessManagementPath(ctx, nil) if err != nil { return err } if err = b.restoreBackupRelatedDir(backupName, "access", accessPath, disks); err == nil { markFile := path.Join(accessPath, "need_rebuild_lists.mark") - logger.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) + log.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) file, err := os.Create(markFile) if err != nil { return err @@ -230,7 +224,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c _ = file.Close() _ = filesystemhelper.Chown(markFile, b.ch, disks, false) listFilesPattern := path.Join(accessPath, "*.list") - logger.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) + log.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) if listFiles, err := filepathx.Glob(listFilesPattern); err != nil { return err } else { @@ -257,7 +251,6 @@ func (b *Backuper) restoreConfigs(backupName string, disks []clickhouse.Disk) er } func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinationDir string, disks []clickhouse.Disk) error { - logger := log.With().Str("logger", "restoreBackupRelatedDir").Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -271,7 +264,7 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } - logger.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) + log.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) copyOptions := recursiveCopy.Options{OnDirExists: func(src, dest string) recursiveCopy.DirExistsAction { return recursiveCopy.Merge }} @@ -294,11 +287,6 @@ func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinat // RestoreSchema - restore schemas matched by tablePattern from backupName func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern string, dropTable, ignoreDependencies bool, disks []clickhouse.Disk, isEmbedded bool) error { - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() - defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -339,14 +327,14 @@ func (b *Backuper) RestoreSchema(ctx context.Context, backupName, tablePattern s if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, logger); dropErr != nil { + if 
dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version); dropErr != nil { return dropErr } var restoreErr error if isEmbedded { restoreErr = b.restoreSchemaEmbedded(ctx, backupName, tablesForRestore) } else { - restoreErr = b.restoreSchemaRegular(tablesForRestore, version, logger) + restoreErr = b.restoreSchemaRegular(tablesForRestore, version) } if restoreErr != nil { return restoreErr @@ -360,7 +348,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, return b.restoreEmbedded(ctx, backupName, true, tablesForRestore, nil) } -func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, logger zerolog.Logger) error { +func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int) error { totalRetries := len(tablesForRestore) restoreRetries := 0 isDatabaseCreated := common.EmptyMap{} @@ -389,7 +377,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i // https://github.com/Altinity/clickhouse-backup/issues/466 if b.cfg.General.RestoreSchemaOnCluster == "" && strings.Contains(schema.Query, "{uuid}") && strings.Contains(schema.Query, "Replicated") { if !strings.Contains(schema.Query, "UUID") { - logger.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") + log.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") } else { schema.Query = UUIDWithReplicatedMergeTreeRE.ReplaceAllString(schema.Query, "$1$2$3'$4'$5$4$7") } @@ -407,7 +395,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i schema.Database, schema.Table, restoreErr, restoreRetries, ) } else { - logger.Warn().Msgf( + log.Warn().Msgf( "can't create table '%s.%s': %v, will try again", schema.Database, schema.Table, restoreErr, ) } @@ -422,7 +410,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i return nil } -func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int, logger zerolog.Logger) error { +func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int) error { var dropErr error dropRetries := 0 totalRetries := len(tablesForDrop) @@ -463,7 +451,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci schema.Database, schema.Table, dropErr, dropRetries, ) } else { - logger.Warn().Msgf( + log.Warn().Msgf( "can't drop table '%s.%s': %v, will try again", schema.Database, schema.Table, dropErr, ) } @@ -481,10 +469,6 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci // RestoreData - restore data for tables matched by tablePattern from backupName func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePattern string, partitions []string, disks []clickhouse.Disk, isEmbedded bool) error { startRestore := time.Now() - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "restore", - }).Logger() defaultDataPath, err := b.ch.GetDefaultPath(disks) if err != nil { return ErrUnknownClickhouseDataPath @@ -517,16 +501,19 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, tablePatt if len(tablesForRestore) == 0 { return fmt.Errorf("no have found schemas by %s in %s", tablePattern, backupName) } - logger.Debug().Msgf("found %d tables with data in backup", len(tablesForRestore)) + log.Debug().Msgf("found %d tables with data in 
backup", len(tablesForRestore)) if isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, tablesForRestore, partitions) } else { - err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks, logger) + err = b.restoreDataRegular(ctx, backupName, tablePattern, tablesForRestore, diskMap, disks) } if err != nil { return err } - logger.Info().Str("duration", utils.HumanizeDuration(time.Since(startRestore))).Msg("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "restore_data", + }).Str("duration", utils.HumanizeDuration(time.Since(startRestore))).Msg("done") return nil } @@ -534,7 +521,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, t return b.restoreEmbedded(ctx, backupName, false, tablesForRestore, partitions) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk, logger zerolog.Logger) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, tablePattern string, tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreDatabaseMapping(tablePattern) } @@ -542,7 +529,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta if err != nil { return err } - disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, logger, disks) + disks = b.adjustDisksFromTablesWithSystemDisks(tablesForRestore, diskMap, disks) dstTablesMap := b.prepareDstTablesMap(chTables) missingTables := b.checkMissingTables(tablesForRestore, chTables) @@ -559,7 +546,6 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta tablesForRestore[i].Database = targetDB } } - tableLog := logger.With().Str("table", fmt.Sprintf("%s.%s", dstDatabase, table.Table)).Logger() dstTable, ok := dstTablesMap[metadata.TableTitle{ Database: dstDatabase, Table: table.Table}] @@ -568,41 +554,45 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ta } // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByAttach(ctx, backupName, table, disks, dstTable, tablesForRestore, i); err != nil { return err } } else { - if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, tableLog, tablesForRestore, i); err != nil { + if err = b.restoreDataRegularByParts(backupName, table, disks, dstTable, tablesForRestore, i); err != nil { return err } } // https://github.com/Altinity/clickhouse-backup/issues/529 for _, mutation := range table.Mutations { if err := b.ch.ApplyMutation(ctx, tablesForRestore[i], mutation); err != nil { - tableLog.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) + log.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[i].Database, tablesForRestore[i].Table, err) } } - tableLog.Info().Msg("done") + log.Info(). + Str("operation", "restore"). + Str("database", table.Database). + Str("table", table.Table). 
+ Msg("done") } return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } - logger.Debug().Msg("data to 'storage' copied") + log.Debug().Msg("data to 'storage' copied") if err := b.ch.AttachTable(ctx, tablesForRestore[i]); err != nil { return fmt.Errorf("can't attach table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } return nil } -func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger, tablesForRestore ListOfTables, i int) error { +func (b *Backuper) restoreDataRegularByParts(backupName string, table metadata.TableMetadata, disks []clickhouse.Disk, dstTable clickhouse.Table, tablesForRestore ListOfTables, i int) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, dstTable.DataPaths, b.ch, true); err != nil { return fmt.Errorf("can't copy data to datached '%s.%s': %v", table.Database, table.Table, err) } - logger.Debug().Msg("data to 'detached' copied") + log.Debug().Msg("data to 'detached' copied") if err := b.ch.AttachDataParts(tablesForRestore[i], disks); err != nil { return fmt.Errorf("can't attach data parts for table '%s.%s': %v", tablesForRestore[i].Database, tablesForRestore[i].Table, err) } @@ -643,11 +633,11 @@ func (b *Backuper) prepareDstTablesMap(chTables []clickhouse.Table) map[metadata return dstTablesMap } -func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore ListOfTables, diskMap map[string]string, logger zerolog.Logger, disks []clickhouse.Disk) []clickhouse.Disk { +func (b *Backuper) adjustDisksFromTablesWithSystemDisks(tablesForRestore ListOfTables, diskMap map[string]string, disks []clickhouse.Disk) []clickhouse.Disk { for _, t := range tablesForRestore { for disk := range t.Parts { if _, diskExists := diskMap[disk]; !diskExists { - logger.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) + log.Warn().Msgf("table '%s.%s' require disk '%s' that not found in clickhouse table system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will restored to %s", t.Database, t.Table, disk, diskMap["default"]) found := false for _, d := range disks { if d.Name == disk { diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index 26bf188c..093e4aaf 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -49,7 +49,6 @@ func addTableToListIfNotExistsOrEnrichQueryAndParts(tables ListOfTables, table m func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, metadataPath string, tablePattern string, dropTable bool, partitions []string) (ListOfTables, 
error) { result := ListOfTables{} tablePatterns := []string{"*"} - logger := log.With().Str("logger", "getTableListByPatternLocal").Logger() if tablePattern != "" { tablePatterns = strings.Split(tablePattern, ",") } @@ -110,7 +109,7 @@ func getTableListByPatternLocal(ctx context.Context, cfg *config.Config, ch *cli } dataParts, err := os.ReadDir(dataPartsPath) if err != nil { - logger.Warn().Msg(err.Error()) + log.Warn().Msg(err.Error()) } parts := map[string][]metadata.Part{ cfg.ClickHouse.EmbeddedBackupDisk: make([]metadata.Part, len(dataParts)), diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index 0168229f..7f7a84ac 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -56,10 +56,6 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.cfg.General.RemoteStorage == "custom" { return custom.Upload(ctx, b.cfg, backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly) } - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "upload", - }).Logger() if _, disks, err = b.getLocalBackup(ctx, backupName, nil); err != nil { return fmt.Errorf("can't find local backup: %v", err) } @@ -68,7 +64,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str } defer func() { if err := b.dst.Close(ctx); err != nil { - logger.Warn().Msgf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -81,7 +77,7 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if !b.resume { return fmt.Errorf("'%s' already exists on remote storage", backupName) } else { - logger.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) + log.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) } } } @@ -125,13 +121,13 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str compressedDataSize := int64(0) metadataSize := int64(0) - logger.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) + log.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) uploadSemaphore := semaphore.NewWeighted(int64(b.cfg.General.UploadConcurrency)) uploadGroup, uploadCtx := errgroup.WithContext(ctx) for i, table := range tablesForUpload { if err := uploadSemaphore.Acquire(uploadCtx, 1); err != nil { - logger.Error().Msgf("can't acquire semaphore during Upload table: %v", err) + log.Error().Msgf("can't acquire semaphore during Upload table: %v", err) break } start := time.Now() @@ -147,9 +143,6 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str idx := i uploadGroup.Go(func() error { defer uploadSemaphore.Release(1) - uploadLogger := logger.With(). - Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)). - Logger() var uploadedBytes int64 if !schemaOnly { var files map[string][]string @@ -166,8 +159,8 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str return err } atomic.AddInt64(&metadataSize, tableMetadataSize) - uploadLogger.Info(). - Str("duration", utils.HumanizeDuration(time.Since(start))). + log.Info(). 
+ Str("table", fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table)).Str("duration", utils.HumanizeDuration(time.Since(start))). Str("size", utils.FormatBytes(uint64(uploadedBytes+tableMetadataSize))). Msg("done") return nil @@ -232,7 +225,11 @@ func (b *Backuper) Upload(backupName, diffFrom, diffFromRemote, tablePattern str if b.resume { b.resumableState.Close() } - logger.Info(). + log.Info(). + Fields(map[string]interface{}{ + "backup": backupName, + "operation": "upload", + }). Str("duration", utils.HumanizeDuration(time.Since(startUpload))). Str("size", utils.FormatBytes(uint64(compressedDataSize)+uint64(metadataSize)+uint64(len(newBackupMetadataBody))+backupMetadata.RBACSize+backupMetadata.ConfigSize)). Msg("done") @@ -248,14 +245,13 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote if b.resume && b.resumableState.IsAlreadyProcessedBool(remoteFile) { return nil } - logger := log.With().Str("logger", "uploadSingleBackupFile").Logger() f, err := os.Open(localFile) if err != nil { return fmt.Errorf("can't open %s: %v", localFile, err) } defer func() { if err := f.Close(); err != nil { - logger.Warn().Msgf("can't close %v: %v", f, err) + log.Warn().Msgf("can't close %v: %v", f, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -350,7 +346,6 @@ func (b *Backuper) getTablesForUploadDiffRemote(ctx context.Context, diffFromRem } func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, diffFrom string, diffFromRemote string) error { - logger := log.With().Str("logger", "validateUploadParams").Logger() if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for upload, change you config or use REMOTE_STORAGE environment variable") } @@ -371,7 +366,7 @@ func (b *Backuper) validateUploadParams(ctx context.Context, backupName string, return fmt.Errorf("%s->`compression_format`=%s incompatible with general->upload_by_part=%v", b.cfg.General.RemoteStorage, b.cfg.GetCompressionFormat(), b.cfg.General.UploadByPart) } if (diffFrom != "" || diffFromRemote != "") && b.cfg.ClickHouse.UseEmbeddedBackupRestore { - logger.Warn().Msgf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") + log.Warn().Msgf("--diff-from and --diff-from-remote not compatible with backups created with `use_embedded_backup_restore: true`") } if b.cfg.General.RemoteStorage == "custom" && b.resume { return fmt.Errorf("can't resume for `remote_storage: custom`") @@ -597,7 +592,6 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s return processedSize, nil } } - logger := log.With().Str("logger", "uploadTableMetadataEmbedded").Logger() localTableMetaFile := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableMetadata.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableMetadata.Table))) localReader, err := os.Open(localTableMetaFile) if err != nil { @@ -605,7 +599,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } defer func() { if err := localReader.Close(); err != nil { - logger.Warn().Msgf("can't close %v: %v", localReader, err) + log.Warn().Msgf("can't close %v: %v", localReader, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -626,7 +620,6 @@ func (b *Backuper) 
uploadTableMetadataEmbedded(ctx context.Context, backupName s } func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTable *metadata.TableMetadata, newTable *metadata.TableMetadata, checkLocal bool) { - logger := log.With().Str("logger", "markDuplicatedParts").Logger() for disk, newParts := range newTable.Parts { if _, diskExists := existsTable.Parts[disk]; diskExists { if len(existsTable.Parts[disk]) == 0 { @@ -646,7 +639,7 @@ func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTa newPath := path.Join(b.DiskToPathMap[disk], "backup", backup.BackupName, "shadow", dbAndTablePath, disk, newParts[i].Name) if err := filesystemhelper.IsDuplicatedParts(existsPath, newPath); err != nil { - logger.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) + log.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) continue } } @@ -706,7 +699,6 @@ func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part) ([]met } func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - logger := log.With().Str("logger", "splitFilesByName").Logger() result := make([]metadata.SplitPartFiles, 0) for i := range parts { if parts[i].Required { @@ -726,7 +718,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - logger.Warn().Msgf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } result = append(result, metadata.SplitPartFiles{ Prefix: parts[i].Name, @@ -737,7 +729,6 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m } func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - logger := log.With().Str("logger", "splitFilesBySize").Logger() var size int64 var files []string maxSize := b.cfg.General.MaxFileSize @@ -770,7 +761,7 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - logger.Warn().Msgf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } } if len(files) > 0 { diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index e8c51b45..6a13e4f3 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -108,10 +108,6 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } } backupName, err := b.NewBackupWatchName(ctx, backupType) - logger := log.With().Fields(map[string]interface{}{ - "backup": backupName, - "operation": "watch", - }).Logger() if err != nil { return err } @@ -130,14 +126,20 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t } else { createRemoteErr = b.CreateToRemote(backupName, "", diffFromRemote, tablePattern, partitions, schemaOnly, rbac, backupConfig, skipCheckPartsColumns, false, version, commandId) if createRemoteErr != nil { - logger.Error().Msgf("create_remote %s return error: %v", backupName, createRemoteErr) + log.Error().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "watch", + }).Msgf("create_remote %s return error: %v", backupName, createRemoteErr) createRemoteErrCount += 1 } else { createRemoteErrCount = 0 } deleteLocalErr = b.RemoveBackupLocal(ctx, backupName, nil) if deleteLocalErr != nil { - logger.Error().Msgf("delete local %s return error: %v", backupName, deleteLocalErr) + log.Error().Fields(map[string]interface{}{ + 
"backup": backupName, + "operation": "watch", + }).Msgf("delete local %s return error: %v", backupName, deleteLocalErr) deleteLocalErrCount += 1 } else { deleteLocalErrCount = 0 diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index bcce072d..b1ea8860 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -21,6 +21,7 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/metadata" "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" + "github.com/rs/zerolog/log" ) // ClickHouse - provide @@ -28,7 +29,6 @@ type ClickHouse struct { Config *config.ClickHouseConfig conn driver.Conn disks []Disk - Logger zerolog.Logger version int isPartsColumnPresent int8 IsOpen bool @@ -38,7 +38,7 @@ type ClickHouse struct { func (ch *ClickHouse) Connect() error { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Logger.Error().Msgf("close previous connection error: %v", err) + log.Error().Msgf("close previous connection error: %v", err) } } ch.IsOpen = false @@ -78,7 +78,7 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCert != "" || ch.Config.TLSKey != "" { cert, err := tls.LoadX509KeyPair(ch.Config.TLSCert, ch.Config.TLSKey) if err != nil { - ch.Logger.Error().Msgf("tls.LoadX509KeyPair error: %v", err) + log.Error().Msgf("tls.LoadX509KeyPair error: %v", err) return err } tlsConfig.Certificates = []tls.Certificate{cert} @@ -86,12 +86,12 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCa != "" { caCert, err := os.ReadFile(ch.Config.TLSCa) if err != nil { - ch.Logger.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) + log.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) return err } caCertPool := x509.NewCertPool() if caCertPool.AppendCertsFromPEM(caCert) != true { - ch.Logger.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) + log.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) return fmt.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) } tlsConfig.RootCAs = caCertPool @@ -104,18 +104,18 @@ func (ch *ClickHouse) Connect() error { } if ch.conn, err = clickhouse.Open(opt); err != nil { - ch.Logger.Error().Msgf("clickhouse connection: %s, clickhouse.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection: %s, clickhouse.Open return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } - logFunc := ch.Logger.Info() + logFunc := log.Info() if !ch.Config.LogSQLQueries { - logFunc = ch.Logger.Debug() + logFunc = log.Debug() } logFunc.Stack().Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v?timeout=%v", ch.Config.Host, ch.Config.Port, ch.Config.Timeout)) err = ch.conn.Ping(context.Background()) if err != nil { - ch.Logger.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) return err } else { ch.IsOpen = true @@ -259,13 +259,13 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro func (ch *ClickHouse) Close() { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Logger.Warn().Msgf("can't close clickhouse connection: %v", err) + log.Warn().Msgf("can't close clickhouse connection: %v", err) } } if 
ch.Config.LogSQLQueries { - ch.Logger.Info().Msg("clickhouse connection closed") + log.Info().Msg("clickhouse connection closed") } else { - ch.Logger.Debug().Msg("clickhouse connection closed") + log.Debug().Msg("clickhouse connection closed") } ch.IsOpen = false } @@ -461,7 +461,7 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl var result string // 19.4 doesn't have /var/lib/clickhouse/metadata/default.sql if err := ch.SelectSingleRow(ctx, &result, showDatabaseSQL); err != nil { - ch.Logger.Warn().Msgf("can't get create database query: %v", err) + log.Warn().Msgf("can't get create database query: %v", err) allDatabases[i].Query = fmt.Sprintf("CREATE DATABASE `%s` ENGINE = %s", db.Name, db.Engine) } else { // 23.3+ masked secrets https://github.com/Altinity/clickhouse-backup/issues/640 @@ -486,7 +486,7 @@ func (ch *ClickHouse) getTableSizeFromParts(ctx context.Context, table Table) ui } query := fmt.Sprintf("SELECT sum(bytes_on_disk) as size FROM system.parts WHERE active AND database='%s' AND table='%s' GROUP BY database, table", table.Database, table.Name) if err := ch.SelectContext(ctx, &tablesSize, query); err != nil { - ch.Logger.Warn().Msgf("error parsing tablesSize: %v", err) + log.Warn().Msgf("error parsing tablesSize: %v", err) } if len(tablesSize) > 0 { return tablesSize[0].Size @@ -517,7 +517,7 @@ func (ch *ClickHouse) fixVariousVersions(ctx context.Context, t Table, metadataP if strings.Contains(t.CreateTableQuery, "'[HIDDEN]'") { tableSQLPath := path.Join(metadataPath, common.TablePathEncode(t.Database), common.TablePathEncode(t.Name)+".sql") if attachSQL, err := os.ReadFile(tableSQLPath); err != nil { - ch.Logger.Warn().Msgf("can't read %s: %v", tableSQLPath, err) + log.Warn().Msgf("can't read %s: %v", tableSQLPath, err) } else { t.CreateTableQuery = strings.Replace(string(attachSQL), "ATTACH", "CREATE", 1) t.CreateTableQuery = strings.Replace(t.CreateTableQuery, " _ ", " `"+t.Database+"`.`"+t.Name+"` ", 1) @@ -536,7 +536,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { var err error query := "SELECT value FROM `system`.`build_options` where name='VERSION_INTEGER'" if err = ch.SelectSingleRow(ctx, &result, query); err != nil { - ch.Logger.Warn().Msgf("can't get ClickHouse version: %v", err) + log.Warn().Msgf("can't get ClickHouse version: %v", err) return 0, nil } ch.version, err = strconv.Atoi(result) @@ -567,7 +567,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name withNameQuery = fmt.Sprintf("WITH NAME '%s'", name) } for _, item := range partitions { - ch.Logger.Debug().Msgf(" partition '%v'", item.PartitionID) + log.Debug().Msgf(" partition '%v'", item.PartitionID) query := fmt.Sprintf( "ALTER TABLE `%v`.`%v` FREEZE PARTITION ID '%v' %s;", table.Database, @@ -585,7 +585,7 @@ func (ch *ClickHouse) FreezeTableOldWay(ctx context.Context, table *Table, name } if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Logger.Warn().Msgf("can't freeze partition: %v", err) + log.Warn().Msgf("can't freeze partition: %v", err) } else { return fmt.Errorf("can't freeze partition '%s': %w", item.PartitionID, err) } @@ -604,9 +604,9 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string if strings.HasPrefix(table.Engine, "Replicated") && ch.Config.SyncReplicatedTables { query := fmt.Sprintf("SYSTEM SYNC REPLICA `%s`.`%s`;", 
table.Database, table.Name) if err := ch.QueryContext(ctx, query); err != nil { - ch.Logger.Warn().Msgf("can't sync replica: %v", err) + log.Warn().Msgf("can't sync replica: %v", err) } else { - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") } } if version < 19001005 || ch.Config.FreezeByPart { @@ -619,7 +619,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string query := fmt.Sprintf("ALTER TABLE `%s`.`%s` FREEZE %s;", table.Database, table.Name, withNameQuery) if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Logger.Warn().Msgf("can't freeze table: %v", err) + log.Warn().Msgf("can't freeze table: %v", err) return nil } return fmt.Errorf("can't freeze table: %v", err) @@ -643,7 +643,7 @@ func (ch *ClickHouse) AttachDataParts(table metadata.TableMetadata, disks []Disk if err := ch.Query(query); err != nil { return err } - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk.Name).Str("part", part.Name).Msg("attached") } } } @@ -656,7 +656,7 @@ var uuidRE = regexp.MustCompile(`UUID '([^']+)'`) // AttachTable - execute ATTACH TABLE command for specific table func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetadata) error { if len(table.Parts) == 0 { - ch.Logger.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) + log.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) return nil } canContinue, err := ch.CheckReplicationInProgress(table) @@ -703,7 +703,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad return err } - ch.Logger.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") return nil } func (ch *ClickHouse) ShowCreateTable(ctx context.Context, database, name string) string { @@ -834,7 +834,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe if onCluster != "" && distributedRE.MatchString(query) { matches := distributedRE.FindAllStringSubmatch(query, -1) if onCluster != strings.Trim(matches[0][2], "'\" ") { - ch.Logger.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) + log.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) query = distributedRE.ReplaceAllString(query, fmt.Sprintf("${1}(%s,${3})", onCluster)) } } @@ -857,7 +857,7 @@ func (ch *ClickHouse) IsClickhouseShadow(path string) bool { } defer func() { if err := d.Close(); err != nil { - ch.Logger.Warn().Msgf("can't close directory %v", err) + log.Warn().Msgf("can't close directory %v", err) } }() names, err := d.Readdirnames(-1) @@ -910,9 +910,9 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { var logF *zerolog.Event if !ch.Config.LogSQLQueries { - logF = ch.Logger.Debug() + logF = log.Debug() } else { - logF = ch.Logger.Info() + logF = log.Info() } if 
len(args) > 0 { logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) @@ -1049,10 +1049,10 @@ func (ch *ClickHouse) CheckReplicationInProgress(table metadata.TableMetadata) ( return false, fmt.Errorf("invalid result for check exists replicas: %+v", existsReplicas) } if existsReplicas[0].InProgress > 0 { - ch.Logger.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) + log.Warn().Msgf("%s.%s skipped cause system.replicas entry already exists and replication in progress from another replica", table.Database, table.Table) return false, nil } else { - ch.Logger.Info().Msgf("replication_in_progress status = %+v", existsReplicas) + log.Info().Msgf("replication_in_progress status = %+v", existsReplicas) } } return true, nil @@ -1089,7 +1089,7 @@ func (ch *ClickHouse) CheckSystemPartsColumns(ctx context.Context, table *Table) } if len(isPartsColumnsInconsistent) > 0 { for i := range isPartsColumnsInconsistent { - ch.Logger.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) + log.Error().Msgf("`%s`.`%s` have inconsistent data types %#v for \"%s\" column", table.Database, table.Name, isPartsColumnsInconsistent[i].Types, isPartsColumnsInconsistent[i].Column) } return fmt.Errorf("`%s`.`%s` have inconsistent data types for active data part in system.parts_columns", table.Database, table.Name) } diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 2321cea5..d71662fd 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -119,12 +119,11 @@ func MkdirAll(path string, ch *clickhouse.ClickHouse, disks []clickhouse.Disk) e // HardlinkBackupPartsToStorage - copy parts for specific table to detached folder func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, tableDataPaths []string, ch *clickhouse.ClickHouse, toDetached bool) error { dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) - logger := log.With().Fields(map[string]interface{}{"operation": "HardlinkBackupPartsToStorage"}).Logger() start := time.Now() for _, backupDisk := range disks { backupDiskName := backupDisk.Name if len(backupTable.Parts[backupDiskName]) == 0 { - logger.Debug().Msgf("%s disk have no parts", backupDisk.Name) + log.Debug().Msgf("%s disk have no parts", backupDisk.Name) continue } dstParentDir := dstDataPaths[backupDiskName] @@ -136,9 +135,9 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM info, err := os.Stat(dstPartPath) if err != nil { if os.IsNotExist(err) { - logger.Debug().Msgf("MkDirAll %s", dstPartPath) + log.Debug().Msgf("MkDirAll %s", dstPartPath) if mkdirErr := MkdirAll(dstPartPath, ch, disks); mkdirErr != nil { - logger.Warn().Msgf("error during Mkdir %+v", mkdirErr) + log.Warn().Msgf("error during Mkdir %+v", mkdirErr) } } else { return err @@ -159,14 +158,14 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM filename := strings.Trim(strings.TrimPrefix(filePath, partPath), "/") dstFilePath := filepath.Join(dstPartPath, filename) if info.IsDir() { - logger.Debug().Msgf("MkDir %s", dstFilePath) + log.Debug().Msgf("MkDir %s", dstFilePath) return Mkdir(dstFilePath, ch, disks) } if 
!info.Mode().IsRegular() { - logger.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) return nil } - logger.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) + log.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) if err := os.Link(filePath, dstFilePath); err != nil { if !os.IsExist(err) { return fmt.Errorf("failed to create hard link '%s' -> '%s': %w", filePath, dstFilePath, err) @@ -178,7 +177,7 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM } } } - logger.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") + log.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") return nil } @@ -194,7 +193,6 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa } func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap) ([]metadata.Part, int64, error) { - logger := log.With().Str("logger", "MoveShadow").Logger() size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { @@ -221,7 +219,7 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E return os.MkdirAll(dstFilePath, 0750) } if !info.Mode().IsRegular() { - logger.Debug().Msgf("'%s' is not a regular file, skipping", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping", filePath) return nil } size += info.Size() @@ -231,14 +229,13 @@ func MoveShadow(shadowPath, backupPartsPath string, partitionsBackupMap common.E } func IsDuplicatedParts(part1, part2 string) error { - logger := log.With().Str("logger", "IsDuplicatedParts").Logger() p1, err := os.Open(part1) if err != nil { return err } defer func() { if err = p1.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", part1) + log.Warn().Msgf("Can't close %s", part1) } }() p2, err := os.Open(part2) @@ -247,7 +244,7 @@ func IsDuplicatedParts(part1, part2 string) error { } defer func() { if err = p2.Close(); err != nil { - logger.Warn().Msgf("Can't close %s", part2) + log.Warn().Msgf("Can't close %s", part2) } }() pf1, err := p1.Readdirnames(-1) diff --git a/pkg/metadata/load.go b/pkg/metadata/load.go index f996da92..2e18cf4d 100644 --- a/pkg/metadata/load.go +++ b/pkg/metadata/load.go @@ -7,7 +7,6 @@ import ( ) func (tm *TableMetadata) Load(location string) (uint64, error) { - logger := log.With().Str("logger", "metadata.Load").Logger() data, err := os.ReadFile(location) if err != nil { return 0, err @@ -15,6 +14,6 @@ func (tm *TableMetadata) Load(location string) (uint64, error) { if err := json.Unmarshal(data, tm); err != nil { return 0, err } - logger.Debug().Msgf("success %s", location) + log.Debug().Str("operation", "TableMetadata.Load").Msgf("success %s", location) return uint64(len(data)), nil } diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index d69819a5..5d8f52d9 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -9,7 +9,6 @@ import ( "strings" "sync" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -17,7 +16,6 @@ type State struct { stateFile string currentState string params map[string]interface{} - logger zerolog.Logger fp *os.File mx *sync.RWMutex } @@ -27,11 +25,10 @@ func NewState(defaultDiskPath, backupName, command string, params map[string]int stateFile: path.Join(defaultDiskPath, "backup", backupName, fmt.Sprintf("%s.state", command)), currentState: "", mx: &sync.RWMutex{}, 
- logger: log.Logger.With().Str("logger", "resumable").Logger(), } fp, err := os.OpenFile(s.stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil { - s.logger.Warn().Msgf("can't open %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't open %s error: %v", s.stateFile, err) } s.fp = fp s.LoadState() @@ -57,7 +54,7 @@ func (s *State) LoadParams() { //size 0 during write lines[0] = strings.TrimSuffix(lines[0], ":0") if err := json.Unmarshal([]byte(lines[0]), &s.params); err != nil { - s.logger.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) + log.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) } } @@ -69,9 +66,9 @@ func (s *State) LoadState() { } else { s.currentState = "" if !os.IsNotExist(err) { - s.logger.Warn().Msgf("can't read %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't read %s error: %v", s.stateFile, err) } else { - s.logger.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) + log.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) } } s.mx.Unlock() @@ -83,11 +80,11 @@ func (s *State) AppendToState(path string, size int64) { if s.fp != nil { _, err := s.fp.WriteString(path + "\n") if err != nil { - s.logger.Warn().Msgf("can't write %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't write %s error: %v", s.stateFile, err) } err = s.fp.Sync() if err != nil { - s.logger.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) } } s.currentState += path + "\n" @@ -105,12 +102,12 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { res := strings.Index(s.currentState, path+":") if res >= 0 { // s.logger is non thread-safe https://github.com/rs/zerolog/issues/242 - s.logger.Info().Msgf("%s already processed", path) + log.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) + log.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() diff --git a/pkg/server/metrics/metrics.go b/pkg/server/metrics/metrics.go index 062656a6..ecbca8bf 100644 --- a/pkg/server/metrics/metrics.go +++ b/pkg/server/metrics/metrics.go @@ -2,7 +2,6 @@ package metrics import ( "fmt" - "github.com/rs/zerolog" "time" "github.com/prometheus/client_golang/prometheus" @@ -34,7 +33,6 @@ type APIMetrics struct { NumberBackupsLocalExpected prometheus.Gauge SubCommands map[string][]string - logger zerolog.Logger } func NewAPIMetrics() *APIMetrics { @@ -43,7 +41,6 @@ func NewAPIMetrics() *APIMetrics { "create_remote": {"create", "upload"}, "restore_remote": {"download", "restore"}, }, - logger: log.With().Str("logger", "metrics").Logger(), } return metrics } @@ -175,7 +172,7 @@ func (m *APIMetrics) Start(command string, startTime time.Time) { } } } else { - m.logger.Warn().Msgf("%s not found in LastStart metrics", command) + log.Warn().Msgf("%s not found in LastStart metrics", command) } } func (m *APIMetrics) Finish(command string, startTime time.Time) { @@ -191,19 +188,19 @@ func (m *APIMetrics) Finish(command string, startTime time.Time) { } } } else { - m.logger.Warn().Msgf("%s not found in LastFinish", command) + log.Warn().Msgf("%s not found in LastFinish", command) } } func (m *APIMetrics) Success(command string) { if _, exists := 
m.SuccessfulCounter[command]; exists { m.SuccessfulCounter[command].Inc() } else { - m.logger.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) + log.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(1) } else { - m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -211,12 +208,12 @@ func (m *APIMetrics) Failure(command string) { if _, exists := m.FailedCounter[command]; exists { m.FailedCounter[command].Inc() } else { - m.logger.Warn().Msgf("%s not found in FailedCounter metrics", command) + log.Warn().Msgf("%s not found in FailedCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(0) } else { - m.logger.Warn().Msgf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -226,7 +223,7 @@ func (m *APIMetrics) ExecuteWithMetrics(command string, errCounter int, f func() err := f() m.Finish(command, startTime) if err != nil { - m.logger.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) + log.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) errCounter += 1 m.Failure(command) } else { diff --git a/pkg/server/server.go b/pkg/server/server.go index bf455402..0e4d1d16 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -6,7 +6,6 @@ import ( "encoding/json" "errors" "fmt" - "github.com/rs/zerolog" "io" "net/http" "net/http/pprof" @@ -44,7 +43,6 @@ type APIServer struct { server *http.Server restart chan struct{} metrics *metrics.APIMetrics - log zerolog.Logger routes []string clickhouseBackupVersion string } @@ -55,25 +53,23 @@ var ( // Run - expose CLI commands as REST API func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBackupVersion string) error { - logger := log.With().Str("logger", "server.Run").Logger() var ( cfg *config.Config err error ) - logger.Debug().Msg("Wait for ClickHouse") + log.Debug().Msg("Wait for ClickHouse") for { cfg, err = config.LoadConfig(configPath) if err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } ch := clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() time.Sleep(5 * time.Second) continue } @@ -88,16 +84,15 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack restart: make(chan struct{}), clickhouseBackupVersion: clickhouseBackupVersion, metrics: metrics.NewAPIMetrics(), - log: log.With().Str("logger", "server").Logger(), } if cfg.API.CreateIntegrationTables { if err := api.CreateIntegrationTables(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() } } api.metrics.RegisterMetrics() - logger.Info().Msgf("Starting API server on %s", api.config.API.ListenAddr) + log.Info().Msgf("Starting API server on %s", api.config.API.ListenAddr) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, os.Interrupt, syscall.SIGTERM) sighup := make(chan os.Signal, 1) @@ -108,14 +103,14 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack if api.config.API.CompleteResumableAfterRestart { go func() { if err := api.ResumeOperationsAfterRestart(); err != nil { - 
logger.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) + log.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) } }() } go func() { if err := api.UpdateBackupMetrics(context.Background(), false); err != nil { - logger.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() @@ -127,18 +122,18 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack select { case <-api.restart: if err := api.Restart(); err != nil { - logger.Error().Msgf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - logger.Info().Msgf("Reloaded by HTTP") + log.Info().Msgf("Reloaded by HTTP") case <-sighup: if err := api.Restart(); err != nil { - logger.Error().Msgf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - logger.Info().Msg("Reloaded by SIGHUP") + log.Info().Msg("Reloaded by SIGHUP") case <-sigterm: - logger.Info().Msg("Stopping API server") + log.Info().Msg("Stopping API server") return api.Stop() } } @@ -149,7 +144,7 @@ func (api *APIServer) GetMetrics() *metrics.APIMetrics { } func (api *APIServer) RunWatch(cliCtx *cli.Context) { - api.log.Info().Msg("Starting API Server in watch mode") + log.Info().Msg("Starting API Server in watch mode") b := backup.NewBackuper(api.config) commandId, _ := status.Current.Start("watch") err := b.Watch( @@ -167,7 +162,6 @@ func (api *APIServer) Stop() error { } func (api *APIServer) Restart() error { - logger := log.With().Str("logger", "server.Restart").Logger() _, err := api.ReloadConfig(nil, "restart") if err != nil { return err @@ -183,9 +177,9 @@ func (api *APIServer) Restart() error { err = api.server.ListenAndServeTLS(api.config.API.CertificateFile, api.config.API.PrivateKeyFile) if err != nil { if err == http.ErrServerClosed { - logger.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) } else { - logger.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) } } }() @@ -194,9 +188,9 @@ func (api *APIServer) Restart() error { go func() { if err = api.server.ListenAndServe(); err != nil { if err == http.ErrServerClosed { - logger.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) } else { - logger.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) } } }() @@ -206,7 +200,6 @@ func (api *APIServer) Restart() error { // registerHTTPHandlers - resister API routes func (api *APIServer) registerHTTPHandlers() *http.Server { - logger := log.With().Str("logger", "registerHTTPHandlers").Logger() r := mux.NewRouter() r.Use(api.basicAuthMiddleware) r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -246,7 +239,7 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { routes = append(routes, t) return nil }); err != nil { - logger.Error().Msgf("mux.Router.Walk return error: %v", err) + log.Error().Msgf("mux.Router.Walk return error: %v", err) return nil } @@ -262,9 +255,9 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { if r.URL.Path != "/metrics" { - api.log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) + log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) } else { - api.log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) + log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) } user, pass, _ := r.BasicAuth() query := r.URL.Query() @@ -275,11 +268,11 @@ func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { pass = p[0] } if (user != api.config.API.Username) || (pass != api.config.API.Password) { - api.log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) + log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) w.Header().Set("WWW-Authenticate", "Basic realm=\"Provide username and password\"") w.WriteHeader(http.StatusUnauthorized) if _, err := w.Write([]byte("401 Unauthorized\n")); err != nil { - api.log.Error().Msgf("RequestWriter.Write return error: %v", err) + log.Error().Msgf("RequestWriter.Write return error: %v", err) } return } @@ -316,7 +309,7 @@ func (api *APIServer) actions(w http.ResponseWriter, r *http.Request) { api.writeError(w, http.StatusBadRequest, string(line), err) return } - api.log.Info().Msgf("/backup/actions call: %s", row.Command) + log.Info().Msgf("/backup/actions call: %s", row.Command) args, err := shlex.Split(row.Command) if err != nil { api.writeError(w, http.StatusBadRequest, "", err) @@ -375,14 +368,14 @@ func (api *APIServer) actionsDeleteHandler(row status.ActionRow, args []string, } go func() { if err := api.UpdateBackupMetrics(context.Background(), args[1] == "local"); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() actionsResults = append(actionsResults, actionsResultsRow{ Status: "success", Operation: row.Command, }) - api.log.Info().Msg("DELETED") + log.Info().Msg("DELETED") return actionsResults, nil } @@ -397,12 +390,12 @@ func (api *APIServer) actionsAsyncCommandsHandler(command string, args []string, }) status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("API /backup/actions error: %v", err) + log.Error().Msgf("API /backup/actions error: %v", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), command == "create" || command == "restore"); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() }() @@ -433,7 +426,7 @@ func (api *APIServer) actionsKillHandler(row status.ActionRow, args []string, ac func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row status.ActionRow, command string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } commandId, ctx := status.Current.Start(command) @@ -445,14 +438,14 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Error().Msgf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) status.Current.Stop(commandId, err) return actionsResults, err } - api.log.Info().Msg("CLEANED") + log.Info().Msg("CLEANED") metricsErr := api.UpdateBackupMetrics(ctx, false) if metricsErr != nil { - 
api.log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } status.Current.Stop(commandId, nil) actionsResults = append(actionsResults, actionsResultsRow{ @@ -464,7 +457,7 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.ActionRow, args []string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if (!api.config.API.AllowParallel && status.Current.InProgress()) || status.Current.CheckCommandInProgress(row.Command) { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } cfg, err := api.ReloadConfig(w, "watch") @@ -542,7 +535,7 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("Watch error: %v", err) + log.Error().Msgf("Watch error: %v", err) return } }() @@ -565,7 +558,7 @@ func (api *APIServer) actionsLog(w http.ResponseWriter, r *http.Request) { if q.Get("last") != "" { last, err = strconv.ParseInt(q.Get("last"), 10, 16) if err != nil { - api.log.Warn().Err(err).Send() + log.Warn().Err(err).Send() api.writeError(w, http.StatusInternalServerError, "actions", err) return } @@ -780,7 +773,7 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { // httpCreateHandler - create a backup func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "create", ErrAPILocked) return } @@ -836,7 +829,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error().Err(err).Send() + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "create", err) return } @@ -848,13 +841,13 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) return b.CreateBackup(backupName, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { - api.log.Error().Msgf("API /backup/create error: %v", err) + log.Error().Msgf("API /backup/create error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, true); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -876,7 +869,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) // httpWatchHandler - run watch command go routine, can't run the same watch command twice func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, 
"watch", ErrAPILocked) return } @@ -941,7 +934,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { } if status.Current.CheckCommandInProgress(fullCommand) { - api.log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked) + log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked) api.writeError(w, http.StatusLocked, "watch", ErrAPILocked) return } @@ -952,7 +945,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { err := b.Watch(watchInterval, fullInterval, watchBackupNameTemplate, tablePattern, partitionsToBackup, schemaOnly, rbacOnly, configsOnly, skipCheckPartsColumns, api.clickhouseBackupVersion, commandId, api.GetMetrics(), api.cliCtx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("Watch error: %v", err) + log.Error().Msgf("Watch error: %v", err) return } }() @@ -976,7 +969,7 @@ func (api *APIServer) httpCleanHandler(w http.ResponseWriter, _ *http.Request) { err = b.Clean(ctx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("Clean error: %v", err) + log.Error().Msgf("Clean error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean", err) return } @@ -1001,14 +994,14 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Error().Msgf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err) return } err = api.UpdateBackupMetrics(ctx, false) if err != nil { - api.log.Error().Msgf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err) return } @@ -1025,7 +1018,7 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt // httpUploadHandler - upload a backup to remote storage func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "upload", ErrAPILocked) return } @@ -1073,7 +1066,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error().Err(err).Send() + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "upload", err) return } @@ -1085,13 +1078,13 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) return b.Upload(name, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) }) if err != nil { - api.log.Error().Msgf("Upload error: %v", err) + log.Error().Msgf("Upload error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, false); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -1119,7 +1112,7 @@ var databaseMappingRE = regexp.MustCompile(`[\w+]:[\w+]`) // httpRestoreHandler - restore a backup from local storage func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) { if 
!api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "restore", ErrAPILocked) return } @@ -1197,7 +1190,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error().Err(err).Send() + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "restore", err) return } @@ -1210,7 +1203,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) }) status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("API /backup/restore error: %v", err) + log.Error().Msgf("API /backup/restore error: %v", err) api.errorCallback(r.Context(), err, callback) return } @@ -1230,7 +1223,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) // httpDownloadHandler - download a backup from remote to local storage func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "download", ErrAPILocked) return } @@ -1268,7 +1261,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request callback, err := parseCallback(query) if err != nil { - api.log.Error().Err(err).Send() + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "download", err) return } @@ -1280,13 +1273,13 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request return b.Download(name, tablePattern, partitionsToBackup, schemaOnly, resume, commandId) }) if err != nil { - api.log.Error().Msgf("API /backup/download error: %v", err) + log.Error().Msgf("API /backup/download error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return } if err := api.UpdateBackupMetrics(ctx, true); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(r.Context(), err, callback) return @@ -1308,7 +1301,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request // httpDeleteHandler - delete a backup from local or remote storage func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn().Err(ErrAPILocked).Send() + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "delete", ErrAPILocked) return } @@ -1330,13 +1323,13 @@ func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) } status.Current.Stop(commandId, err) if err != nil { - api.log.Error().Msgf("delete backup error: %v", err) + log.Error().Msgf("delete backup error: %v", err) api.writeError(w, http.StatusInternalServerError, "delete", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), vars["where"] == "local"); err != nil { - api.log.Error().Msgf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() api.sendJSONEachRow(w, http.StatusOK, struct { @@ -1368,7 +1361,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e numberBackupsRemote := 0 numberBackupsRemoteBroken 
:= 0 - api.log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal) + log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal) if !api.config.API.EnableMetrics { return nil } @@ -1430,7 +1423,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e api.metrics.LastFinish["create_remote"].Set(float64(lastBackupUpload.Unix())) } } - api.log.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "duration": utils.HumanizeDuration(time.Since(startTime)), "LastBackupCreateLocal": lastBackupCreateLocal, "LastBackupCreateRemote": lastBackupCreateRemote, @@ -1469,10 +1462,9 @@ func (api *APIServer) registerMetricsHandlers(r *mux.Router, enableMetrics bool, } func (api *APIServer) CreateIntegrationTables() error { - api.log.Info().Msgf("Create integration tables") + log.Info().Msgf("Create integration tables") ch := &clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %w", err) @@ -1516,14 +1508,13 @@ func (api *APIServer) CreateIntegrationTables() error { func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*config.Config, error) { cfg, err := config.LoadConfig(api.configPath) if err != nil { - api.log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err) + log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err) if w != nil { api.writeError(w, http.StatusInternalServerError, command, err) } return nil, err } api.config = cfg - api.log = log.With().Str("logger", "server").Logger() api.metrics.NumberBackupsRemoteExpected.Set(float64(cfg.General.BackupsToKeepRemote)) api.metrics.NumberBackupsLocalExpected.Set(float64(cfg.General.BackupsToKeepLocal)) return cfg, nil @@ -1532,14 +1523,13 @@ func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*conf func (api *APIServer) ResumeOperationsAfterRestart() error { ch := clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Logger: log.With().Str("logger", "clickhouse").Logger(), } if err := ch.Connect(); err != nil { return err } defer func() { if err := ch.GetConn().Close(); err != nil { - api.log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) + log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) } }() disks, err := ch.GetDisks(context.Background()) @@ -1598,7 +1588,7 @@ func (api *APIServer) ResumeOperationsAfterRestart() error { } args = append(args, "--resumable=1", backupName) fullCommand := strings.Join(args, " ") - api.log.Info().Str("operation", "ResumeOperationsAfterRestart").Send() + log.Info().Str("operation", "ResumeOperationsAfterRestart").Send() commandId, _ := status.Current.Start(fullCommand) err, _ = api.metrics.ExecuteWithMetrics(command, 0, func() error { return api.cliApp.Run(append([]string{"clickhouse-backup", "-c", api.configPath, "--command-id", strconv.FormatInt(int64(commandId), 10)}, args...)) diff --git a/pkg/server/utils.go b/pkg/server/utils.go index 489b9f38..caa1d82b 100644 --- a/pkg/server/utils.go +++ b/pkg/server/utils.go @@ -4,18 +4,19 @@ import ( "context" "encoding/json" "fmt" + "github.com/rs/zerolog/log" "net/http" "reflect" ) func (api *APIServer) flushOutput(w http.ResponseWriter, out string) { if _, err := fmt.Fprintln(w, out); err != nil { - api.log.Warn().Msgf("can't write to http.ResponseWriter: %v", err) + 
log.Warn().Msgf("can't write to http.ResponseWriter: %v", err) } } func (api *APIServer) writeError(w http.ResponseWriter, statusCode int, operation string, err error) { - api.log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) + log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) w.WriteHeader(statusCode) w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate") @@ -45,7 +46,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) + log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } default: @@ -53,7 +54,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) + log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } } @@ -71,7 +72,7 @@ func (api *APIServer) errorCallback(ctx context.Context, err error, callback cal Error: err.Error(), } for _, e := range callback(ctx, payload) { - api.log.Error().Err(e).Send() + log.Error().Err(e).Send() } } @@ -82,6 +83,6 @@ func (api *APIServer) successCallback(ctx context.Context, callback callbackFn) Error: "", } for _, e := range callback(ctx, payload) { - api.log.Error().Err(e).Send() + log.Error().Err(e).Send() } } diff --git a/pkg/status/status.go b/pkg/status/status.go index 366632ad..f4c0a342 100644 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -3,13 +3,12 @@ package status import ( "context" "fmt" - "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "strings" "sync" "time" "github.com/Altinity/clickhouse-backup/pkg/common" - "github.com/rs/zerolog/log" ) const ( @@ -19,15 +18,12 @@ const ( ErrorStatus = "error" ) -var Current = &AsyncStatus{ - logger: log.With().Str("logger", "status").Logger(), -} +var Current = &AsyncStatus{} const NotFromAPI = int(-1) type AsyncStatus struct { commands []ActionRow - logger zerolog.Logger sync.RWMutex } @@ -59,7 +55,7 @@ func (status *AsyncStatus) Start(command string) (int, context.Context) { Cancel: cancel, }) lastCommandId := len(status.commands) - 1 - status.logger.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId]) + log.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId]) return lastCommandId, ctx } @@ -79,10 +75,10 @@ func (status *AsyncStatus) InProgress() bool { defer status.RUnlock() n := len(status.commands) - 1 if n < 0 { - status.logger.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) + log.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) return false } - status.logger.Debug().Msgf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus) + log.Debug().Msgf("api.status.inProgress -> status.commands[n].Status == %s, inProgress=%v", status.commands[n].Status, status.commands[n].Status == InProgressStatus) return status.commands[n].Status == InProgressStatus } @@ -118,7 +114,7 @@ func (status *AsyncStatus) Stop(commandId int, err error) { 
status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) status.commands[commandId].Ctx = nil status.commands[commandId].Cancel = nil - status.logger.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } func (status *AsyncStatus) Cancel(command string, err error) error { @@ -126,7 +122,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { defer status.Unlock() if len(status.commands) == 0 { err = fmt.Errorf("empty command list") - status.logger.Warn().Err(err).Send() + log.Warn().Err(err).Send() return err } commandId := -1 @@ -147,11 +143,11 @@ func (status *AsyncStatus) Cancel(command string, err error) error { } if commandId == -1 { err = fmt.Errorf("command `%s` not found", command) - status.logger.Warn().Err(err).Send() + log.Warn().Err(err).Send() return err } if status.commands[commandId].Status != InProgressStatus { - status.logger.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status) + log.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status) } if status.commands[commandId].Ctx != nil { status.commands[commandId].Cancel() @@ -161,7 +157,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { status.commands[commandId].Error = err.Error() status.commands[commandId].Status = CancelStatus status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) return nil } @@ -177,7 +173,7 @@ func (status *AsyncStatus) CancelAll(cancelMsg string) { status.commands[commandId].Status = CancelStatus status.commands[commandId].Error = cancelMsg status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - status.logger.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } } diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go index ee9dd14f..b0edd1f5 100644 --- a/pkg/storage/ftp.go +++ b/pkg/storage/ftp.go @@ -14,13 +14,12 @@ import ( "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/jlaffaye/ftp" "github.com/jolestar/go-commons-pool/v2" - "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) type FTP struct { clients *pool.ObjectPool Config *config.FTPConfig - Logger zerolog.Logger dirCache map[string]bool dirCacheMutex sync.RWMutex } @@ -64,21 +63,21 @@ func (f *FTP) Close(ctx context.Context) error { // getConnectionFromPool *ftp.ServerConn is not thread-safe, so we need implements connection pool func (f *FTP) getConnectionFromPool(ctx context.Context, where string) (*ftp.ServerConn, error) { - f.Logger.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + log.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) client, err := f.clients.BorrowObject(ctx) if err != nil { - f.Logger.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err) + log.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err) return nil, err } return client.(*ftp.ServerConn), nil } 
func (f *FTP) returnConnectionToPool(ctx context.Context, where string, client *ftp.ServerConn) { - f.Logger.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + log.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) if client != nil { err := f.clients.ReturnObject(ctx, client) if err != nil { - f.Logger.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err) + log.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err) } } } @@ -175,7 +174,7 @@ func (f *FTP) Walk(ctx context.Context, ftpPath string, recursive bool, process } func (f *FTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, error) { - f.Logger.Debug().Msgf("GetFileReader key=%s", key) + log.Debug().Msgf("GetFileReader key=%s", key) client, err := f.getConnectionFromPool(ctx, "GetFileReader") if err != nil { return nil, err @@ -194,7 +193,7 @@ func (f *FTP) GetFileReaderWithLocalPath(ctx context.Context, key, _ string) (io } func (f *FTP) PutFile(ctx context.Context, key string, r io.ReadCloser) error { - f.Logger.Debug().Msgf("PutFile key=%s", key) + log.Debug().Msgf("PutFile key=%s", key) client, err := f.getConnectionFromPool(ctx, "PutFile") defer f.returnConnectionToPool(ctx, "PutFile", client) if err != nil { @@ -239,7 +238,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.RLock() if _, exists := f.dirCache[d]; exists { f.dirCacheMutex.RUnlock() - f.Logger.Debug().Msgf("MkdirAll %s exists in dirCache", d) + log.Debug().Msgf("MkdirAll %s exists in dirCache", d) continue } f.dirCacheMutex.RUnlock() @@ -247,7 +246,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.Lock() err = client.MakeDir(d) if err != nil { - f.Logger.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err) + log.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err) } else { f.dirCache[d] = true } diff --git a/pkg/storage/general.go b/pkg/storage/general.go index 2899139c..24ecc957 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/rs/zerolog" "io" "os" "path" @@ -49,7 +48,6 @@ type Backup struct { type BackupDestination struct { RemoteStorage - Logger zerolog.Logger compressionFormat string compressionLevel int disableProgressBar bool @@ -67,23 +65,23 @@ func (bd *BackupDestination) RemoveOldBackups(ctx context.Context, keep int) err return err } backupsToDelete := GetBackupsToDelete(backupList, keep) - bd.Logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackups", "duration": utils.HumanizeDuration(time.Since(start)), }).Msg("calculate backup list for delete") for _, backupToDelete := range backupsToDelete { startDelete := time.Now() if err := bd.RemoveBackup(ctx, backupToDelete); err != nil { - bd.Logger.Warn().Msgf("can't delete %s return error : %v", backupToDelete.BackupName, err) + log.Warn().Msgf("can't delete %s return error : %v", backupToDelete.BackupName, err) } - bd.Logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackups", "location": "remote", "backup": backupToDelete.BackupName, "duration": utils.HumanizeDuration(time.Since(startDelete)), }).Msg("done") } - bd.Logger.Info().Fields(map[string]interface{}{ + log.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackups", 
"duration": utils.HumanizeDuration(time.Since(start)), }).Msg("done") @@ -123,17 +121,17 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) listCache := map[string]Backup{} if info, err := os.Stat(listCacheFile); os.IsNotExist(err) || info.IsDir() { - bd.Logger.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache)) return listCache, nil } f, err := os.Open(listCacheFile) if err != nil { - bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return listCache, nil } defer func() { if err := f.Close(); err != nil { - bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() select { @@ -142,15 +140,15 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] default: body, err := io.ReadAll(f) if err != nil { - bd.Logger.Warn().Msgf("can't read %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't read %s return error %v", listCacheFile, err) return listCache, nil } if string(body) != "" { if err := json.Unmarshal(body, &listCache); err != nil { - bd.Logger.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) + log.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) } } - bd.Logger.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache)) return listCache, nil } } @@ -159,12 +157,12 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) f, err := os.OpenFile(listCacheFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - bd.Logger.Warn().Msgf("can't open %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return nil } defer func() { if err := f.Close(); err != nil { - bd.Logger.Warn().Msgf("can't close %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() for backupName := range listCache { @@ -190,15 +188,15 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma default: body, err := json.MarshalIndent(&listCache, "", "\t") if err != nil { - bd.Logger.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err) return nil } _, err = f.Write(body) if err != nil { - bd.Logger.Warn().Msgf("can't write to %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't write to %s return error %v", listCacheFile, err) return nil } - bd.Logger.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache)) return nil } } @@ -311,7 +309,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, return nil }) if err != nil { - bd.Logger.Warn().Msgf("BackupList bd.Walk return error: %v", err) + log.Warn().Msgf("BackupList bd.Walk return error: %v", err) } // 
sort by name for the same not parsed metadata.json sort.SliceStable(result, func(i, j int) bool { @@ -343,13 +341,13 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot } defer func() { if err := reader.Close(); err != nil { - bd.Logger.Warn().Msgf("can't close GetFileReader descriptor %v", reader) + log.Warn().Msgf("can't close GetFileReader descriptor %v", reader) } switch reader.(type) { case *os.File: fileName := reader.(*os.File).Name() if err := os.Remove(fileName); err != nil { - bd.Logger.Warn().Msgf("can't remove %s", fileName) + log.Warn().Msgf("can't remove %s", fileName) } } }() @@ -361,7 +359,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot proxyReader := bar.NewProxyReader(bufReader) compressionFormat := bd.compressionFormat if !checkArchiveExtension(path.Ext(remotePath), compressionFormat) { - bd.Logger.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) + log.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) compressionFormat = strings.Replace(path.Ext(remotePath), ".", "", -1) } z, err := getArchiveReader(compressionFormat) @@ -402,7 +400,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot if err := f.Close(); err != nil { return err } - //bd.Logger.Debug().Msgf("extract %s", extractFile) + //log.Debug().Msgf("extract %s", extractFile) return nil }); err != nil { return err @@ -437,11 +435,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if writerErr != nil { if err := w.CloseWithError(writerErr); err != nil { - bd.Logger.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err) + log.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err) } } else { if err := w.Close(); err != nil { - bd.Logger.Error().Msgf("can't close pipe writer: %v", err) + log.Error().Msgf("can't close pipe writer: %v", err) } } }() @@ -468,7 +466,7 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc }, } archiveFiles = append(archiveFiles, file) - //bd.Logger.Debug().Msgf("add %s to archive %s", filePath, remotePath) + //log.Debug().Msgf("add %s to archive %s", filePath, remotePath) } if writerErr = z.Archive(ctx, w, archiveFiles); writerErr != nil { return writerErr @@ -479,11 +477,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if readerErr != nil { if err := body.CloseWithError(readerErr); err != nil { - bd.Logger.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err) + log.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err) } } else { if err := body.Close(); err != nil { - bd.Logger.Error().Msgf("can't close pipe reader: %v", err) + log.Error().Msgf("can't close pipe reader: %v", err) } } }() @@ -508,10 +506,6 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot bar = progressbar.StartNewByteBar(!bd.disableProgressBar, totalBytes) defer bar.Finish() } - logger := bd.Logger.With().Fields(map[string]interface{}{ - "path": remotePath, - "operation": "download", - }).Logger() return bd.Walk(ctx, remotePath, true, func(ctx context.Context, f RemoteFile) error { if bd.Kind() == "SFTP" && (f.Name() == "." 
|| f.Name() == "..") { return nil @@ -520,30 +514,30 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, size int64, remot err := retry.RunCtx(ctx, func(ctx context.Context) error { r, err := bd.GetFileReader(ctx, path.Join(remotePath, f.Name())) if err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } dstFilePath := path.Join(localPath, f.Name()) dstDirPath, _ := path.Split(dstFilePath) if err := os.MkdirAll(dstDirPath, 0750); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } dst, err := os.Create(dstFilePath) if err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } if _, err := io.CopyBuffer(dst, r, nil); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } if err := dst.Close(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } if err := r.Close(); err != nil { - logger.Error().Err(err).Send() + log.Error().Err(err).Send() return err } return nil @@ -584,7 +578,7 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc } closeFile := func() { if err := f.Close(); err != nil { - bd.Logger.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err) + log.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err) } } retry := retrier.New(retrier.ConstantBackoff(RetriesOnFailure, RetriesDuration), nil) @@ -609,7 +603,6 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, size int64, baseLoc } func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, calcMaxSize bool, backupName string) (*BackupDestination, error) { - logger := log.With().Str("logger", "NewBackupDestination").Logger() var err error // https://github.com/Altinity/clickhouse-backup/issues/404 if calcMaxSize { @@ -618,7 +611,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous return nil, err } if cfg.General.MaxFileSize > 0 && cfg.General.MaxFileSize < maxFileSize { - logger.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) + log.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) } if cfg.General.MaxFileSize <= 0 || cfg.General.MaxFileSize < maxFileSize { cfg.General.MaxFileSize = maxFileSize @@ -646,7 +639,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous azblobStorage.Config.BufferSize = bufferSize return &BackupDestination{ azblobStorage, - logger.With().Str("logger", "azure").Logger(), cfg.AzureBlob.CompressionFormat, cfg.AzureBlob.CompressionLevel, cfg.General.DisableProgressBar, @@ -667,7 +659,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous Concurrency: cfg.S3.Concurrency, BufferSize: 512 * 1024, PartSize: partSize, - Logger: log.With().Str("logger", "S3").Logger(), } s3Storage.Config.Path, err = ch.ApplyMacros(ctx, s3Storage.Config.Path) if err != nil { @@ -684,7 +675,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ s3Storage, - log.With().Str("logger", "s3").Logger(), cfg.S3.CompressionFormat, cfg.S3.CompressionLevel, cfg.General.DisableProgressBar, @@ -706,7 +696,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return 
&BackupDestination{ googleCloudStorage, - log.With().Str("logger", "gcs").Logger(), cfg.GCS.CompressionFormat, cfg.GCS.CompressionLevel, cfg.General.DisableProgressBar, @@ -719,7 +708,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ tencentStorage, - log.With().Str("logger", "cos").Logger(), cfg.COS.CompressionFormat, cfg.COS.CompressionLevel, cfg.General.DisableProgressBar, @@ -727,7 +715,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous case "ftp": ftpStorage := &FTP{ Config: &cfg.FTP, - Logger: log.With().Str("logger", "FTP").Logger(), } ftpStorage.Config.Path, err = ch.ApplyMacros(ctx, ftpStorage.Config.Path) if err != nil { @@ -735,7 +722,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ ftpStorage, - log.With().Str("logger", "FTP").Logger(), cfg.FTP.CompressionFormat, cfg.FTP.CompressionLevel, cfg.General.DisableProgressBar, @@ -750,7 +736,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ sftpStorage, - log.With().Str("logger", "SFTP").Logger(), cfg.SFTP.CompressionFormat, cfg.SFTP.CompressionLevel, cfg.General.DisableProgressBar, diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index 31d5faab..65e14921 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -4,10 +4,6 @@ import ( "context" "crypto/tls" "fmt" - "github.com/Altinity/clickhouse-backup/pkg/config" - "github.com/aws/smithy-go" - awsV2http "github.com/aws/smithy-go/transport/http" - "github.com/rs/zerolog" "io" "net/http" "os" @@ -15,8 +11,7 @@ import ( "strings" "time" - "golang.org/x/sync/errgroup" - + "github.com/Altinity/clickhouse-backup/pkg/config" "github.com/aws/aws-sdk-go-v2/aws" awsV2Config "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/credentials" @@ -25,17 +20,22 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go" awsV2Logging "github.com/aws/smithy-go/logging" + awsV2http "github.com/aws/smithy-go/transport/http" "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" ) type S3LogToZeroLogAdapter struct { logger zerolog.Logger } -func newS3Logger(log zerolog.Logger) S3LogToZeroLogAdapter { +func newS3Logger(logger zerolog.Logger) S3LogToZeroLogAdapter { return S3LogToZeroLogAdapter{ - logger: log, + logger: logger, } } @@ -54,7 +54,6 @@ type S3 struct { uploader *s3manager.Uploader downloader *s3manager.Downloader Config *config.S3Config - Logger zerolog.Logger PartSize int64 Concurrency int BufferSize int @@ -108,7 +107,7 @@ func (s *S3) Connect(ctx context.Context) error { } if s.Config.Debug { - awsConfig.Logger = newS3Logger(s.Logger) + awsConfig.Logger = newS3Logger(log.Logger) awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 63de1a68..fb805a2d 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1912,7 +1912,6 @@ func (ch *TestClickHouse) connect(timeOut string) error { Port: 9000, Timeout: timeOut, }, - Logger: log.With().Str("logger", "clickhouse").Logger(), } var err error for i := 0; i < 3; i++ { From 98245c36a0f6754182e636e6d344ba7a31167561 Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 12 Aug 2023 
18:59:52 +0400 Subject: [PATCH 13/21] apply fixes from https://github.com/rs/zerolog/issues/555#issuecomment-1669961273, looks like race conditions gone away, need to pull and resolve conflicts from master --- pkg/clickhouse/clickhouse.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index b1ea8860..f49a9a73 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -908,16 +908,14 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args } func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { - var logF *zerolog.Event + level := zerolog.InfoLevel if !ch.Config.LogSQLQueries { - logF = log.Debug() - } else { - logF = log.Info() + level = zerolog.DebugLevel } if len(args) > 0 { - logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) + log.WithLevel(level).Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) } else { - logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) + log.WithLevel(level).Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) } return query } From 002004e8681cbf7c7596cae88589af4c7752405d Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 13 Aug 2023 09:39:56 +0400 Subject: [PATCH 14/21] fixes for test --- test/integration/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index fb805a2d..140e3246 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -700,7 +700,7 @@ func TestRestoreDatabaseMapping(t *testing.T) { r := require.New(t) r.NoError(dockerCP("config-database-mapping.yml", "clickhouse:/etc/clickhouse-backup/config.yml")) ch := &TestClickHouse{} - ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second) + ch.connectWithWait(r, 500*time.Millisecond, 5*time.Second) defer ch.chbackend.Close() checkRecordset := func(expectedRows int, expectedCount uint64, query string) { result := make([]struct { From 2d000c50f4a90717df1b873e92dc784296fab15f Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 13 Aug 2023 16:42:17 +0400 Subject: [PATCH 15/21] try to debug zerolog race condition https://github.com/rs/zerolog/issues/555 --- pkg/backup/download.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/backup/download.go b/pkg/backup/download.go index 3350fbdd..5098ce24 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -406,12 +406,12 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } - log.Info(). - Str("operation", "download_metadata"). - Str("backup", backupName). - Str("duration", utils.HumanizeDuration(time.Since(start))). - Str("size", utils.FormatBytes(size)).
- Msg("done") + log.Info().Fields(map[string]interface{}{ + "operation": "download_metadata", + "backup": backupName, + "duration": utils.HumanizeDuration(time.Since(start)), + "size": utils.FormatBytes(size), + }).Msg("done") return &tableMetadata, size, nil } From 0298ea111cb42d85ac4b9959e6bc07b0cd1926cd Mon Sep 17 00:00:00 2001 From: Slach Date: Sun, 13 Aug 2023 17:10:00 +0400 Subject: [PATCH 16/21] switch to golang 1.21 --- .github/workflows/build.yaml | 4 ++-- .github/workflows/release.yaml | 2 +- Dockerfile | 6 +++--- Vagrantfile | 2 +- go.mod | 2 +- test/integration/install_delve.sh | 6 +++--- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index b22e5133..af45a21d 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -16,7 +16,7 @@ jobs: strategy: matrix: golang-version: - "1.20" + "1.21" steps: - name: Checkout project uses: actions/checkout@v3 @@ -210,7 +210,7 @@ jobs: strategy: matrix: golang-version: - "1.20" + "1.21" clickhouse: - '1.1.54394' - '19.17' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a38f473b..e87a0364 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: golang-version: - "1.20" + "1.21" steps: - name: Checkout project diff --git a/Dockerfile b/Dockerfile index 16b9ada0..3accc2bb 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,16 +14,16 @@ RUN rm -fv /etc/apt/sources.list.d/clickhouse.list && \ echo "deb https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" > /etc/apt/sources.list.d/golang.list && \ echo "deb-src https://ppa.launchpadcontent.net/longsleep/golang-backports/ubuntu ${DISTRIB_CODENAME} main" >> /etc/apt/sources.list.d/golang.list && \ ( apt-get update || true ) && \ - apt-get install -y --no-install-recommends libc-dev golang-1.20 make git gcc musl-dev musl-tools && \ + apt-get install -y --no-install-recommends libc-dev golang-1.21 make git gcc musl-dev musl-tools && \ wget -q -P /root/ https://musl.cc/aarch64-linux-musl-cross.tgz && \ tar -xvf /root/aarch64-linux-musl-cross.tgz -C /root/ && \ mkdir -p /root/go/ -RUN ln -nsfv /usr/lib/go-1.20/bin/go /usr/bin/go +RUN ln -nsfv /usr/lib/go-1.21/bin/go /usr/bin/go VOLUME /root/.cache/go ENV GOCACHE=/root/.cache/go ENV GOPATH=/root/go/ -ENV GOROOT=/usr/lib/go-1.20/ +ENV GOROOT=/usr/lib/go-1.21/ RUN go env WORKDIR /src/ # cache modules when go.mod go.sum changed diff --git a/Vagrantfile b/Vagrantfile index 5f179030..12eae47a 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -101,7 +101,7 @@ Vagrant.configure(2) do |config| apt-get install --no-install-recommends -y clickhouse-client clickhouse-server # golang - export GOLANG_VERSION=1.20 + export GOLANG_VERSION=1.21 apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E add-apt-repository ppa:longsleep/golang-backports apt-get install --no-install-recommends -y golang-${GOLANG_VERSION} diff --git a/go.mod b/go.mod index 8ba055dd..6c96785e 100644 --- a/go.mod +++ b/go.mod @@ -132,4 +132,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -go 1.20 +go 1.21 diff --git a/test/integration/install_delve.sh b/test/integration/install_delve.sh index dcb854d8..4217e679 100755 --- a/test/integration/install_delve.sh +++ b/test/integration/install_delve.sh @@ -4,16 +4,16 @@ apt-get update && apt-get install -y software-properties-common apt-key adv --keyserver
keyserver.ubuntu.com --recv-keys 52B59B1571A79DBC054901C0F6BC817356A3D45E add-apt-repository -y ppa:longsleep/golang-backports apt-get update -apt-get install -y golang-1.20 +apt-get install -y golang-1.21 mkdir -p ~/go/ export GOPATH=~/go/ grep -q -F 'export GOPATH=$GOPATH' ~/.bashrc || echo "export GOPATH=$GOPATH" >> ~/.bashrc grep -q -F 'export GOPATH=$GOPATH' /root/.bashrc || echo "export GOPATH=$GOPATH" >> /root/.bashrc -export GOROOT=/usr/lib/go-1.20/ +export GOROOT=/usr/lib/go-1.21/ grep -q -F 'export GOROOT=$GOROOT' ~/.bashrc || echo "export GOROOT=$GOROOT" >> ~/.bashrc grep -q -F 'export GOROOT=$GOROOT' /root/.bashrc || echo "export GOROOT=$GOROOT" >> /root/.bashrc -ln -nsfv /usr/lib/go-1.20/bin/go /usr/bin/go +ln -nsfv /usr/lib/go-1.21/bin/go /usr/bin/go CGO_ENABLED=0 GO111MODULE=on go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest From 2251da95f31ea82d0bd731d40fc0e6df981f843a Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 2 Aug 2024 18:03:44 +0400 Subject: [PATCH 17/21] try to apply workaround to avoid race-conditions, https://github.com/Altinity/clickhouse-backup/pull/670#discussion_r1677387856 Signed-off-by: Slach --- go.mod | 4 +- go.sum | 441 +++++++++++++++++++++++++++++++++++ pkg/clickhouse/clickhouse.go | 8 +- 3 files changed, 447 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 6c96785e..ebed3f4d 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.13.6 github.com/prometheus/client_golang v1.16.0 - github.com/rs/zerolog v1.30.0 + github.com/rs/zerolog v1.33.0 github.com/stretchr/testify v1.8.4 github.com/tencentyun/cos-go-sdk-v5 v0.7.42 github.com/urfave/cli v1.22.14 @@ -121,7 +121,7 @@ require ( go4.org v0.0.0-20230225012048-214862532bf5 // indirect golang.org/x/net v0.14.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect - golang.org/x/sys v0.11.0 // indirect + golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.12.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index 7ab1056a..c9d442b3 100644 --- a/go.sum +++ b/go.sum @@ -9,26 +9,255 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/accessapproval v1.7.1 h1:/5YjNhR6lzCvmJZAnByYkfEgWjfAKwYP6nkuTk6nKFE= +cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68= +cloud.google.com/go/accesscontextmanager v1.8.1 h1:WIAt9lW9AXtqw/bnvrEUaE8VG/7bAAeMzRCBGMkc4+w= +cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo= +cloud.google.com/go/aiplatform v1.48.0 h1:M5davZWCTzE043rJCn+ZLW6hSxfG1KAx4vJTtas2/ec= +cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA= +cloud.google.com/go/analytics v0.21.3 h1:TFBC1ZAqX9/jL56GEXdLrVe5vT3I22bDVWyDwZX4IEg= +cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo= +cloud.google.com/go/apigateway v1.6.1 h1:aBSwCQPcp9rZ0zVEUeJbR623palnqtvxJlUyvzsKGQc= +cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA= +cloud.google.com/go/apigeeconnect v1.6.1
h1:6u/jj0P2c3Mcm+H9qLsXI7gYcTiG9ueyQL3n6vCmFJM= +cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs= +cloud.google.com/go/apigeeregistry v0.7.1 h1:hgq0ANLDx7t2FDZDJQrCMtCtddR/pjCqVuvQWGrQbXw= +cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw= +cloud.google.com/go/appengine v1.8.1 h1:J+aaUZ6IbTpBegXbmEsh8qZZy864ZVnOoWyfa1XSNbI= +cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY= +cloud.google.com/go/area120 v0.8.1 h1:wiOq3KDpdqXmaHzvZwKdpoM+3lDcqsI2Lwhyac7stss= +cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg= +cloud.google.com/go/artifactregistry v1.14.1 h1:k6hNqab2CubhWlGcSzunJ7kfxC7UzpAfQ1UPb9PDCKI= +cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E= +cloud.google.com/go/asset v1.14.1 h1:vlHdznX70eYW4V1y1PxocvF6tEwxJTTarwIGwOhFF3U= +cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ= +cloud.google.com/go/assuredworkloads v1.11.1 h1:yaO0kwS+SnhVSTF7BqTyVGt3DTocI6Jqo+S3hHmCwNk= +cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= +cloud.google.com/go/automl v1.13.1 h1:iP9iQurb0qbz+YOOMfKSEjhONA/WcoOIjt6/m+6pIgo= +cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE= +cloud.google.com/go/baremetalsolution v1.1.1 h1:0Ge9PQAy6cZ1tRrkc44UVgYV15nw2TVnzJzYsMHXF+E= +cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA= +cloud.google.com/go/batch v1.3.1 h1:uE0Q//W7FOGPjf7nuPiP0zoE8wOT3ngoIO2HIet0ilY= +cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A= +cloud.google.com/go/beyondcorp v1.0.0 h1:VPg+fZXULQjs8LiMeWdLaB5oe8G9sEoZ0I0j6IMiG1Q= +cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.53.0 h1:K3wLbjbnSlxhuG5q4pntHv5AEbQM1QqHKGYgwFIqOTg= +cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4= +cloud.google.com/go/billing v1.16.0 h1:1iktEAIZ2uA6KpebC235zi/rCXDdDYQ0bTXTNetSL80= +cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA= +cloud.google.com/go/binaryauthorization v1.6.1 h1:cAkOhf1ic92zEN4U1zRoSupTmwmxHfklcp1X7CCBKvE= +cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U= +cloud.google.com/go/certificatemanager v1.7.1 h1:uKsohpE0hiobx1Eak9jNcPCznwfB6gvyQCcS28Ah9E8= +cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI= +cloud.google.com/go/channel v1.16.0 h1:dqRkK2k7Ll/HHeYGxv18RrfhozNxuTJRkspW0iaFZoY= +cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc= +cloud.google.com/go/cloudbuild v1.13.0 h1:YBbAWcvE4x6xPWTyS+OU4eiUpz5rCS3VCM/aqmfddPA= +cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU= +cloud.google.com/go/clouddms v1.6.1 h1:rjR1nV6oVf2aNNB7B5uz1PDIlBjlOiBgR+q5n7bbB7M= +cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI= +cloud.google.com/go/cloudtasks v1.12.1 h1:cMh9Q6dkvh+Ry5LAPbD/U2aw6KAqdiU6FttwhbTo69w= 
+cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM= cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.10.0 h1:YR2aPedGVQPpFBZXJnPkqRj8M//8veIZZH5ZvICoXnI= +cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM= +cloud.google.com/go/container v1.24.0 h1:N51t/cgQJFqDD/W7Mb+IvmAPHrf8AbPx7Bb7aF4lROE= +cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4= +cloud.google.com/go/containeranalysis v0.10.1 h1:SM/ibWHWp4TYyJMwrILtcBtYKObyupwOVeceI9pNblw= +cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0= +cloud.google.com/go/datacatalog v1.16.0 h1:qVeQcw1Cz93/cGu2E7TYUPh8Lz5dn5Ws2siIuQ17Vng= +cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4= +cloud.google.com/go/dataflow v0.9.1 h1:VzG2tqsk/HbmOtq/XSfdF4cBvUWRK+S+oL9k4eWkENQ= +cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw= +cloud.google.com/go/dataform v0.8.1 h1:xcWso0hKOoxeW72AjBSIp/UfkvpqHNzzS0/oygHlcqY= +cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M= +cloud.google.com/go/datafusion v1.7.1 h1:eX9CZoyhKQW6g1Xj7+RONeDj1mV8KQDKEB9KLELX9/8= +cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI= +cloud.google.com/go/datalabeling v0.8.1 h1:zxsCD/BLKXhNuRssen8lVXChUj8VxF3ofN06JfdWOXw= +cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY= +cloud.google.com/go/dataplex v1.9.0 h1:yoBWuuUZklYp7nx26evIhzq8+i/nvKYuZr1jka9EqLs= +cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE= +cloud.google.com/go/dataproc/v2 v2.0.1 h1:4OpSiPMMGV3XmtPqskBU/RwYpj3yMFjtMLj/exi425Q= +cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4= +cloud.google.com/go/dataqna v0.8.1 h1:ITpUJep04hC9V7C+gcK390HO++xesQFSUJ7S4nSnF3U= +cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.13.0 h1:ktbC66bOQB3HJPQe8qNI1/aiQ77PMu7hD4mzE6uxe3w= +cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70= +cloud.google.com/go/datastream v1.10.0 h1:ra/+jMv36zTAGPfi8TRne1hXme+UsKtdcK4j6bnqQiw= +cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q= +cloud.google.com/go/deploy v1.13.0 h1:A+w/xpWgz99EYzB6e31gMGAI/P5jTZ2UO7veQK5jQ8o= +cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g= +cloud.google.com/go/dialogflow v1.40.0 h1:sCJbaXt6ogSbxWQnERKAzos57f02PP6WkGbOZvXUdwc= +cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4= +cloud.google.com/go/dlp v1.10.1 h1:tF3wsJ2QulRhRLWPzWVkeDz3FkOGVoMl6cmDUHtfYxw= +cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI= +cloud.google.com/go/documentai v1.22.0 h1:dW8ex9yb3oT9s1yD2+yLcU8Zq15AquRZ+wd0U+TkxFw= 
+cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E= +cloud.google.com/go/domains v0.9.1 h1:rqz6KY7mEg7Zs/69U6m6LMbB7PxFDWmT3QWNXIqhHm0= +cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE= +cloud.google.com/go/edgecontainer v1.1.1 h1:zhHWnLzg6AqzE+I3gzJqiIwHfjEBhWctNQEzqb+FaRo= +cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk= +cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.6.2 h1:OEJ0MLXXCW/tX1fkxzEZOsv/wRfyFsvDVNaHWBAvoV0= +cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4= +cloud.google.com/go/eventarc v1.13.0 h1:xIP3XZi0Xawx8DEfh++mE2lrIi5kQmCr/KcWhJ1q0J4= +cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI= +cloud.google.com/go/filestore v1.7.1 h1:Eiz8xZzMJc5ppBWkuaod/PUdUZGCFR8ku0uS+Ah2fRw= +cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4= +cloud.google.com/go/firestore v1.12.0 h1:aeEA/N7DW7+l2u5jtkO8I0qv0D95YwjggD8kUHrTHO4= +cloud.google.com/go/firestore v1.12.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4= +cloud.google.com/go/functions v1.15.1 h1:LtAyqvO1TFmNLcROzHZhV0agEJfBi+zfMZsF4RT/a7U= +cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE= +cloud.google.com/go/gkebackup v1.3.0 h1:lgyrpdhtJKV7l1GM15YFt+OCyHMxsQZuSydyNmS0Pxo= +cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU= +cloud.google.com/go/gkeconnect v0.8.1 h1:a1ckRvVznnuvDWESM2zZDzSVFvggeBaVY5+BVB8tbT0= +cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw= +cloud.google.com/go/gkehub v0.14.1 h1:2BLSb8i+Co1P05IYCKATXy5yaaIw/ZqGvVSBTLdzCQo= +cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY= +cloud.google.com/go/gkemulticloud v1.0.0 h1:MluqhtPVZReoriP5+adGIw+ij/RIeRik8KApCW2WMTw= +cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw= +cloud.google.com/go/gsuiteaddons v1.6.1 h1:mi9jxZpzVjLQibTS/XfPZvl+Jr6D5Bs8pGqUjllRb00= +cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY= cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iap v1.8.1 h1:X1tcp+EoJ/LGX6cUPt3W2D4H2Kbqq0pLAsldnsCjLlE= +cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ= +cloud.google.com/go/ids v1.4.1 h1:khXYmSoDDhWGEVxHl4c4IgbwSRR+qE/L4hzP3vaU9Hc= +cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw= +cloud.google.com/go/iot v1.7.1 h1:yrH0OSmicD5bqGBoMlWG8UltzdLkYzNUwNVUVz7OT54= +cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk= +cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= +cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM= +cloud.google.com/go/language v1.10.1 h1:3MXeGEv8AlX+O2LyV4pO4NGpodanc26AmXwOuipEym0= +cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0= +cloud.google.com/go/lifesciences v0.9.1 
h1:axkANGx1wiBXHiPcJZAE+TDjjYoJRIDzbHC/WYllCBU= +cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.5.1 h1:Fr7TXftcqTudoyRJa113hyaqlGdiBQkp0Gq7tErFDWI= +cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= +cloud.google.com/go/managedidentities v1.6.1 h1:2/qZuOeLgUHorSdxSQGtnOu9xQkBn37+j+oZQv/KHJY= +cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak= +cloud.google.com/go/maps v1.4.0 h1:PdfgpBLhAoSzZrQXP+/zBc78fIPLZSJp5y8+qSMn2UU= +cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s= +cloud.google.com/go/mediatranslation v0.8.1 h1:50cF7c1l3BanfKrpnTCaTvhf+Fo6kdF21DG0byG7gYU= +cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig= +cloud.google.com/go/memcache v1.10.1 h1:7lkLsF0QF+Mre0O/NvkD9Q5utUNwtzvIYjrOLOs0HO0= +cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA= +cloud.google.com/go/metastore v1.12.0 h1:+9DsxUOHvsqvC0ylrRc/JwzbXJaaBpfIK3tX0Lx8Tcc= +cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA= +cloud.google.com/go/monitoring v1.15.1 h1:65JhLMd+JiYnXr6j5Z63dUYCuOg770p8a/VC+gil/58= +cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM= +cloud.google.com/go/networkconnectivity v1.12.1 h1:LnrYM6lBEeTq+9f2lR4DjBhv31EROSAQi/P5W4Q0AEc= +cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E= +cloud.google.com/go/networkmanagement v1.8.0 h1:/3xP37eMxnyvkfLrsm1nv1b2FbMMSAEAOlECTvoeCq4= +cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0= +cloud.google.com/go/networksecurity v0.9.1 h1:TBLEkMp3AE+6IV/wbIGRNTxnqLXHCTEQWoxRVC18TzY= +cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ= +cloud.google.com/go/notebooks v1.9.1 h1:CUqMNEtv4EHFnbogV+yGHQH5iAQLmijOx191innpOcs= +cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8= +cloud.google.com/go/optimization v1.4.1 h1:pEwOAmO00mxdbesCRSsfj8Sd4rKY9kBrYW7Vd3Pq7cA= +cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk= +cloud.google.com/go/orchestration v1.8.1 h1:KmN18kE/xa1n91cM5jhCh7s1/UfIguSCisw7nTMUzgE= +cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8= +cloud.google.com/go/orgpolicy v1.11.1 h1:I/7dHICQkNwym9erHqmlb50LRU588NPCvkfIY0Bx9jI= +cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE= +cloud.google.com/go/osconfig v1.12.1 h1:dgyEHdfqML6cUW6/MkihNdTVc0INQst0qSE8Ou1ub9c= +cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE= +cloud.google.com/go/oslogin v1.10.1 h1:LdSuG3xBYu2Sgr3jTUULL1XCl5QBx6xwzGqzoDUw1j0= +cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs= +cloud.google.com/go/phishingprotection v0.8.1 h1:aK/lNmSd1vtbft/vLe2g7edXK72sIQbqr2QyrZN/iME= +cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I= +cloud.google.com/go/policytroubleshooter v1.8.0 
h1:XTMHy31yFmXgQg57CB3w9YQX8US7irxDX0Fl0VwlZyY= +cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU= +cloud.google.com/go/privatecatalog v0.9.1 h1:B/18xGo+E0EMS9LOEQ0zXz7F2asMgmVgTYGSI89MHOA= +cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= +cloud.google.com/go/pubsublite v1.8.1 h1:pX+idpWMIH30/K7c0epN6V703xpIcMXWRjKJsz0tYGY= +cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2 h1:IGkbudobsTXAwmkEYOzPCQPApUCsN4Gbq3ndGVhHQpI= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU= +cloud.google.com/go/recommendationengine v0.8.1 h1:nMr1OEVHuDambRn+/y4RmNAmnR/pXCuHtH0Y4tCgGRQ= +cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE= +cloud.google.com/go/recommender v1.10.1 h1:UKp94UH5/Lv2WXSQe9+FttqV07x/2p1hFTMMYVFtilg= +cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA= +cloud.google.com/go/redis v1.13.1 h1:YrjQnCC7ydk+k30op7DSjSHw1yAYhqYXFcOq1bSXRYA= +cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg= +cloud.google.com/go/resourcemanager v1.9.1 h1:QIAMfndPOHR6yTmMUB0ZN+HSeRmPjR/21Smq5/xwghI= +cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8= +cloud.google.com/go/resourcesettings v1.6.1 h1:Fdyq418U69LhvNPFdlEO29w+DRRjwDA4/pFamm4ksAg= +cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw= +cloud.google.com/go/retail v1.14.1 h1:gYBrb9u/Hc5s5lUTFXX1Vsbc/9BEvgtioY6ZKaK0DK8= +cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE= +cloud.google.com/go/run v1.2.0 h1:kHeIG8q+N6Zv0nDkBjSOYfK2eWqa5FnaiDPH/7/HirE= +cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo= +cloud.google.com/go/scheduler v1.10.1 h1:yoZbZR8880KgPGLmACOMCiY2tPk+iX4V/dkxqTirlz8= +cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo= +cloud.google.com/go/secretmanager v1.11.1 h1:cLTCwAjFh9fKvU6F13Y4L9vPcx9yiWPyWXE4+zkuEQs= +cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw= +cloud.google.com/go/security v1.15.1 h1:jR3itwycg/TgGA0uIgTItcVhA55hKWiNJxaNNpQJaZE= +cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA= +cloud.google.com/go/securitycenter v1.23.0 h1:XOGJ9OpnDtqg8izd7gYk/XUhj8ytjIalyjjsR6oyG0M= +cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ= +cloud.google.com/go/servicedirectory v1.11.0 h1:pBWpjCFVGWkzVTkqN3TBBIqNSoSHY86/6RL0soSQ4z8= +cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ= +cloud.google.com/go/shell v1.7.1 h1:aHbwH9LSqs4r2rbay9f6fKEls61TAjT63jSyglsw7sI= +cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g= +cloud.google.com/go/spanner v1.47.0 
h1:aqiMP8dhsEXgn9K5EZBWxPG7dxIiyM2VaikqeU4iteg= +cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI= +cloud.google.com/go/speech v1.19.0 h1:MCagaq8ObV2tr1kZJcJYgXYbIn8Ai5rp42tyGYw9rls= +cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI= cloud.google.com/go/storage v1.31.0/go.mod h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0= +cloud.google.com/go/storagetransfer v1.10.0 h1:+ZLkeXx0K0Pk5XdDmG0MnUVqIR18lllsihU/yq39I8Q= +cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA= +cloud.google.com/go/talent v1.6.2 h1:j46ZgD6N2YdpFPux9mc7OAf4YK3tiBCsbLKc8rQx+bU= +cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24= +cloud.google.com/go/texttospeech v1.7.1 h1:S/pR/GZT9p15R7Y2dk2OXD/3AufTct/NSxT4a7nxByw= +cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk= +cloud.google.com/go/tpu v1.6.1 h1:kQf1jgPY04UJBYYjNUO+3GrZtIb57MfGAW2bwgLbR3A= +cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E= +cloud.google.com/go/trace v1.10.1 h1:EwGdOLCNfYOOPtgqo+D2sDLZmRCEO1AagRTJCU6ztdg= +cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk= +cloud.google.com/go/translate v1.8.2 h1:PQHamiOzlehqLBJMnM72lXk/OsMQewZB12BKJ8zXrU0= +cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs= +cloud.google.com/go/video v1.19.0 h1:BRyyS+wU+Do6VOXnb8WfPr42ZXti9hzmLKLUCkggeK4= +cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU= +cloud.google.com/go/videointelligence v1.11.1 h1:MBMWnkQ78GQnRz5lfdTAbBq/8QMCF3wahgtHh3s/J+k= +cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo= +cloud.google.com/go/vision/v2 v2.7.2 h1:ccK6/YgPfGHR/CyESz1mvIbsht5Y2xRsWCPqmTNydEw= +cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU= +cloud.google.com/go/vmmigration v1.7.1 h1:gnjIclgqbEMc+cF5IJuPxp53wjBIlqZ8h9hE8Rkwp7A= +cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro= +cloud.google.com/go/vmwareengine v1.0.0 h1:qsJ0CPlOQu/3MFBGklu752v3AkD+Pdu091UmXJ+EjTA= +cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0= +cloud.google.com/go/vpcaccess v1.7.1 h1:ram0GzjNWElmbxXMIzeOZUkQ9J8ZAahD6V8ilPGqX0Y= +cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs= +cloud.google.com/go/webrisk v1.9.1 h1:Ssy3MkOMOnyRV5H2bkMQ13Umv7CwB/kugo3qkAX83Fk= +cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc= +cloud.google.com/go/websecurityscanner v1.6.1 h1:CfEF/vZ+xXyAR3zC9iaC/QRdf1MEgS20r5UR17Q4gOg= +cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg= +cloud.google.com/go/workflows v1.11.1 h1:2akeQ/PgtRhrNuD/n1WvJd5zb7YyuDZrlOanBj2ihPg= +cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -47,25 +276,43 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/ch-go v0.58.0 h1:SwCH/WWJnwHnQdehiQsmraC13+uMpkexPR61c2Jj5Qo= github.com/ClickHouse/ch-go v0.58.0/go.mod h1:P0YtetDdxICIC7IuRM9Qv/TMi83W0mRgBVQ9wcpt+4Y= +github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= +github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.13.0 h1:oP1OlTQIbQKKLnqLzyDhiyNFvN3pbOtM+e/3qdexG9k= github.com/ClickHouse/clickhouse-go/v2 v2.13.0/go.mod h1:xyL0De2K54/n+HGsdtPuyYJq76wefafaHfGUXTDEq/0= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY= +github.com/Microsoft/hcsshim v0.9.6/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409 h1:DTQ/38ao/CfXsrK0cSAL+h4R/u0VVvfWLZEOlLwEROI= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= +github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU= +github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= 
github.com/antchfx/xmlquery v1.3.17 h1:d0qWjPp/D+vtRw7ivCwT5ApH/3CkQU8JOeo3245PpTk= github.com/antchfx/xmlquery v1.3.17/go.mod h1:Afkq4JIeXut75taLSuI31ISJ/zeq+3jG7TunF7noreA= github.com/antchfx/xpath v1.2.4 h1:dW1HB/JxKvGtJ9WyVGJ0sIoEcqftV3SqIstujI+B9XY= github.com/antchfx/xpath v1.2.4/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= +github.com/apex/logs v1.0.0 h1:adOwhOTeXzZTnVuEK13wuJNBFutP0sOfutRS8NY+G6A= github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk= github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0 h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs= github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/aws/aws-sdk-go v1.20.6 h1:kmy4Gvdlyez1fV4kw5RYxZzWKVyuHZHgPWeU/YvRsV4= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v1.20.1 h1:rZBf5DWr7YGrnlTK4kgDQGn1ltqOg5orCYb/UhOFZkg= github.com/aws/aws-sdk-go-v2 v1.20.1/go.mod h1:NU06lETsFm8fUC6ZjhgDpVBcGZTFQ6XM+LZWZxMI4ac= @@ -105,7 +352,10 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.21.2 h1:ympg1+Lnq33XLhcK/xTG4yZHPs1O github.com/aws/aws-sdk-go-v2/service/sts v1.21.2/go.mod h1:FQ/DQcOfESELfJi5ED+IPPAjI5xC6nxtSolVVB773jM= github.com/aws/smithy-go v1.14.1 h1:EFKMUmH/iHMqLiwoEDx2rRjRQpI1YCn5jTysoaDujFs= github.com/aws/smithy-go v1.14.1/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bodgit/plumbing v1.3.0 h1:pf9Itz1JOQgn7vEOE7v7nlEfBykYqvUYioC61TwWCFU= @@ -114,22 +364,43 @@ github.com/bodgit/sevenzip v1.4.3 h1:46Rb9vCYdpceC1U+GIR0bS3hP2/Xv8coKFDeLJySV/A github.com/bodgit/sevenzip v1.4.3/go.mod h1:F8n3+0CwbdxqmNy3wFeOAtanza02Ur66AGfs/hbYblI= github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.1.1/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/connesc/cipherio v0.2.1 h1:FGtpTPMbKNNWByNrr9aEBtaJtXjqOzkIXNYJp6OEycw= +github.com/connesc/cipherio v0.2.1/go.mod h1:ukY0MWJDFnJEbXMQtOcn2VmTpRfzcTz4OoVrWGGJZcA= +github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= +github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/containerd v1.6.18 h1:qZbsLvmyu+Vlty0/Ex5xc0z2YtKpIsb5n45mAMI+2Ns= +github.com/containerd/containerd v1.6.18/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -141,9 +412,22 @@ github.com/djherbis/buffer v1.2.0 h1:PH5Dd2ss0C7CRRhQCZ2u7MssF+No9ide8Ye71nPHcrQ 
github.com/djherbis/buffer v1.2.0/go.mod h1:fjnebbZjCUpPinBRD+TDwXSOeNQ7fPQWLfGQqiAiUyE= github.com/djherbis/nio/v3 v3.0.1 h1:6wxhnuppteMa6RHA4L81Dq7ThkZH8SwnDzXDYy95vB4= github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmWgZxOcmg= +github.com/dmarkham/enumer v1.5.8 h1:fIF11F9l5jyD++YYvxcSH5WgHfeaSGPaN/T4kOQ4qEM= +github.com/dmarkham/enumer v1.5.8/go.mod h1:d10o8R3t/gROm2p3BXqTkMt2+HMuxEmWCXzorAruYak= +github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= +github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.22+incompatible h1:6jX4yB+NtcbldT90k7vBSaWJDB3i+zkVJT9BEK8kQkk= +github.com/docker/docker v20.10.22+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= +github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780 h1:tFh1tRc4CA31yP6qDcu+Trax5wW5GuMxvkIba07qVLY= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -151,29 +435,52 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f h1:7T++XKzy4xg7PKy+bM+Sa9/oe1OC88yz2hXQUISoXfA= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 
h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw= github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw= github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI= github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -182,6 +489,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -203,6 +511,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -215,15 +524,20 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-pkcs11 v0.2.0 h1:5meDPB26aJ98f+K9G21f0AqZwo/S5BJMJh8nuhMbdsI= +github.com/google/go-pkcs11 v0.2.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12 h1:TgXhFz35pKlZuUz1pNlOKk1UCSXPpuUIc144Wd7SxCA= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= @@ -242,16 +556,26 @@ github.com/googleapis/gax-go/v2 v2.12.0 
h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/puddle/v2 v2.2.0 h1:RdcDk92EJBuBS55nQMMYFXTxwstHug4jkhT5pq8VxPk= +github.com/jackc/puddle/v2 v2.2.0/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg= github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= @@ -262,29 +586,44 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jolestar/go-commons-pool/v2 v2.1.2 h1:E+XGo58F23t7HtZiC/W6jzO2Ux2IccSH/yx4nD+J1CM= github.com/jolestar/go-commons-pool/v2 v2.1.2/go.mod h1:r4NYccrkS5UqP1YQI1COyTZ9UjPJAAGTUxzcsK1kqhY= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= 
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -305,25 +644,55 @@ github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebG github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi 
v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrSSDunscGsRM= github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615 h1:/mD+ABZyXD39BzJI2XyRJlqdZG11gXFo0SSynL+OFeU= +github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= +github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= +github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= +github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/mozillazg/go-httpheader v0.4.0 h1:aBn6aRXtFzyDLZ4VIRLsZbbJloagQfMnCiYgOq6hK4w= github.com/mozillazg/go-httpheader v0.4.0/go.mod h1:PuT8h0pw6efvp8ZeUec1Rs7dwjK08bt6gKSReGMqtdA= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= 
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= +github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY= github.com/otiai10/copy v1.12.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= +github.com/pascaldekloe/name v1.0.1 h1:9lnXOHeqeHHnWLbKfH6X98+4+ETVqFqxN09UXSjcMb0= +github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= github.com/paulmach/orb v0.10.0 h1:guVYVqzxHE/CQ1KpfGO077TR0ATHSNjp4s6XGLn3W9s= github.com/paulmach/orb v0.10.0/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= +github.com/paulmach/protoscan v0.2.1 h1:rM0FpcTjUMvPUNk2BhPJrreDKetq43ChnL+x1sRg8O8= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= @@ -347,25 +716,42 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= 
+github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/gunit v1.0.0 h1:RyPDUFcJbvtXlhJPk7v+wnxZRY2EUokhEYl2EJOPToI= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -378,34 +764,57 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563 h1:2VDxTtn9dAqI2DnnvB9fXpPE4DblOmquyzmN2zxTD8A= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563 h1:FoX+MK4vHThvPO6FbP5q98zD8S3n+d5+DbtK7skl++c= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= github.com/tencentyun/cos-go-sdk-v5 v0.7.42 h1:Up1704BJjI5orycXKjpVpvuOInt9GC5pqY4knyE9Uds= github.com/tencentyun/cos-go-sdk-v5 v0.7.42/go.mod h1:LUFnaqRmGk6pEHOaRmdn2dCZR2j0cSsM5xowWFPTPao= +github.com/testcontainers/testcontainers-go v0.14.0 h1:h0D5GaYG9mhOWr2qHdEKDXpkce/VlvaYOCzTRi6UBi8= +github.com/testcontainers/testcontainers-go v0.14.0/go.mod h1:hSRGJ1G8Q5Bw2gXgPulJOLlEBaYJHeBSOkQM5JLG+JQ= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= github.com/tj/assert v0.0.3/go.mod 
h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.1.0 h1:Lo2OsPHlIxXF24zApe15AbK3bJLAOvkkxEA6Ux4c47M= github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc= github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod 
h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -415,9 +824,22 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= +go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= +go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= +go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= +go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc= go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -443,8 +865,10 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -454,8 +878,10 @@ golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -541,11 +967,14 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -559,6 +988,7 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -587,6 +1017,8 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -629,6 +1061,8 @@ google.golang.org/genproto v0.0.0-20230807174057-1744710a1577 h1:Tyk/35yqszRCvar google.golang.org/genproto v0.0.0-20230807174057-1744710a1577/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577 h1:xv8KoglAClYGkprUSmDTKaILtzfD8XzG9NYVXMprjKo= google.golang.org/genproto/googleapis/api v0.0.0-20230807174057-1744710a1577/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577 h1:ZX0eQu2J+jOO87sq8fQG8J/Nfp7D7BhHpixIE5EYK/k= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20230807174057-1744710a1577/go.mod h1:NjCQG/D8JandXxM57PZbAJL1DCNL6EypA0vPPwfsc7c= google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -666,8 +1100,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -683,7 +1120,11 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go 
index 830ad22e..aeea9a0a 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -109,11 +109,11 @@ func (ch *ClickHouse) Connect() error { return err } - logFunc := log.Info() + logLevel := zerolog.InfoLevel if !ch.Config.LogSQLQueries { - logFunc = log.Debug() + logLevel = zerolog.DebugLevel } - logFunc.Stack().Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v?timeout=%v", ch.Config.Host, ch.Config.Port, ch.Config.Timeout)) + log.WithLevel(logLevel).Stack().Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v?timeout=%v", ch.Config.Host, ch.Config.Port, ch.Config.Timeout)) err = ch.conn.Ping(context.Background()) if err != nil { log.Error().Msgf("clickhouse connection ping: %s return error: %v", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) @@ -121,7 +121,7 @@ func (ch *ClickHouse) Connect() error { } else { ch.IsOpen = true } - logFunc.Stack().Msgf("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + log.WithLevel(logLevel).Msgf("clickhouse connection open: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) return err } From b8b1c66323b45d66b0a98b3c5b713c63445ad8d8 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 2 Aug 2024 18:05:23 +0400 Subject: [PATCH 18/21] trigger zerolog branch build --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index af45a21d..8ae84717 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,7 +7,7 @@ on: push: branches: - - master + - * jobs: build: From 1d10034c0080fa75082089a322c8ac5acde105a5 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 2 Aug 2024 18:11:33 +0400 Subject: [PATCH 19/21] trigger zerolog branch build --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8ae84717..ab81be56 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -7,7 +7,7 @@ on: push: branches: - - * + - "*" jobs: build: From 8e963d1322fc2bd485cf44f228550b9edc1ed5c8 Mon Sep 17 00:00:00 2001 From: Slach Date: Fri, 2 Aug 2024 18:29:42 +0400 Subject: [PATCH 20/21] trigger zerolog branch build --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index ab81be56..7d14ef3c 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -162,7 +162,7 @@ jobs: set -x export CLICKHOUSE_TESTS_DIR=$(pwd)/test/testflows/clickhouse_backup - command -v docker-compose || (apt-get update && apt-get install -y python3-pip && pip3 install -U docker-compose) + command -v docker-compose || (sudo apt-get update && sudo apt-get install -y python3-pip && pip3 install -U docker-compose) docker-compose -f ${CLICKHOUSE_TESTS_DIR}/docker-compose/docker-compose.yml pull chmod +x $(pwd)/clickhouse-backup/clickhouse-backup* From a26d40a1bfb680f8917f1e5d8780de6c2ac5e4df Mon Sep 17 00:00:00 2001 From: Slach Date: Sat, 3 Aug 2024 19:25:01 +0400 Subject: [PATCH 21/21] improve build.yaml to avoid double trigger --- .github/workflows/build.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a3de0915..6865d990 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -3,11 
+3,11 @@ name: Build on: pull_request: branches: - - master + - "*" push: branches: - - "*" + - master jobs: build:
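For readers following the pkg/clickhouse/clickhouse.go hunk above: a minimal, self-contained sketch (not part of the patch) of the runtime level-selection pattern it introduces with zerolog. The logSQLQueries variable is a stand-in for ch.Config.LogSQLQueries, and the host/port values are placeholders.

package main

import (
	"os"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Route the global logger to stdout with timestamps.
	log.Logger = zerolog.New(os.Stdout).With().Timestamp().Logger()

	logSQLQueries := false // stand-in for ch.Config.LogSQLQueries

	// Choose the level at call time and emit through a single chain via
	// log.WithLevel, instead of keeping a pre-built log.Info()/log.Debug()
	// entry in a variable as the old apex/log code did.
	logLevel := zerolog.InfoLevel
	if !logSQLQueries {
		logLevel = zerolog.DebugLevel
	}
	log.WithLevel(logLevel).Msgf("clickhouse connection prepared: tcp://%s:%d", "127.0.0.1", 9000)
}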