diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a3de0915..6865d990 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -3,11 +3,11 @@ name: Build on: pull_request: branches: - - master + - "*" push: branches: - - "*" + - master jobs: build: diff --git a/ChangeLog.md b/ChangeLog.md index 44185f5d..d65e591d 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -404,6 +404,7 @@ IMPROVEMENTS - improve support for `use_embedded_backup_restore: true`, applied ugly workaround in test to avoid https://github.com/ClickHouse/ClickHouse/issues/43971, and applied restore workaround to resolve https://github.com/ClickHouse/ClickHouse/issues/42709 - migrate to `clickhouse-go/v2`, fix [540](https://github.com/Altinity/clickhouse-backup/issues/540), close [562](https://github.com/Altinity/clickhouse-backup/pull/562) - add documentation for `AWS_ARN_ROLE` and `AWS_WEB_IDENTITY_TOKEN_FILE`, fix [563](https://github.com/Altinity/clickhouse-backup/issues/563) +- migrate from `apex/log` to `rs/zerolog`, fix race conditions, fix [624](https://github.com/Altinity/clickhouse-backup/issues/624), see details https://github.com/apex/log/issues/103 BUG FIXES - hotfix wrong empty files when disk_mapping contains don't exist during creation, affected 2.2.7 version, look details [676](https://github.com/Altinity/clickhouse-backup/issues/676#issue-1771732489) diff --git a/cmd/clickhouse-backup/main.go b/cmd/clickhouse-backup/main.go index 9c83f9b8..07e110db 100644 --- a/cmd/clickhouse-backup/main.go +++ b/cmd/clickhouse-backup/main.go @@ -3,15 +3,17 @@ package main import ( "context" "fmt" + stdlog "log" "os" "strings" - "github.com/apex/log" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/rs/zerolog/pkgerrors" "github.com/urfave/cli" "github.com/Altinity/clickhouse-backup/v2/pkg/backup" "github.com/Altinity/clickhouse-backup/v2/pkg/config" - "github.com/Altinity/clickhouse-backup/v2/pkg/logcli" "github.com/Altinity/clickhouse-backup/v2/pkg/server" "github.com/Altinity/clickhouse-backup/v2/pkg/status" ) @@ -23,7 +25,16 @@ var ( ) func main() { - log.SetHandler(logcli.New(os.Stderr)) + zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + consoleWriter := zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + //diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) { + // fmt.Printf("Logger Dropped %d messages", missed) + //}) + log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Caller().Logger() + //zerolog.SetGlobalLevel(zerolog.Disabled) + //log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger() + stdlog.SetOutput(log.Logger) cliapp := cli.NewApp() cliapp.Name = "clickhouse-backup" cliapp.Usage = "Tool for easy backup of ClickHouse with cloud support" @@ -499,11 +510,11 @@ func main() { Action: func(c *cli.Context) error { b := backup.NewBackuper(config.GetConfigFromCli(c)) if c.Args().Get(1) == "" { - log.Errorf("Backup name must be defined") + log.Err(fmt.Errorf("backup name must be defined")).Send() cli.ShowCommandHelpAndExit(c, c.Command.Name, 1) } if c.Args().Get(0) != "local" && c.Args().Get(0) != "remote" { - log.Errorf("Unknown command '%s'\n", c.Args().Get(0)) + log.Err(fmt.Errorf("Unknown command '%s'\n", c.Args().Get(0))).Send() cli.ShowCommandHelpAndExit(c, c.Command.Name, 1) } return b.Delete(c.Args().Get(0), c.Args().Get(1), c.Int("command-id")) @@ -639,6 +650,6 
@@ func main() { }, } if err := cliapp.Run(os.Args); err != nil { - log.Fatal(err.Error()) + log.Fatal().Err(err).Send() } } diff --git a/go.mod b/go.mod index 5efd84d9..a28788a4 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,6 @@ require ( // wrong INSERT syntax, wait when resolve https://github.com/ClickHouse/clickhouse-go/issues/1345 github.com/ClickHouse/clickhouse-go/v2 v2.23.2 github.com/antchfx/xmlquery v1.4.1 - github.com/apex/log v1.9.0 github.com/aws/aws-sdk-go-v2 v1.30.1 github.com/aws/aws-sdk-go-v2/config v1.27.23 github.com/aws/aws-sdk-go-v2/credentials v1.17.23 @@ -20,7 +19,6 @@ require ( github.com/djherbis/buffer v1.2.0 github.com/djherbis/nio/v3 v3.0.1 github.com/eapache/go-resiliency v1.6.0 - github.com/go-logfmt/logfmt v0.6.0 github.com/go-zookeeper/zk v1.0.3 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/uuid v1.6.0 @@ -49,7 +47,11 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) -require golang.org/x/text v0.16.0 +require ( + github.com/apex/log v1.9.0 + github.com/rs/zerolog v1.33.0 + golang.org/x/text v0.16.0 +) require ( cloud.google.com/go v0.115.0 // indirect @@ -104,7 +106,9 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kr/fs v0.1.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mozillazg/go-httpheader v0.2.1 // indirect github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect diff --git a/go.sum b/go.sum index 4c755e5d..d271024e 100644 --- a/go.sum +++ b/go.sum @@ -9,145 +9,30 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= -cloud.google.com/go/accessapproval v1.7.7/go.mod h1:10ZDPYiTm8tgxuMPid8s2DL93BfCt6xBh/Vg0Xd8pU0= -cloud.google.com/go/accesscontextmanager v1.8.7/go.mod h1:jSvChL1NBQ+uLY9zUBdPy9VIlozPoHptdBnRYeWuQoM= -cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= -cloud.google.com/go/analytics v0.23.2/go.mod h1:vtE3olAXZ6edJYk1UOndEs6EfaEc9T2B28Y4G5/a7Fo= -cloud.google.com/go/apigateway v1.6.7/go.mod h1:7wAMb/33Rzln+PrGK16GbGOfA1zAO5Pq6wp19jtIt7c= -cloud.google.com/go/apigeeconnect v1.6.7/go.mod h1:hZxCKvAvDdKX8+eT0g5eEAbRSS9Gkzi+MPWbgAMAy5U= -cloud.google.com/go/apigeeregistry v0.8.5/go.mod h1:ZMg60hq2K35tlqZ1VVywb9yjFzk9AJ7zqxrysOxLi3o= -cloud.google.com/go/appengine v1.8.7/go.mod h1:1Fwg2+QTgkmN6Y+ALGwV8INLbdkI7+vIvhcKPZCML0g= -cloud.google.com/go/area120 v0.8.7/go.mod h1:L/xTq4NLP9mmxiGdcsVz7y1JLc9DI8pfaXRXbnjkR6w= -cloud.google.com/go/artifactregistry v1.14.9/go.mod h1:n2OsUqbYoUI2KxpzQZumm6TtBgtRf++QulEohdnlsvI= -cloud.google.com/go/asset v1.19.1/go.mod h1:kGOS8DiCXv6wU/JWmHWCgaErtSZ6uN5noCy0YwVaGfs= -cloud.google.com/go/assuredworkloads v1.11.7/go.mod h1:CqXcRH9N0KCDtHhFisv7kk+cl//lyV+pYXGi1h8rCEU= cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38= cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= 
-cloud.google.com/go/automl v1.13.7/go.mod h1:E+s0VOsYXUdXpq0y4gNZpi0A/s6y9+lAarmV5Eqlg40= -cloud.google.com/go/baremetalsolution v1.2.6/go.mod h1:KkS2BtYXC7YGbr42067nzFr+ABFMs6cxEcA1F+cedIw= -cloud.google.com/go/batch v1.8.7/go.mod h1:O5/u2z8Wc7E90Bh4yQVLQIr800/0PM5Qzvjac3Jxt4k= -cloud.google.com/go/beyondcorp v1.0.6/go.mod h1:wRkenqrVRtnGFfnyvIg0zBFUdN2jIfeojFF9JJDwVIA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.61.0/go.mod h1:PjZUje0IocbuTOdq4DBOJLNYB0WF3pAKBHzAYyxCwFo= -cloud.google.com/go/billing v1.18.5/go.mod h1:lHw7fxS6p7hLWEPzdIolMtOd0ahLwlokW06BzbleKP8= -cloud.google.com/go/binaryauthorization v1.8.3/go.mod h1:Cul4SsGlbzEsWPOz2sH8m+g2Xergb6ikspUyQ7iOThE= -cloud.google.com/go/certificatemanager v1.8.1/go.mod h1:hDQzr50Vx2gDB+dOfmDSsQzJy/UPrYRdzBdJ5gAVFIc= -cloud.google.com/go/channel v1.17.7/go.mod h1:b+FkgBrhMKM3GOqKUvqHFY/vwgp+rwsAuaMd54wCdN4= -cloud.google.com/go/cloudbuild v1.16.1/go.mod h1:c2KUANTtCBD8AsRavpPout6Vx8W+fsn5zTsWxCpWgq4= -cloud.google.com/go/clouddms v1.7.6/go.mod h1:8HWZ2tznZ0mNAtTpfnRNT0QOThqn9MBUqTj0Lx8npIs= -cloud.google.com/go/cloudtasks v1.12.8/go.mod h1:aX8qWCtmVf4H4SDYUbeZth9C0n9dBj4dwiTYi4Or/P4= -cloud.google.com/go/compute v1.27.0/go.mod h1:LG5HwRmWFKM2C5XxHRiNzkLLXW48WwvyVC0mfWsYPOM= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/contactcenterinsights v1.13.2/go.mod h1:AfkSB8t7mt2sIY6WpfO61nD9J9fcidIchtxm9FqJVXk= -cloud.google.com/go/container v1.37.0/go.mod h1:AFsgViXsfLvZHsgHrWQqPqfAPjCwXrZmLjKJ64uhLIw= -cloud.google.com/go/containeranalysis v0.11.6/go.mod h1:YRf7nxcTcN63/Kz9f86efzvrV33g/UV8JDdudRbYEUI= -cloud.google.com/go/datacatalog v1.20.1/go.mod h1:Jzc2CoHudhuZhpv78UBAjMEg3w7I9jHA11SbRshWUjk= -cloud.google.com/go/dataflow v0.9.7/go.mod h1:3BjkOxANrm1G3+/EBnEsTEEgJu1f79mFqoOOZfz3v+E= -cloud.google.com/go/dataform v0.9.4/go.mod h1:jjo4XY+56UrNE0wsEQsfAw4caUs4DLJVSyFBDelRDtQ= -cloud.google.com/go/datafusion v1.7.7/go.mod h1:qGTtQcUs8l51lFA9ywuxmZJhS4ozxsBSus6ItqCUWMU= -cloud.google.com/go/datalabeling v0.8.7/go.mod h1:/PPncW5gxrU15UzJEGQoOT3IobeudHGvoExrtZ8ZBwo= -cloud.google.com/go/dataplex v1.16.1/go.mod h1:szV2OpxfbmRBcw1cYq2ln8QsLR3FJq+EwTTIo+0FnyE= -cloud.google.com/go/dataproc/v2 v2.4.2/go.mod h1:smGSj1LZP3wtnsM9eyRuDYftNAroAl6gvKp/Wk64XDE= -cloud.google.com/go/dataqna v0.8.7/go.mod h1:hvxGaSvINAVH5EJJsONIwT1y+B7OQogjHPjizOFoWOo= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.17.1/go.mod h1:mtzZ2HcVtz90OVrEXXGDc2pO4NM1kiBQy8YV4qGe0ZM= -cloud.google.com/go/datastream v1.10.6/go.mod h1:lPeXWNbQ1rfRPjBFBLUdi+5r7XrniabdIiEaCaAU55o= -cloud.google.com/go/deploy v1.19.0/go.mod h1:BW9vAujmxi4b/+S7ViEuYR65GiEsqL6Mhf5S/9TeDRU= -cloud.google.com/go/dialogflow v1.54.0/go.mod h1:/YQLqB0bdDJl+zFKN+UNQsYUqLfWZb1HsJUQqMT7Q6k= -cloud.google.com/go/dlp v1.14.0/go.mod h1:4fvEu3EbLsHrgH3QFdFlTNIiCP5mHwdYhS/8KChDIC4= -cloud.google.com/go/documentai v1.30.1/go.mod h1:RohRpAfvuv3uk3WQtXPpgQ3YABvzacWnasyJQb6AAPk= -cloud.google.com/go/domains v0.9.7/go.mod h1:u/yVf3BgfPJW3QDZl51qTJcDXo9PLqnEIxfGmGgbHEc= -cloud.google.com/go/edgecontainer v1.2.1/go.mod h1:OE2D0lbkmGDVYLCvpj8Y0M4a4K076QB7E2JupqOR/qU= -cloud.google.com/go/errorreporting v0.3.0/go.mod 
h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.6.8/go.mod h1:EHONVDSum2xxG2p+myyVda/FwwvGbY58ZYC4XqI/lDQ= -cloud.google.com/go/eventarc v1.13.6/go.mod h1:QReOaYnDNdjwAQQWNC7nfr63WnaKFUw7MSdQ9PXJYj0= -cloud.google.com/go/filestore v1.8.3/go.mod h1:QTpkYpKBF6jlPRmJwhLqXfJQjVrQisplyb4e2CwfJWc= -cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= -cloud.google.com/go/functions v1.16.2/go.mod h1:+gMvV5E3nMb9EPqX6XwRb646jTyVz8q4yk3DD6xxHpg= -cloud.google.com/go/gkebackup v1.5.0/go.mod h1:eLaf/+n8jEmIvOvDriGjo99SN7wRvVadoqzbZu0WzEw= -cloud.google.com/go/gkeconnect v0.8.7/go.mod h1:iUH1jgQpTyNFMK5LgXEq2o0beIJ2p7KKUUFerkf/eGc= -cloud.google.com/go/gkehub v0.14.7/go.mod h1:NLORJVTQeCdxyAjDgUwUp0A6BLEaNLq84mCiulsM4OE= -cloud.google.com/go/gkemulticloud v1.2.0/go.mod h1:iN5wBxTLPR6VTBWpkUsOP2zuPOLqZ/KbgG1bZir1Cng= -cloud.google.com/go/gsuiteaddons v1.6.7/go.mod h1:u+sGBvr07OKNnOnQiB/Co1q4U2cjo50ERQwvnlcpNis= cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/iap v1.9.6/go.mod h1:YiK+tbhDszhaVifvzt2zTEF2ch9duHtp6xzxj9a0sQk= -cloud.google.com/go/ids v1.4.7/go.mod h1:yUkDC71u73lJoTaoONy0dsA0T7foekvg6ZRg9IJL0AA= -cloud.google.com/go/iot v1.7.7/go.mod h1:tr0bCOSPXtsg64TwwZ/1x+ReTWKlQRVXbM+DnrE54yM= -cloud.google.com/go/kms v1.18.0/go.mod h1:DyRBeWD/pYBMeyiaXFa/DGNyxMDL3TslIKb8o/JkLkw= -cloud.google.com/go/language v1.12.5/go.mod h1:w/6a7+Rhg6Bc2Uzw6thRdKKNjnOzfKTJuxzD0JZZ0nM= -cloud.google.com/go/lifesciences v0.9.7/go.mod h1:FQ713PhjAOHqUVnuwsCe1KPi9oAdaTfh58h1xPiW13g= -cloud.google.com/go/logging v1.10.0/go.mod h1:EHOwcxlltJrYGqMGfghSet736KR3hX1MAj614mrMk9I= cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= -cloud.google.com/go/managedidentities v1.6.7/go.mod h1:UzslJgHnc6luoyx2JV19cTCi2Fni/7UtlcLeSYRzTV8= -cloud.google.com/go/maps v1.11.1/go.mod h1:XcSsd8lg4ZhLPCtJ2YHcu/xLVePBzZOlI7GmR2cRCws= -cloud.google.com/go/mediatranslation v0.8.7/go.mod h1:6eJbPj1QJwiCP8R4K413qMx6ZHZJUi9QFpApqY88xWU= -cloud.google.com/go/memcache v1.10.7/go.mod h1:SrU6+QBhvXJV0TA59+B3oCHtLkPx37eqdKmRUlmSE1k= -cloud.google.com/go/metastore v1.13.6/go.mod h1:OBCVMCP7X9vA4KKD+5J4Q3d+tiyKxalQZnksQMq5MKY= -cloud.google.com/go/monitoring v1.19.0/go.mod h1:25IeMR5cQ5BoZ8j1eogHE5VPJLlReQ7zFp5OiLgiGZw= -cloud.google.com/go/networkconnectivity v1.14.6/go.mod h1:/azB7+oCSmyBs74Z26EogZ2N3UcXxdCHkCPcz8G32bU= -cloud.google.com/go/networkmanagement v1.13.2/go.mod h1:24VrV/5HFIOXMEtVQEUoB4m/w8UWvUPAYjfnYZcBc4c= -cloud.google.com/go/networksecurity v0.9.7/go.mod h1:aB6UiPnh/l32+TRvgTeOxVRVAHAFFqvK+ll3idU5BoY= -cloud.google.com/go/notebooks v1.11.5/go.mod h1:pz6P8l2TvhWqAW3sysIsS0g2IUJKOzEklsjWJfi8sd4= -cloud.google.com/go/optimization v1.6.5/go.mod h1:eiJjNge1NqqLYyY75AtIGeQWKO0cvzD1ct/moCFaP2Q= -cloud.google.com/go/orchestration v1.9.2/go.mod h1:8bGNigqCQb/O1kK7PeStSNlyi58rQvZqDiuXT9KAcbg= -cloud.google.com/go/orgpolicy v1.12.3/go.mod h1:6BOgIgFjWfJzTsVcib/4QNHOAeOjCdaBj69aJVs//MA= -cloud.google.com/go/osconfig v1.12.7/go.mod h1:ID7Lbqr0fiihKMwAOoPomWRqsZYKWxfiuafNZ9j1Y1M= -cloud.google.com/go/oslogin v1.13.3/go.mod h1:WW7Rs1OJQ1iSUckZDilvNBSNPE8on740zF+4ZDR4o8U= -cloud.google.com/go/phishingprotection v0.8.7/go.mod h1:FtYaOyGc/HQQU7wY4sfwYZBFDKAL+YtVBjUj8E3A3/I= 
-cloud.google.com/go/policytroubleshooter v1.10.5/go.mod h1:bpOf94YxjWUqsVKokzPBibMSAx937Jp2UNGVoMAtGYI= -cloud.google.com/go/privatecatalog v0.9.7/go.mod h1:NWLa8MCL6NkRSt8jhL8Goy2A/oHkvkeAxiA0gv0rIXI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.39.0/go.mod h1:FrEnrSGU6L0Kh3iBaAbIUM8KMR7LqyEkMboVxGXCT+s= -cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= -cloud.google.com/go/recaptchaenterprise/v2 v2.13.0/go.mod h1:jNYyn2ScR4DTg+VNhjhv/vJQdaU8qz+NpmpIzEE7HFQ= -cloud.google.com/go/recommendationengine v0.8.7/go.mod h1:YsUIbweUcpm46OzpVEsV5/z+kjuV6GzMxl7OAKIGgKE= -cloud.google.com/go/recommender v1.12.3/go.mod h1:OgN0MjV7/6FZUUPgF2QPQtYErtZdZc4u+5onvurcGEI= -cloud.google.com/go/redis v1.16.0/go.mod h1:NLzG3Ur8ykVIZk+i5ienRnycsvWzQ0uCLcil6Htc544= -cloud.google.com/go/resourcemanager v1.9.7/go.mod h1:cQH6lJwESufxEu6KepsoNAsjrUtYYNXRwxm4QFE5g8A= -cloud.google.com/go/resourcesettings v1.7.0/go.mod h1:pFzZYOQMyf1hco9pbNWGEms6N/2E7nwh0oVU1Tz+4qA= -cloud.google.com/go/retail v1.17.0/go.mod h1:GZ7+J084vyvCxO1sjdBft0DPZTCA/lMJ46JKWxWeb6w= -cloud.google.com/go/run v1.3.7/go.mod h1:iEUflDx4Js+wK0NzF5o7hE9Dj7QqJKnRj0/b6rhVq20= -cloud.google.com/go/scheduler v1.10.8/go.mod h1:0YXHjROF1f5qTMvGTm4o7GH1PGAcmu/H/7J7cHOiHl0= -cloud.google.com/go/secretmanager v1.13.1/go.mod h1:y9Ioh7EHp1aqEKGYXk3BOC+vkhlHm9ujL7bURT4oI/4= -cloud.google.com/go/security v1.17.0/go.mod h1:eSuFs0SlBv1gWg7gHIoF0hYOvcSwJCek/GFXtgO6aA0= -cloud.google.com/go/securitycenter v1.30.0/go.mod h1:/tmosjS/dfTnzJxOzZhTXdX3MXWsCmPWfcYOgkJmaJk= -cloud.google.com/go/servicedirectory v1.11.7/go.mod h1:fiO/tM0jBpVhpCAe7Yp5HmEsmxSUcOoc4vPrO02v68I= -cloud.google.com/go/shell v1.7.7/go.mod h1:7OYaMm3TFMSZBh8+QYw6Qef+fdklp7CjjpxYAoJpZbQ= -cloud.google.com/go/spanner v1.63.0/go.mod h1:iqDx7urZpgD7RekZ+CFvBRH6kVTW1ZSEb2HMDKOp5Cc= -cloud.google.com/go/speech v1.23.1/go.mod h1:UNgzNxhNBuo/OxpF1rMhA/U2rdai7ILL6PBXFs70wq0= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU= cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ= -cloud.google.com/go/storagetransfer v1.10.6/go.mod h1:3sAgY1bx1TpIzfSzdvNGHrGYldeCTyGI/Rzk6Lc6A7w= -cloud.google.com/go/talent v1.6.8/go.mod h1:kqPAJvhxmhoUTuqxjjk2KqA8zUEeTDmH+qKztVubGlQ= -cloud.google.com/go/texttospeech v1.7.7/go.mod h1:XO4Wr2VzWHjzQpMe3gS58Oj68nmtXMyuuH+4t0wy9eA= -cloud.google.com/go/tpu v1.6.7/go.mod h1:o8qxg7/Jgt7TCgZc3jNkd4kTsDwuYD3c4JTMqXZ36hU= -cloud.google.com/go/trace v1.10.7/go.mod h1:qk3eiKmZX0ar2dzIJN/3QhY2PIFh1eqcIdaN5uEjQPM= -cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs= -cloud.google.com/go/video v1.21.0/go.mod h1:Kqh97xHXZ/bIClgDHf5zkKvU3cvYnLyRefmC8yCBqKI= -cloud.google.com/go/videointelligence v1.11.7/go.mod h1:iMCXbfjurmBVgKuyLedTzv90kcnppOJ6ttb0+rLDID0= -cloud.google.com/go/vision/v2 v2.8.2/go.mod h1:BHZA1LC7dcHjSr9U9OVhxMtLKd5l2jKPzLRALEJvuaw= -cloud.google.com/go/vmmigration v1.7.7/go.mod h1:qYIK5caZY3IDMXQK+A09dy81QU8qBW0/JDTc39OaKRw= -cloud.google.com/go/vmwareengine v1.1.3/go.mod h1:UoyF6LTdrIJRvDN8uUB8d0yimP5A5Ehkr1SRzL1APZw= -cloud.google.com/go/vpcaccess v1.7.7/go.mod 
h1:EzfSlgkoAnFWEMznZW0dVNvdjFjEW97vFlKk4VNBhwY= -cloud.google.com/go/webrisk v1.9.7/go.mod h1:7FkQtqcKLeNwXCdhthdXHIQNcFWPF/OubrlyRcLHNuQ= -cloud.google.com/go/websecurityscanner v1.6.7/go.mod h1:EpiW84G5KXxsjtFKK7fSMQNt8JcuLA8tQp7j0cyV458= -cloud.google.com/go/workflows v1.12.6/go.mod h1:oDbEHKa4otYg4abwdw2Z094jB0TLLiFGAPA78EDAKag= -dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= @@ -170,14 +55,9 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/ch-go v0.61.5 h1:zwR8QbYI0tsMiEcze/uIMK+Tz1D3XZXLdNrlaOpeEI4= github.com/ClickHouse/ch-go v0.61.5/go.mod h1:s1LJW/F/LcFs5HJnuogFMta50kKDO0lf9zzfrbl0RQg= -github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= github.com/ClickHouse/clickhouse-go/v2 v2.23.2 h1:+DAKPMnxLS7pduQZsrJc8OhdLS2L9MfDEJ2TS+hpYDM= github.com/ClickHouse/clickhouse-go/v2 v2.23.2/go.mod h1:aNap51J1OM3yxQJRgM+AlP/MPkGBCL8A74uQThoQhR0= -github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/Microsoft/hcsshim v0.11.4/go.mod h1:smjE4dvqPX9Zldna+t5FG3rnoHhaB7QYxPRqGcpAD9w= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= -github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= github.com/antchfx/xmlquery v1.4.1 h1:YgpSwbeWvLp557YFTi8E3z6t6/hYjmFEtiEKbDfEbl0= @@ -237,9 +117,7 @@ github.com/bodgit/sevenzip v1.3.0 h1:1ljgELgtHqvgIp8W8kgeEGHIWP4ch3xGI8uOBZgLVKY github.com/bodgit/sevenzip v1.3.0/go.mod h1:omwNcgZTEooWM8gA/IJ2Nk/+ZQ94+GsytRzOJJ8FBlM= github.com/bodgit/windows v1.0.0 h1:rLQ/XjsleZvx4fR1tB/UxQrK+SJ2OFHzfPjLWWOhDIA= github.com/bodgit/windows v1.0.0/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.2.0 
h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -248,42 +126,30 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/connesc/cipherio v0.2.1 h1:FGtpTPMbKNNWByNrr9aEBtaJtXjqOzkIXNYJp6OEycw= github.com/connesc/cipherio v0.2.1/go.mod h1:ukY0MWJDFnJEbXMQtOcn2VmTpRfzcTz4OoVrWGGJZcA= -github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/djherbis/buffer v1.1.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o= github.com/djherbis/buffer v1.2.0 h1:PH5Dd2ss0C7CRRhQCZ2u7MssF+No9ide8Ye71nPHcrQ= github.com/djherbis/buffer v1.2.0/go.mod h1:fjnebbZjCUpPinBRD+TDwXSOeNQ7fPQWLfGQqiAiUyE= github.com/djherbis/nio/v3 v3.0.1 h1:6wxhnuppteMa6RHA4L81Dq7ThkZH8SwnDzXDYy95vB4= github.com/djherbis/nio/v3 v3.0.1/go.mod h1:Ng4h80pbZFMla1yKzm61cF0tqqilXZYrogmWgZxOcmg= -github.com/dmarkham/enumer v1.5.9/go.mod h1:e4VILe2b1nYK3JKJpRmNdl5xbDQvELc6tQ8b+GsGk6E= -github.com/docker/docker v26.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= -github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q= github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/envoyproxy/go-control-plane v0.9.0/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -297,24 +163,20 @@ github.com/go-faster/errors v0.7.1 h1:MkJTnDoEdi9pDabt1dpWf7AA8/BaSYZqibYyhZ20AY github.com/go-faster/errors v0.7.1/go.mod h1:5ySTjWFiphBs07IKuiL69nxdfd5+fzh1u7FPGZP2quo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= -github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -355,7 +217,6 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= @@ -383,18 +244,15 @@ github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBY github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafov/m3u8 v0.12.0/go.mod h1:nqzOkfBiZJENr52zTVd/Dcl03yzphIMbJqkXGu+u080= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jlaffaye/ftp v0.2.0 h1:lXNvW7cBu7R/68bknOX3MrRIIqZ61zELs1P2RAiA3lg= github.com/jlaffaye/ftp v0.2.0/go.mod h1:is2Ds5qkhceAPy2xD6RLI6hmp/qysSoymZ+Z2uTnspI= @@ -406,11 +264,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/jolestar/go-commons-pool/v2 v2.1.2 h1:E+XGo58F23t7HtZiC/W6jzO2Ux2IccSH/yx4nD+J1CM= github.com/jolestar/go-commons-pool/v2 v2.1.2/go.mod h1:r4NYccrkS5UqP1YQI1COyTZ9UjPJAAGTUxzcsK1kqhY= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -433,14 +288,17 @@ github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3x github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -448,30 +306,17 @@ github.com/mholt/archiver/v4 v4.0.0-alpha.8 h1:tRGQuDVPh66WCOelqe6LIGh0gwmfwxUrS github.com/mholt/archiver/v4 v4.0.0-alpha.8/go.mod h1:5f7FUYGXdJWUjESffJaYR4R60VhnHxb2X3T1teMyv5A= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= -github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU= -github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nwaples/rardecode/v2 v2.0.0-beta.2 
h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk= github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= -github.com/pascaldekloe/name v1.0.1/go.mod h1:Z//MfYJnH4jVpQ9wkclwu2I2MkHmXTlT9wR5UZScttM= github.com/paulmach/orb v0.11.1 h1:3koVegMC4X/WeiXYz9iswopaTwMem53NzTJuTF20JzU= github.com/paulmach/orb v0.11.1/go.mod h1:5mULz1xQfs3bmQm63QEJA6lNGujuRafwA5S/EnuLaLU= github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY= @@ -484,7 +329,6 @@ github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -502,18 +346,17 @@ github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shopspring/decimal v1.4.0 
h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= @@ -535,7 +378,6 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.563/go.mod github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.563/go.mod h1:uom4Nvi9W+Qkom0exYiJ9VWJjXwyxtPYTkKkaLMlfE0= github.com/tencentyun/cos-go-sdk-v5 v0.7.52 h1:gCCQBWGNs8Bgx0VSsld5vjgi2U+MiMwjbzy7sDMyKiA= github.com/tencentyun/cos-go-sdk-v5 v0.7.52/go.mod h1:UN+VdbCl1hg+kKi5RXqZgaP+Boqfmk+D04GRc4XFk70= -github.com/testcontainers/testcontainers-go v0.28.0/go.mod h1:COlDpUXbwW3owtpMkEB1zo9gwb1CoKVKlyrVPejF4AU= github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw= github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -546,8 +388,6 @@ github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -556,7 +396,6 @@ github.com/urfave/cli v1.22.15/go.mod h1:wSan1hmo5zeyLGBjRJbzRTNk8gwoYa2B9n4q9dm github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= -github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xyproto/gionice v1.3.0 h1:v0X22Iduy+lplGT735pKRc6c550vvXYXvgXy0ZkA+qM= github.com/xyproto/gionice v1.3.0/go.mod h1:bXnNfrv26yrsvmIXmTPztA+/MJ6wDjKv4l5can8P0cw= github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc= @@ -565,7 +404,6 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io 
v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -585,9 +423,6 @@ go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucg go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= -go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -614,7 +449,6 @@ golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -707,9 +541,12 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= @@ -766,7 +603,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -788,7 +624,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -807,7 +642,6 @@ google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/H google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4 h1:MuYw1wJzT+ZkybKfaOXKp5hJiZDn2iHaXRw0mRYdHSc= google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:/oe3+SiHAwz6s+M25PyTygWm3lnrhmGqIuIfkoUocqk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d h1:k3zyW3BYYR30e8v3x0bTDdE9vpYFjZHK+HcyqkrppWk= google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= diff --git a/pkg/backup/backup_shard.go b/pkg/backup/backup_shard.go index 2779d3d3..9aca4fb4 100644 --- a/pkg/backup/backup_shard.go +++ b/pkg/backup/backup_shard.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "github.com/rs/zerolog/log" "hash/fnv" ) @@ -95,7 +96,7 @@ func fnvShardReplicaFromString(str string, activeReplicas []string) (string, err h := fnv.New32a() if _, err := h.Write([]byte(str)); err != nil { - return "", fmt.Errorf("can't write %s to fnv.New32a", str) + log.Fatal().Stack().Msgf("can't write %s to fnv.New32a", str) } i := h.Sum32() % uint32(len(activeReplicas)) return activeReplicas[i], nil diff --git a/pkg/backup/backuper.go b/pkg/backup/backuper.go index 964e3d10..0160d49f 100644 --- a/pkg/backup/backuper.go +++ b/pkg/backup/backuper.go @@ -14,8 +14,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/resumable" "github.com/Altinity/clickhouse-backup/v2/pkg/storage" - - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" ) const DirectoryFormat = "directory" @@ -35,7 +34,6 @@ type Backuper struct { vers versioner bs backupSharder dst *storage.BackupDestination - log *apexLog.Entry DiskToPathMap map[string]string DefaultDataPath string EmbeddedBackupDataPath string @@ -47,14 +45,12 @@ type Backuper struct { func NewBackuper(cfg *config.Config, opts ...BackuperOpt) *Backuper { ch := &clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Log: apexLog.WithField("logger", "clickhouse"), } b := 
&Backuper{ cfg: cfg, ch: ch, vers: ch, bs: nil, - log: apexLog.WithField("logger", "backuper"), } for _, opt := range opts { opt(b) @@ -186,13 +182,13 @@ func (b *Backuper) getEmbeddedBackupDefaultSettings(version int) []string { if (b.cfg.General.RemoteStorage == "s3" || b.cfg.General.RemoteStorage == "gcs") && version >= 23007000 { settings = append(settings, "allow_s3_native_copy=1") if err := b.ch.Query("SET s3_request_timeout_ms=600000"); err != nil { - b.log.Fatalf("SET s3_request_timeout_ms=600000 error: %v", err) + log.Fatal().Msgf("SET s3_request_timeout_ms=600000 error: %v", err) } } if (b.cfg.General.RemoteStorage == "s3" || b.cfg.General.RemoteStorage == "gcs") && version >= 23011000 { if err := b.ch.Query("SET s3_use_adaptive_timeouts=0"); err != nil { - b.log.Fatalf("SET s3_use_adaptive_timeouts=0 error: %v", err) + log.Fatal().Msgf("SET s3_use_adaptive_timeouts=0 error: %v", err) } } if b.cfg.General.RemoteStorage == "azblob" && version >= 24005000 { diff --git a/pkg/backup/create.go b/pkg/backup/create.go index 20ea2f1d..2b9a3819 100644 --- a/pkg/backup/create.go +++ b/pkg/backup/create.go @@ -14,7 +14,6 @@ import ( "sync/atomic" "time" - apexLog "github.com/apex/log" "github.com/google/uuid" recursiveCopy "github.com/otiai10/copy" "golang.org/x/sync/errgroup" @@ -30,6 +29,8 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/Altinity/clickhouse-backup/v2/pkg/storage/object_disk" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" + "github.com/rs/zerolog/log" + ) const ( @@ -68,10 +69,6 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, backupName = NewBackupName() } backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") - log := b.log.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "create", - }) if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -129,24 +126,23 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, } partitionsIdMap, partitionsNameList := partition.ConvertPartitionsToIdsMapAndNamesList(ctx, b.ch, tables, nil, partitions) doBackupData := !schemaOnly && !rbacOnly && !configsOnly - backupRBACSize, backupConfigSize, rbacAndConfigsErr := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap, log) + backupRBACSize, backupConfigSize, rbacAndConfigsErr := b.createRBACAndConfigsIfNecessary(ctx, backupName, createRBAC, rbacOnly, createConfigs, configsOnly, disks, diskMap) if rbacAndConfigsErr != nil { return rbacAndConfigsErr } if b.cfg.ClickHouse.UseEmbeddedBackupRestore { - err = b.createBackupEmbedded(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, backupVersion, tablePattern, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, log, startBackup, version) + err = b.createBackupEmbedded(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, backupVersion, tablePattern, partitionsNameList, partitionsIdMap, tables, allDatabases, allFunctions, disks, diskMap, diskTypes, backupRBACSize, backupConfigSize, startBackup, version) } else { - err = b.createBackupLocal(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, rbacOnly, configsOnly, backupVersion, partitionsIdMap, tables, tablePattern, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, log, startBackup, version) + err = 
b.createBackupLocal(ctx, backupName, diffFromRemote, doBackupData, schemaOnly, rbacOnly, configsOnly, backupVersion, partitionsIdMap, tables, tablePattern, disks, diskMap, diskTypes, allDatabases, allFunctions, backupRBACSize, backupConfigSize, startBackup, version) } if err != nil { // delete local backup if can't create if removeBackupErr := b.RemoveBackupLocal(ctx, backupName, disks); removeBackupErr != nil { - log.Errorf("creating failed -> b.RemoveBackupLocal error: %v", removeBackupErr) + log.Error().Msgf("creating failed -> b.RemoveBackupLocal error: %v", removeBackupErr) } // fix corner cases after https://github.com/Altinity/clickhouse-backup/issues/379 if cleanShadowErr := b.Clean(ctx); cleanShadowErr != nil { - log.Errorf("creating failed -> b.Clean error: %v", cleanShadowErr) - log.Error(cleanShadowErr.Error()) + log.Error().Msgf("creating failed -> b.Clean error: %v", cleanShadowErr) } return err } @@ -158,7 +154,7 @@ func (b *Backuper) CreateBackup(backupName, diffFromRemote, tablePattern string, return nil } -func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupName string, createRBAC bool, rbacOnly bool, createConfigs bool, configsOnly bool, disks []clickhouse.Disk, diskMap map[string]string, log *apexLog.Entry) (uint64, uint64, error) { +func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupName string, createRBAC bool, rbacOnly bool, createConfigs bool, configsOnly bool, disks []clickhouse.Disk, diskMap map[string]string) (uint64, uint64, error) { backupRBACSize, backupConfigSize := uint64(0), uint64(0) backupPath := path.Join(b.DefaultDataPath, "backup") if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { @@ -168,17 +164,17 @@ func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupNa if createRBAC || rbacOnly { var createRBACErr error if backupRBACSize, createRBACErr = b.createBackupRBAC(ctx, backupPath, disks); createRBACErr != nil { - log.Fatalf("error during do RBAC backup: %v", createRBACErr) + log.Fatal().Msgf("error during do RBAC backup: %v", createRBACErr) } else { - log.WithField("size", utils.FormatBytes(backupRBACSize)).Info("done createBackupRBAC") + log.Info().Str("size", utils.FormatBytes(backupRBACSize)).Msg("done createBackupRBAC") } } if createConfigs || configsOnly { var createConfigsErr error if backupConfigSize, createConfigsErr = b.createBackupConfigs(ctx, backupPath); createConfigsErr != nil { - log.Fatalf("error during do CONFIG backup: %v", createConfigsErr) + log.Fatal().Msgf("error during do CONFIG backup: %v", createConfigsErr) } else { - log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs") + log.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createBackupConfigs") } } if backupRBACSize > 0 || backupConfigSize > 0 { @@ -189,7 +185,7 @@ func (b *Backuper) createRBACAndConfigsIfNecessary(ctx context.Context, backupNa return backupRBACSize, backupConfigSize, nil } -func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time, version int) error { +func (b *Backuper) createBackupLocal(ctx 
context.Context, backupName, diffFromRemote string, doBackupData, schemaOnly, rbacOnly, configsOnly bool, backupVersion string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, tablePattern string, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, backupRBACSize, backupConfigSize uint64, startBackup time.Time, version int) error { // Create backup dir on all clickhouse disks for _, disk := range disks { if err := filesystemhelper.Mkdir(path.Join(disk.Path, "backup"), b.ch, disks); err != nil { @@ -202,7 +198,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe } if _, err := os.Stat(backupPath); os.IsNotExist(err) { if err = filesystemhelper.Mkdir(backupPath, b.ch, disks); err != nil { - log.Errorf("can't create directory %s: %v", backupPath, err) + log.Error().Msgf("can't create directory %s: %v", backupPath, err) return err } } @@ -239,7 +235,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe } defer func() { if closeErr := b.dst.Close(ctx); closeErr != nil { - log.Warnf("can't close connection to %s: %v", b.dst.Kind(), closeErr) + log.Warn().Msgf("can't close connection to %s: %v", b.dst.Kind(), closeErr) } }() } @@ -266,16 +262,16 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe } idx := tableIdx createBackupWorkingGroup.Go(func() error { - log := log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Name)) + logger := log.With().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Logger() var realSize, objectDiskSize map[string]int64 var disksToPartsMap map[string][]metadata.Part if doBackupData && table.BackupType == clickhouse.ShardBackupFull { - log.Debug("create data") + logger.Debug().Msg("create data") shadowBackupUUID := strings.ReplaceAll(uuid.New().String(), "-", "") var addTableToBackupErr error disksToPartsMap, realSize, objectDiskSize, addTableToBackupErr = b.AddTableToLocalBackup(createCtx, backupName, tablesDiffFromRemote, shadowBackupUUID, disks, &table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], version) if addTableToBackupErr != nil { - log.Errorf("b.AddTableToLocalBackup error: %v", addTableToBackupErr) + logger.Error().Msgf("b.AddTableToLocalBackup error: %v", addTableToBackupErr) return addTableToBackupErr } // more precise data size calculation @@ -287,17 +283,17 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe } } // https://github.com/Altinity/clickhouse-backup/issues/529 - log.Debug("get in progress mutations list") + logger.Debug().Msg("get in progress mutations list") inProgressMutations := make([]metadata.MutationMetadata, 0) if b.cfg.ClickHouse.BackupMutations && !schemaOnly && !rbacOnly && !configsOnly { var inProgressMutationsErr error inProgressMutations, inProgressMutationsErr = b.ch.GetInProgressMutations(createCtx, table.Database, table.Name) if inProgressMutationsErr != nil { - log.Errorf("b.ch.GetInProgressMutations error: %v", inProgressMutationsErr) + logger.Error().Msgf("b.ch.GetInProgressMutations error: %v", inProgressMutationsErr) return inProgressMutationsErr } } - log.Debug("create metadata") + logger.Debug().Msg("create metadata") if schemaOnly || doBackupData { metadataSize, createTableMetadataErr := b.createTableMetadata(path.Join(backupPath, "metadata"), metadata.TableMetadata{ Table: table.Name, @@ -310,7 +306,7 @@ func 
(b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe MetadataOnly: schemaOnly || table.BackupType == clickhouse.ShardBackupSchema, }, disks) if createTableMetadataErr != nil { - log.Errorf("b.createTableMetadata error: %v", createTableMetadataErr) + logger.Error().Msgf("b.createTableMetadata error: %v", createTableMetadataErr) return createTableMetadataErr } atomic.AddUint64(&backupMetadataSize, metadataSize) @@ -321,7 +317,7 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe }) metaMutex.Unlock() } - log.WithField("progress", fmt.Sprintf("%d/%d", idx+1, len(tables))).Infof("done") + logger.Info().Str("progress", fmt.Sprintf("%d/%d", idx+1, len(tables))).Msg("done") return nil }) } @@ -330,14 +326,14 @@ func (b *Backuper) createBackupLocal(ctx context.Context, backupName, diffFromRe } backupMetaFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, diffFromRemote, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupObjectDiskSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, diffFromRemote, backupVersion, "regular", diskMap, diskTypes, disks, backupDataSize, backupObjectDiskSize, backupMetadataSize, backupRBACSize, backupConfigSize, tableMetas, allDatabases, allFunctions); err != nil { return fmt.Errorf("createBackupMetadata return error: %v", err) } - log.WithField("version", backupVersion).WithField("duration", utils.HumanizeDuration(time.Since(startBackup))).Info("done") + log.Info().Str("version", backupVersion).Str("duration", utils.HumanizeDuration(time.Since(startBackup))).Msg("done") return nil } -func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBackup string, doBackupData, schemaOnly bool, backupVersion, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, backupRBACSize, backupConfigSize uint64, log *apexLog.Entry, startBackup time.Time, version int) error { +func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBackup string, doBackupData, schemaOnly bool, backupVersion, tablePattern string, partitionsNameList map[metadata.TableTitle][]string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, tables []clickhouse.Table, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, disks []clickhouse.Disk, diskMap, diskTypes map[string]string, backupRBACSize, backupConfigSize uint64, startBackup time.Time, version int) error { // TODO: Implement sharded backup operations for embedded backups if doesShard(b.cfg.General.ShardedOperationMode) { return fmt.Errorf("cannot perform embedded backup: %w", errShardOperationUnsupported) @@ -421,7 +417,7 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBac } defer func() { if closeErr := b.dst.Close(ctx); closeErr != nil { - log.Warnf("createBackupEmbedded: can't close connection to %s: %v", b.dst.Kind(), closeErr) + log.Warn().Msgf("createBackupEmbedded: can't close connection to %s: %v", b.dst.Kind(), closeErr) } }() } @@ -437,11 +433,11 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, 
backupName, baseBac var disksToPartsMap map[string][]metadata.Part if doBackupData { if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { - log.Debugf("calculate parts list `%s`.`%s` from embedded backup disk `%s`", table.Database, table.Name, b.cfg.ClickHouse.EmbeddedBackupDisk) + log.Debug().Msgf("calculate parts list `%s`.`%s` from embedded backup disk `%s`", table.Database, table.Name, b.cfg.ClickHouse.EmbeddedBackupDisk) disksToPartsMap, err = b.getPartsFromLocalEmbeddedBackupDisk(backupPath, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) } else { - log.Debugf("calculate parts list `%s`.`%s` from embedded backup remote destination", table.Database, table.Name) - disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(ctx, backupName, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}], log) + log.Debug().Msgf("calculate parts list `%s`.`%s` from embedded backup remote destination", table.Database, table.Name) + disksToPartsMap, err = b.getPartsFromRemoteEmbeddedBackup(ctx, backupName, table, partitionsIdMap[metadata.TableTitle{Database: table.Database, Table: table.Name}]) } } if err != nil { @@ -466,15 +462,14 @@ func (b *Backuper) createBackupEmbedded(ctx context.Context, backupName, baseBac } } backupMetaFile := path.Join(backupPath, "metadata.json") - if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, baseBackup, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, 0, backupMetadataSize, backupRBACSize, backupConfigSize, tablesTitle, allDatabases, allFunctions, log); err != nil { + if err := b.createBackupMetadata(ctx, backupMetaFile, backupName, baseBackup, backupVersion, "embedded", diskMap, diskTypes, disks, backupDataSize[0].Size, 0, backupMetadataSize, backupRBACSize, backupConfigSize, tablesTitle, allDatabases, allFunctions); err != nil { return err } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "operation": "create_embedded", "duration": utils.HumanizeDuration(time.Since(startBackup)), - "version": backupVersion, - }).Info("done") + }).Msg("done") return nil } @@ -544,7 +539,7 @@ func (b *Backuper) generateEmbeddedBackupSQL(ctx context.Context, backupName str return backupSQL, tableSizeSQL, nil } -func (b *Backuper) getPartsFromRemoteEmbeddedBackup(ctx context.Context, backupName string, table clickhouse.Table, partitionsIdsMap common.EmptyMap, log *apexLog.Entry) (map[string][]metadata.Part, error) { +func (b *Backuper) getPartsFromRemoteEmbeddedBackup(ctx context.Context, backupName string, table clickhouse.Table, partitionsIdsMap common.EmptyMap) (map[string][]metadata.Part, error) { dirListStr := make([]string, 0) remoteEmbeddedBackupPath := "" if b.cfg.General.RemoteStorage == "s3" { @@ -563,7 +558,7 @@ func (b *Backuper) getPartsFromRemoteEmbeddedBackup(ctx context.Context, backupN }); walkErr != nil { return nil, walkErr } - log.Debugf("getPartsFromRemoteEmbeddedBackup from %s found %d parts", remoteEmbeddedBackupPath, len(dirListStr)) + log.Debug().Msgf("getPartsFromRemoteEmbeddedBackup from %s found %d parts", remoteEmbeddedBackupPath, len(dirListStr)) return b.fillEmbeddedPartsFromDirList(partitionsIdsMap, dirListStr, "default") } @@ -619,14 +614,13 @@ func (b *Backuper) fillEmbeddedPartsFromDirList(partitionsIdsMap common.EmptyMap } func (b *Backuper) createBackupConfigs(ctx context.Context, backupPath string) (uint64, error) { - log := b.log.WithField("logger", "createBackupConfigs") select { case <-ctx.Done(): 
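// --- Illustrative sketch, not part of this patch ---
// The apex/log field style removed here,
//   log.WithField("size", utils.FormatBytes(backupConfigSize)).Info("done createBackupConfigs")
// maps onto zerolog's event builder from github.com/rs/zerolog/log:
//   log.Info().Str("size", utils.FormatBytes(backupConfigSize)).Msg("done createBackupConfigs")
// and printf-style helpers such as log.Errorf / log.Debugf become
//   log.Error().Msgf("can't create directory %s: %v", backupPath, err)
//   log.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath)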
return 0, ctx.Err() default: backupConfigSize := uint64(0) configBackupPath := path.Join(backupPath, "configs") - log.Debugf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) + log.Debug().Msgf("copy %s -> %s", b.cfg.ClickHouse.ConfigDir, configBackupPath) copyErr := recursiveCopy.Copy(b.cfg.ClickHouse.ConfigDir, configBackupPath, recursiveCopy.Options{ Skip: func(srcinfo os.FileInfo, src, dest string) (bool, error) { backupConfigSize += uint64(srcinfo.Size()) @@ -638,7 +632,6 @@ func (b *Backuper) createBackupConfigs(ctx context.Context, backupPath string) ( } func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disks []clickhouse.Disk) (uint64, error) { - log := b.log.WithField("logger", "createBackupRBAC") select { case <-ctx.Done(): return 0, ctx.Err() @@ -668,7 +661,7 @@ func (b *Backuper) createBackupRBAC(ctx context.Context, backupPath string, disk return rbacDataSize + replicatedRBACDataSize, err } if len(rbacSQLFiles) != 0 { - log.Debugf("copy %s -> %s", accessPath, rbacBackup) + log.Debug().Msgf("copy %s -> %s", accessPath, rbacBackup) copyErr := recursiveCopy.Copy(accessPath, rbacBackup, recursiveCopy.Options{ OnDirExists: func(src, dst string) recursiveCopy.DirExistsAction { return recursiveCopy.Replace @@ -696,7 +689,7 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st }, 0) rbacDataSize := uint64(0) if err = b.ch.SelectContext(ctx, &replicatedRBAC, "SELECT name FROM system.user_directories WHERE type='replicated'"); err == nil && len(replicatedRBAC) > 0 { - k := keeper.Keeper{Log: b.log.WithField("logger", "keeper")} + k := keeper.Keeper{} if err = k.Connect(ctx, b.ch); err != nil { return 0, err } @@ -711,14 +704,14 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st return 0, err } if rbacUUIDObjectsCount == 0 { - b.log.WithField("logger", "createBackupRBACReplicated").Warnf("%s/%s have no childs, skip Dump", replicatedAccessPath, "uuid") + log.Warn().Str("logger", "createBackupRBACReplicated").Msgf("%s/%s have no childs, skip Dump", replicatedAccessPath, "uuid") continue } if err = os.MkdirAll(rbacBackup, 0755); err != nil { return 0, err } dumpFile := path.Join(rbacBackup, userDirectory.Name+".jsonl") - b.log.WithField("logger", "createBackupRBACReplicated").Infof("keeper.Dump %s -> %s", replicatedAccessPath, dumpFile) + log.Info().Str("logger", "createBackupRBACReplicated").Msgf("keeper.Dump %s -> %s", replicatedAccessPath, dumpFile) dumpRBACSize, dumpErr := k.Dump(replicatedAccessPath, dumpFile) if dumpErr != nil { return 0, dumpErr @@ -730,18 +723,18 @@ func (b *Backuper) createBackupRBACReplicated(ctx context.Context, rbacBackup st } func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, tablesDiffFromRemote map[metadata.TableTitle]metadata.TableMetadata, shadowBackupUUID string, diskList []clickhouse.Disk, table *clickhouse.Table, partitionsIdsMap common.EmptyMap, version int) (map[string][]metadata.Part, map[string]int64, map[string]int64, error) { - log := b.log.WithFields(apexLog.Fields{ + logger := log.With().Fields(map[string]interface{}{ "backup": backupName, "operation": "create", "table": fmt.Sprintf("%s.%s", table.Database, table.Name), - }) + }).Logger() if backupName == "" { return nil, nil, nil, fmt.Errorf("backupName is not defined") } if !strings.HasSuffix(table.Engine, "MergeTree") && table.Engine != "MaterializedMySQL" && table.Engine != "MaterializedPostgreSQL" { if table.Engine != "MaterializedView" { - 
log.WithField("engine", table.Engine).Warnf("supports only schema backup") + logger.Warn().Str("engine", table.Engine).Msg("supports only schema backup") } return nil, nil, nil, nil } @@ -754,7 +747,7 @@ func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, if err := b.ch.FreezeTable(ctx, table, shadowBackupUUID); err != nil { return nil, nil, nil, err } - log.Debug("frozen") + log.Debug().Msg("frozen") realSize := map[string]int64{} objectDiskSize := map[string]int64{} disksToPartsMap := map[string][]metadata.Part{} @@ -782,7 +775,7 @@ func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, realSize[disk.Name] = size disksToPartsMap[disk.Name] = parts - log.WithField("disk", disk.Name).Debug("shadow moved") + logger.Debug().Str("disk", disk.Name).Msg("shadow moved") if len(parts) > 0 && (b.isDiskTypeObject(disk.Type) || b.isDiskTypeEncryptedObject(disk, diskList)) { start := time.Now() if size, err = b.uploadObjectDiskParts(ctx, backupName, tablesDiffFromRemote[metadata.TableTitle{Database: table.Database, Table: table.Name}], backupShadowPath, disk); err != nil { @@ -790,7 +783,7 @@ func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, } objectDiskSize[disk.Name] = size if size > 0 { - log.WithField("disk", disk.Name).WithField("duration", utils.HumanizeDuration(time.Since(start))).WithField("size", utils.FormatBytes(uint64(size))).Info("upload object_disk finish") + log.Info().Str("disk", disk.Name).Str("duration", utils.HumanizeDuration(time.Since(start))).Str("size", utils.FormatBytes(uint64(size))).Msg("upload object_disk finish") } } // Clean all the files under the shadowPath, cause UNFREEZE unavailable @@ -805,11 +798,11 @@ func (b *Backuper) AddTableToLocalBackup(ctx context.Context, backupName string, if version > 21004000 { if err := b.ch.QueryContext(ctx, fmt.Sprintf("ALTER TABLE `%s`.`%s` UNFREEZE WITH NAME '%s'", table.Database, table.Name, shadowBackupUUID)); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81") || strings.Contains(err.Error(), "code: 218")) && b.cfg.ClickHouse.IgnoreNotExistsErrorDuringFreeze { - b.ch.Log.Warnf("can't unfreeze table: %v", err) + logger.Warn().Msgf("can't unfreeze table: %v", err) } } } - log.Debug("done") + log.Debug().Msg("done") return disksToPartsMap, realSize, objectDiskSize, nil } @@ -845,7 +838,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string, partPaths := strings.SplitN(strings.TrimPrefix(fPath, backupShadowPath), "/", 2) for _, part := range tableDiffFromRemote.Parts[disk.Name] { if part.Name == partPaths[0] { - b.log.Debugf("%s exists in diff-from-remote backup", part.Name) + log.Debug().Msgf("%s exists in diff-from-remote backup", part.Name) return nil } } @@ -891,7 +884,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string, return size, nil } -func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, requiredBackup, version, tags string, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, backupDataSize, backupObjectDiskSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function, log *apexLog.Entry) error { +func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, backupName, requiredBackup, version, tags string, diskMap, diskTypes map[string]string, disks 
[]clickhouse.Disk, backupDataSize, backupObjectDiskSize, backupMetadataSize, backupRBACSize, backupConfigSize uint64, tableMetas []metadata.TableTitle, allDatabases []clickhouse.Database, allFunctions []clickhouse.Function) error { select { case <-ctx.Done(): return ctx.Err() @@ -928,9 +921,9 @@ func (b *Backuper) createBackupMetadata(ctx context.Context, backupMetaFile, bac return err } if err := filesystemhelper.Chown(backupMetaFile, b.ch, disks, false); err != nil { - log.Warnf("can't chown %s: %v", backupMetaFile, err) + log.Warn().Msgf("can't chown %s: %v", backupMetaFile, err) } - b.log.Debugf("%s created", backupMetaFile) + log.Debug().Msgf("%s created", backupMetaFile) return nil } } @@ -954,6 +947,6 @@ func (b *Backuper) createTableMetadata(metadataPath string, table metadata.Table if err := filesystemhelper.Chown(metadataFile, b.ch, disks, false); err != nil { return 0, err } - b.log.Debugf("%s created", metadataFile) + log.Debug().Msgf("%s created", metadataFile) return uint64(len(metadataBody)), nil } diff --git a/pkg/backup/delete.go b/pkg/backup/delete.go index 0d34a009..7f08f23d 100644 --- a/pkg/backup/delete.go +++ b/pkg/backup/delete.go @@ -17,13 +17,12 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/storage/object_disk" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - apexLog "github.com/apex/log" "github.com/pkg/errors" + "github.com/rs/zerolog/log" ) // Clean - removed all data in shadow folder func (b *Backuper) Clean(ctx context.Context) error { - log := b.log.WithField("logger", "Clean") if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) } @@ -41,7 +40,7 @@ func (b *Backuper) Clean(ctx context.Context) error { if err := b.cleanDir(shadowDir); err != nil { return fmt.Errorf("can't clean '%s': %v", shadowDir, err) } - log.Info(shadowDir) + log.Info().Msg(shadowDir) } return nil } @@ -108,7 +107,6 @@ func (b *Backuper) RemoveOldBackupsLocal(ctx context.Context, keepLastBackup boo } func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, disks []clickhouse.Disk) error { - log := b.log.WithField("logger", "RemoveBackupLocal") var err error start := time.Now() backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") @@ -142,12 +140,12 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() b.dst = bd } - err = b.cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx, backupName, disks, backup, hasObjectDisks, log) + err = b.cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx, backupName, disks, backup, hasObjectDisks) if err != nil { return err } @@ -156,40 +154,40 @@ func (b *Backuper) RemoveBackupLocal(ctx context.Context, backupName string, dis if disk.IsBackup { backupPath = path.Join(disk.Path, backupName) } - log.Infof("remove '%s'", backupPath) + log.Info().Msgf("remove '%s'", backupPath) if err = os.RemoveAll(backupPath); err != nil { return err } } - log.WithField("operation", "delete"). - WithField("location", "local"). - WithField("backup", backupName). - WithField("duration", utils.HumanizeDuration(time.Since(start))). - Info("done") + log.Info().Str("operation", "delete"). + Str("location", "local"). + Str("backup", backupName). + Str("duration", utils.HumanizeDuration(time.Since(start))). 
+ Msg("done") return nil } } return fmt.Errorf("'%s' is not found on local storage", backupName) } -func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx context.Context, backupName string, disks []clickhouse.Disk, backup LocalBackup, hasObjectDisks bool, log *apexLog.Entry) error { +func (b *Backuper) cleanEmbeddedAndObjectDiskLocalIfSameRemoteNotPresent(ctx context.Context, backupName string, disks []clickhouse.Disk, backup LocalBackup, hasObjectDisks bool) error { skip, err := b.skipIfTheSameRemoteBackupPresent(ctx, backup.BackupName, backup.Tags) - log.Debugf("b.skipIfTheSameRemoteBackupPresent return skip=%v", skip) + log.Debug().Msgf("b.skipIfTheSameRemoteBackupPresent return skip=%v", skip) if err != nil { return err } if !skip && (hasObjectDisks || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "")) { startTime := time.Now() if deletedKeys, deleteErr := b.cleanBackupObjectDisks(ctx, backupName); deleteErr != nil { - log.Warnf("b.cleanBackupObjectDisks return error: %v", deleteErr) + log.Warn().Msgf("b.cleanBackupObjectDisks return error: %v", deleteErr) return err } else { - log.WithField("backup", backupName).WithField("duration", utils.HumanizeDuration(time.Since(startTime))).Infof("cleanBackupObjectDisks deleted %d keys", deletedKeys) + log.Info().Str("backup", backupName).Str("duration", utils.HumanizeDuration(time.Since(startTime))).Msgf("cleanBackupObjectDisks deleted %d keys", deletedKeys) } } if !skip && (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "") { if err = b.cleanLocalEmbedded(ctx, backup, disks); err != nil { - log.Warnf("b.cleanLocalEmbedded return error: %v", err) + log.Warn().Msgf("b.cleanLocalEmbedded return error: %v", err) return err } } @@ -203,7 +201,7 @@ func (b *Backuper) hasObjectDisksLocal(backupList []LocalBackup, backupName stri if !disk.IsBackup && (b.isDiskTypeObject(disk.Type) || b.isDiskTypeEncryptedObject(disk, disks)) { backupExists, err := os.ReadDir(path.Join(disk.Path, "backup", backup.BackupName)) if err == nil && len(backupExists) > 0 { - apexLog.Debugf("hasObjectDisksLocal: found object disk %s", disk.Name) + log.Debug().Msgf("hasObjectDisksLocal: found object disk %s", disk.Name) return true } } @@ -225,7 +223,7 @@ func (b *Backuper) cleanLocalEmbedded(ctx context.Context, backup LocalBackup, d return err } if !info.IsDir() && !strings.HasSuffix(filePath, ".json") && !strings.HasPrefix(filePath, path.Join(backupPath, "access")) { - apexLog.Debugf("object_disk.ReadMetadataFromFile(%s)", filePath) + log.Debug().Msgf("object_disk.ReadMetadataFromFile(%s)", filePath) meta, err := object_disk.ReadMetadataFromFile(filePath) if err != nil { return err @@ -264,12 +262,11 @@ func (b *Backuper) skipIfTheSameRemoteBackupPresent(ctx context.Context, backupN } func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) error { - log := b.log.WithField("logger", "RemoveBackupRemote") backupName = utils.CleanBackupNameRE.ReplaceAllString(backupName, "") start := time.Now() if b.cfg.General.RemoteStorage == "none" { err := errors.New("aborted: RemoteStorage set to \"none\"") - log.Error(err.Error()) + log.Error().Msg(err.Error()) return err } if b.cfg.General.RemoteStorage == "custom" { @@ -290,7 +287,7 @@ func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -302,28 +299,28 @@ 
func (b *Backuper) RemoveBackupRemote(ctx context.Context, backupName string) er } for _, backup := range backupList { if backup.BackupName == backupName { - err = b.cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx, backup, log) + err = b.cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx, backup) if err != nil { return err } if err = bd.RemoveBackupRemote(ctx, backup); err != nil { - log.Warnf("bd.RemoveBackup return error: %v", err) + log.Warn().Msgf("bd.RemoveBackup return error: %v", err) return err } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "backup": backupName, "location": "remote", "operation": "delete", "duration": utils.HumanizeDuration(time.Since(start)), - }).Info("done") + }).Msg("done") return nil } } return fmt.Errorf("'%s' is not found on remote storage", backupName) } -func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx context.Context, backup storage.Backup, log *apexLog.Entry) error { +func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx context.Context, backup storage.Backup) error { var skip bool var err error if skip, err = b.skipIfSameLocalBackupPresent(ctx, backup.BackupName, backup.Tags); err != nil { @@ -332,7 +329,7 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx con if !skip { if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" { if err = b.cleanRemoteEmbedded(ctx, backup); err != nil { - log.Warnf("b.cleanRemoteEmbedded return error: %v", err) + log.Warn().Msgf("b.cleanRemoteEmbedded return error: %v", err) return err } return nil @@ -340,9 +337,9 @@ func (b *Backuper) cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx con if b.hasObjectDisksRemote(backup) || (b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk == "") { startTime := time.Now() if deletedKeys, deleteErr := b.cleanBackupObjectDisks(ctx, backup.BackupName); deleteErr != nil { - log.Warnf("b.cleanBackupObjectDisks return error: %v", deleteErr) + log.Warn().Msgf("b.cleanBackupObjectDisks return error: %v", deleteErr) } else { - log.WithField("backup", backup.BackupName).WithField("duration", utils.HumanizeDuration(time.Since(startTime))).Infof("cleanBackupObjectDisks deleted %d keys", deletedKeys) + log.Info().Str("backup", backup.BackupName).Str("duration", utils.HumanizeDuration(time.Since(startTime))).Msgf("cleanBackupObjectDisks deleted %d keys", deletedKeys) } return nil } @@ -369,7 +366,7 @@ func (b *Backuper) cleanRemoteEmbedded(ctx context.Context, backup storage.Backu if err != nil { return err } - apexLog.Debugf("object_disk.ReadMetadataFromReader(%s)", f.Name()) + log.Debug().Msgf("object_disk.ReadMetadataFromReader(%s)", f.Name()) meta, err := object_disk.ReadMetadataFromReader(r, f.Name()) if err != nil { return err @@ -449,7 +446,7 @@ func (b *Backuper) cleanPartialRequiredBackup(ctx context.Context, disks []click if err = b.RemoveBackupLocal(ctx, localBackup.BackupName, disks); err != nil { return fmt.Errorf("CleanPartialRequiredBackups %s -> RemoveBackupLocal cleaning error: %v", localBackup.BackupName, err) } else { - b.log.Infof("CleanPartialRequiredBackups %s deleted", localBackup.BackupName) + log.Info().Msgf("CleanPartialRequiredBackups %s deleted", localBackup.BackupName) } } } diff --git a/pkg/backup/download.go b/pkg/backup/download.go index ac563b0c..bdb60c13 100644 --- a/pkg/backup/download.go +++ b/pkg/backup/download.go @@ -13,6 +13,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/resumable" 
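// --- Illustrative sketch, not part of this patch ---
// With zerolog the package-level logger replaces the *apexLog.Entry values that were
// previously threaded through helper signatures; where a scoped logger is still useful
// (per table, per backup), it is derived once and passed explicitly, as this diff does:
//   metadataLogger := log.With().
//       Str("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)).
//       Logger()
//   downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks,
//       tableTitle, schemaOnly, partitions, b.resume, metadataLogger)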
"github.com/Altinity/clickhouse-backup/v2/pkg/status" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog" "io" "io/fs" "math/rand" @@ -32,7 +33,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" ) var ( @@ -51,10 +52,6 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer b.ch.Close() - log := b.log.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "download", - }) if b.cfg.General.RemoteStorage == "none" { return fmt.Errorf("general->remote_storage shall not be \"none\" for download, change you config or use REMOTE_STORAGE environment variable") } @@ -90,7 +87,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if errors.Is(isResumeExists, os.ErrNotExist) { return ErrBackupIsAlreadyExists } - log.Warnf("%s already exists will try to resume download", backupName) + log.Warn().Msgf("%s already exists will try to resume download", backupName) } } } @@ -103,7 +100,7 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } defer func() { if err := b.dst.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -156,16 +153,16 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ }) } - log.Debugf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) + log.Debug().Str("backup", backupName).Msgf("prepare table METADATA concurrent semaphore with concurrency=%d len(tablesForDownload)=%d", b.cfg.General.DownloadConcurrency, len(tablesForDownload)) tableMetadataAfterDownload := make([]*metadata.TableMetadata, len(tablesForDownload)) metadataGroup, metadataCtx := errgroup.WithContext(ctx) metadataGroup.SetLimit(int(b.cfg.General.DownloadConcurrency)) for i, t := range tablesForDownload { - metadataLogger := log.WithField("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)) + metadataLogger := log.With().Str("table_metadata", fmt.Sprintf("%s.%s", t.Database, t.Table)).Logger() idx := i tableTitle := t metadataGroup.Go(func() error { - downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, metadataLogger, tableTitle, schemaOnly, partitions, b.resume) + downloadedMetadata, size, err := b.downloadTableMetadata(metadataCtx, backupName, disks, tableTitle, schemaOnly, partitions, b.resume, metadataLogger) if err != nil { return err } @@ -179,16 +176,16 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } // download, missed .inner. 
tables, https://github.com/Altinity/clickhouse-backup/issues/765 var missedInnerTableErr error - tableMetadataAfterDownload, tablesForDownload, metadataSize, missedInnerTableErr = b.downloadMissedInnerTablesMetadata(ctx, backupName, metadataSize, tablesForDownload, tableMetadataAfterDownload, disks, schemaOnly, partitions, log) + tableMetadataAfterDownload, tablesForDownload, metadataSize, missedInnerTableErr = b.downloadMissedInnerTablesMetadata(ctx, backupName, metadataSize, tablesForDownload, tableMetadataAfterDownload, disks, schemaOnly, partitions) if missedInnerTableErr != nil { return fmt.Errorf("b.downloadMissedInnerTablesMetadata error: %v", missedInnerTableErr) } if !schemaOnly { - if reBalanceErr := b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownload, disks, remoteBackup, log); reBalanceErr != nil { + if reBalanceErr := b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownload, disks, remoteBackup); reBalanceErr != nil { return reBalanceErr } - log.Debugf("prepare table SHADOW concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) + log.Debug().Str("backupName", backupName).Msgf("prepare table DATA concurrent semaphore with concurrency=%d len(tableMetadataAfterDownload)=%d", b.cfg.General.DownloadConcurrency, len(tableMetadataAfterDownload)) dataGroup, dataCtx := errgroup.WithContext(ctx) dataGroup.SetLimit(int(b.cfg.General.DownloadConcurrency)) @@ -203,14 +200,15 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ if err := b.downloadTableData(dataCtx, remoteBackup.BackupMetadata, *tableMetadataAfterDownload[idx]); err != nil { return err } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ + "backup_name": backupName, "operation": "download_data", "table": fmt.Sprintf("%s.%s", tableMetadataAfterDownload[idx].Database, tableMetadataAfterDownload[idx].Table), "progress": fmt.Sprintf("%d/%d", idx+1, len(tableMetadataAfterDownload)), "duration": utils.HumanizeDuration(time.Since(start)), "size": utils.FormatBytes(tableMetadataAfterDownload[idx].TotalBytes), "version": backupVersion, - }).Info("done") + }).Msg("done") return nil }) } @@ -275,16 +273,18 @@ func (b *Backuper) Download(backupName string, tablePattern string, partitions [ } } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "download", "duration": utils.HumanizeDuration(time.Since(startDownload)), "download_size": utils.FormatBytes(dataSize + metadataSize + rbacSize + configSize), "object_disk_size": utils.FormatBytes(backupMetadata.ObjectDiskSize), "version": backupVersion, - }).Info("done") + }).Msg("done") return nil } -func (b *Backuper) reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, remoteBackup storage.Backup, log *apexLog.Entry) error { +func (b *Backuper) reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, remoteBackup storage.Backup) error { var disksByStoragePolicyAndType map[string]map[string][]clickhouse.Disk filterDisksByTypeAndStoragePolicies := func(disk string, diskType string, disks []clickhouse.Disk, remoteBackup storage.Backup, t metadata.TableMetadata) (string, []clickhouse.Disk, error) { _, ok := remoteBackup.DiskTypes[disk] @@ -368,7 +368,7 @@ func (b *Backuper) reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDown 
rebalancedDisksStr := strings.TrimPrefix( strings.Replace(fmt.Sprintf("%v", rebalancedDisks), ":{}", "", -1), "map", ) - log.Warnf("table '%s.%s' require disk '%s' that not found in system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %v", t.Database, t.Table, disk, rebalancedDisksStr) + log.Warn().Msgf("table '%s.%s' require disk '%s' that not found in system.disks, you can add nonexistent disks to `disk_mapping` in `clickhouse` config section, data will download to %v", t.Database, t.Table, disk, rebalancedDisksStr) } } if isRebalanced { @@ -381,18 +381,19 @@ func (b *Backuper) reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDown return nil } -func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, log *apexLog.Entry, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { +func (b *Backuper) downloadTableMetadataIfNotExists(ctx context.Context, backupName string, tableTitle metadata.TableTitle) (*metadata.TableMetadata, error) { metadataLocalFile := path.Join(b.DefaultDataPath, "backup", backupName, "metadata", common.TablePathEncode(tableTitle.Database), fmt.Sprintf("%s.json", common.TablePathEncode(tableTitle.Table))) tm := &metadata.TableMetadata{} if _, err := tm.Load(metadataLocalFile); err == nil { return tm, nil } // we always download full metadata in this case without filter by partitions - tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, log.WithFields(apexLog.Fields{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}), tableTitle, false, nil, false) + logger := log.With().Fields(map[string]interface{}{"operation": "downloadTableMetadataIfNotExists", "backupName": backupName, "table_metadata_diff": fmt.Sprintf("%s.%s", tableTitle.Database, tableTitle.Table)}).Logger() + tm, _, err := b.downloadTableMetadata(ctx, backupName, nil, tableTitle, false, nil, false, logger) return tm, err } -func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, log *apexLog.Entry, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string, resume bool) (*metadata.TableMetadata, uint64, error) { +func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, disks []clickhouse.Disk, tableTitle metadata.TableTitle, schemaOnly bool, partitions []string, resume bool, logger zerolog.Logger) (*metadata.TableMetadata, uint64, error) { start := time.Now() size := uint64(0) metadataFiles := map[string]string{} @@ -481,16 +482,18 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string, b.resumableState.AppendToState(localMetadataFile, written) } } - log. - WithField("duration", utils.HumanizeDuration(time.Since(start))). - WithField("size", utils.FormatBytes(size)). - Info("done") + logger.Info().Fields(map[string]string{ + "operation": "download_metadata", + "backup": backupName, + "duration": utils.HumanizeDuration(time.Since(start)), + "size": utils.FormatBytes(size), + }).Msg("done") return &tableMetadata, size, nil } // downloadMissedInnerTablesMetadata - download, missed .inner. 
tables if materialized view query not contains `TO db.table` clause, https://github.com/Altinity/clickhouse-backup/issues/765 // @todo think about parallel download if sequentially will slow -func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backupName string, metadataSize uint64, tablesForDownload []metadata.TableTitle, tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, schemaOnly bool, partitions []string, log *apexLog.Entry) ([]*metadata.TableMetadata, []metadata.TableTitle, uint64, error) { +func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backupName string, metadataSize uint64, tablesForDownload []metadata.TableTitle, tableMetadataAfterDownload []*metadata.TableMetadata, disks []clickhouse.Disk, schemaOnly bool, partitions []string) ([]*metadata.TableMetadata, []metadata.TableTitle, uint64, error) { if b.isEmbedded { return tableMetadataAfterDownload, tablesForDownload, metadataSize, nil } @@ -517,8 +520,8 @@ func (b *Backuper) downloadMissedInnerTablesMetadata(ctx context.Context, backup } if !innerTableExists { innerTableTitle := metadata.TableTitle{Database: t.Database, Table: innerTableName} - metadataLogger := log.WithField("missed_inner_metadata", fmt.Sprintf("%s.%s", innerTableTitle.Database, innerTableTitle.Table)) - innerTableMetadata, size, err := b.downloadTableMetadata(ctx, backupName, disks, metadataLogger, innerTableTitle, schemaOnly, partitions, b.resume) + metadataLogger := log.With().Str("missed_inner_metadata", fmt.Sprintf("%s.%s", innerTableTitle.Database, innerTableTitle.Table)).Logger() + innerTableMetadata, size, err := b.downloadTableMetadata(ctx, backupName, disks, innerTableTitle, schemaOnly, partitions, b.resume, metadataLogger) if err != nil { return tableMetadataAfterDownload, tablesForDownload, metadataSize, err } @@ -540,8 +543,6 @@ func (b *Backuper) downloadConfigData(ctx context.Context, remoteBackup storage. } func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup storage.Backup, prefix string) (uint64, error) { - log := b.log.WithField("logger", "downloadBackupRelatedDir") - localDir := path.Join(b.DefaultDataPath, "backup", remoteBackup.BackupName, prefix) if remoteBackup.DataFormat != DirectoryFormat { @@ -584,7 +585,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } remoteFileInfo, err := b.dst.StatFile(ctx, remoteSource) if err != nil { - log.Debugf("%s not exists on remote storage, skip download", remoteSource) + log.Debug().Msgf("%s not exists on remote storage, skip download", remoteSource) return 0, nil } retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -601,7 +602,6 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st } func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata) error { - log := b.log.WithField("logger", "downloadTableData") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -615,7 +615,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. 
capacity += len(table.Files[disk]) downloadOffset[disk] = 0 } - log.Debugf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Files[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) for common.SumMapValuesInt(downloadOffset) < capacity { for disk := range table.Files { if downloadOffset[disk] >= len(table.Files[disk]) { @@ -631,7 +631,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. downloadOffset[disk] += 1 tableRemoteFile := path.Join(remoteBackup.BackupName, "shadow", common.TablePathEncode(table.Database), common.TablePathEncode(table.Table), archiveFile) dataGroup.Go(func() error { - log.Debugf("start download %s", tableRemoteFile) + log.Debug().Msgf("start download %s", tableRemoteFile) if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { return nil } @@ -645,7 +645,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(tableRemoteFile, 0) } - log.Debugf("finish download %s", tableRemoteFile) + log.Debug().Msgf("finish download %s", tableRemoteFile) return nil }) } @@ -655,7 +655,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. for disk := range table.Parts { capacity += len(table.Parts[disk]) } - log.Debugf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.DownloadConcurrency, capacity) for disk, parts := range table.Parts { tableRemotePath := path.Join(remoteBackup.BackupName, "shadow", dbAndTableDir, disk) @@ -677,7 +677,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. partRemotePath := path.Join(tableRemotePath, part.Name) partLocalPath := path.Join(tableLocalPath, part.Name) dataGroup.Go(func() error { - log.Debugf("start %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("start %s -> %s", partRemotePath, partLocalPath) if b.resume && b.resumableState.IsAlreadyProcessedBool(partRemotePath) { return nil } @@ -687,7 +687,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. if b.resume { b.resumableState.AppendToState(partRemotePath, 0) } - log.Debugf("finish %s -> %s", partRemotePath, partLocalPath) + log.Debug().Msgf("finish %s -> %s", partRemotePath, partLocalPath) return nil }) } @@ -708,8 +708,10 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata. } func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata.BackupMetadata, table metadata.TableMetadata, dbAndTableDir string) error { - log := b.log.WithField("operation", "downloadDiffParts") - log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("start") + log.Debug(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Msg("start") start := time.Now() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -749,7 +751,7 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. 
if b.resume && b.resumableState.IsAlreadyProcessedBool(existsPath) { if newPathDirList, newPathDirErr := os.ReadDir(newPath); newPathDirErr != nil { newPathDirErr = fmt.Errorf("os.ReadDir(%s) error: %v", newPath, newPathDirErr) - log.Error(newPathDirErr.Error()) + log.Error().Msg(newPathDirErr.Error()) return newPathDirErr } else if len(newPathDirList) == 0 { return fmt.Errorf("os.ReadDir(%s) expect return non empty list", newPath) @@ -762,7 +764,7 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. diskForDownload = part.RebalancedDisk } downloadDiffGroup.Go(func() error { - tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload, log) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(downloadDiffCtx, remoteBackup, table, diskForDownload, partForDownload) if err != nil { return err } @@ -809,25 +811,29 @@ func (b *Backuper) downloadDiffParts(ctx context.Context, remoteBackup metadata. if err := downloadDiffGroup.Wait(); err != nil { return fmt.Errorf("one of downloadDiffParts go-routine return error: %v", err) } - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).WithField("diff_parts", strconv.Itoa(int(downloadedDiffParts))).Info("done") + log.Info(). + Str("operation", "downloadDiffParts"). + Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)). + Str("duration", utils.HumanizeDuration(time.Since(start))). + Str("diff_parts", strconv.Itoa(int(downloadedDiffParts))). + Msg("done") return nil } func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLock *sync.Mutex, diffRemoteFilesCache map[string]*sync.Mutex, tableRemoteFile string, tableLocalDir string) error { - log := b.log.WithField("logger", "downloadDiffRemoteFile") if b.resume && b.resumableState.IsAlreadyProcessedBool(tableRemoteFile) { return nil } diffRemoteFilesLock.Lock() namedLock, isCached := diffRemoteFilesCache[tableRemoteFile] if isCached { - log.Debugf("wait download begin %s", tableRemoteFile) + log.Debug().Msgf("wait download begin %s", tableRemoteFile) namedLock.Lock() diffRemoteFilesLock.Unlock() namedLock.Unlock() - log.Debugf("wait download end %s", tableRemoteFile) + log.Debug().Msgf("wait download end %s", tableRemoteFile) } else { - log.Debugf("start download from %s", tableRemoteFile) + log.Debug().Msgf("start download from %s", tableRemoteFile) namedLock = &sync.Mutex{} diffRemoteFilesCache[tableRemoteFile] = namedLock namedLock.Lock() @@ -838,13 +844,13 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo return b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.DownloadMaxBytesPerSecond) }) if err != nil { - log.Warnf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadCompressedStream %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } else { // remoteFile could be a directory if err := b.dst.DownloadPath(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.DownloadMaxBytesPerSecond); err != nil { - log.Warnf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) + log.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err) return err } } @@ -852,7 +858,7 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo if b.resume { 
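// --- Illustrative sketch, not part of this patch ---
// apexLog.Fields{...} used by the old findDiff* helpers becomes Fields() on a
// zerolog event, keeping the same structured keys:
//   log.Debug().Fields(map[string]interface{}{
//       "database": table.Database,
//       "table":    table.Table,
//       "part":     part.Name,
//       "logger":   "findDiffBackupFilesRemote",
//   }).Msg("start")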
b.resumableState.AppendToState(tableRemoteFile, 0) } - log.Debugf("finish download from %s", tableRemoteFile) + log.Debug().Str("tableRemoteFile", tableRemoteFile).Msgf("finish download") } return nil } @@ -871,21 +877,21 @@ func (b *Backuper) checkNewPath(newPath string, part metadata.Part) error { return nil } -func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part, log *apexLog.Entry) (map[string]string, error) { +func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadata.BackupMetadata, table metadata.TableMetadata, disk string, part metadata.Part) (map[string]string, error) { var requiredTable *metadata.TableMetadata - log.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Debugf("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffBackupFilesRemote"}).Msg("start") requiredBackup, err := b.ReadBackupMetadataRemote(ctx, backup.RequiredBackup) if err != nil { return nil, err } - requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, log, metadata.TableTitle{Database: table.Database, Table: table.Table}) + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, metadata.TableTitle{Database: table.Database, Table: table.Table}) if err != nil { - log.Warnf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) + log.Warn().Msgf("downloadTableMetadataIfNotExists %s / %s.%s return error", requiredBackup.BackupName, table.Database, table.Table) return nil, err } // recursive find if part in RequiredBackup also Required - tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, table, requiredTable, part, disk, log) + tableRemoteFiles, found, err := b.findDiffRecursive(ctx, requiredBackup, table, requiredTable, part, disk) if found { return tableRemoteFiles, nil } @@ -926,18 +932,18 @@ func (b *Backuper) findDiffBackupFilesRemote(ctx context.Context, backup metadat return nil, fmt.Errorf("%s.%s %s not found on %s and all required backups sequence", table.Database, table.Table, part.Name, requiredBackup.BackupName) } -func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string, log *apexLog.Entry) (map[string]string, bool, error) { - log.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Debugf("start") +func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, requiredTable *metadata.TableMetadata, part metadata.Part, disk string) (map[string]string, bool, error) { + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffRecursive"}).Msg("start") found := false for _, requiredParts := range requiredTable.Parts { for _, requiredPart := range requiredParts { if requiredPart.Name == part.Name { found = true if requiredPart.Required { - tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, table, disk, part, log) + tableRemoteFiles, err := b.findDiffBackupFilesRemote(ctx, *requiredBackup, 
table, disk, part) if err != nil { found = false - log.Warnf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) + log.Warn().Msgf("try find %s.%s %s recursive return err: %v", table.Database, table.Table, part.Name, err) } return tableRemoteFiles, found, err } @@ -952,8 +958,7 @@ func (b *Backuper) findDiffRecursive(ctx context.Context, requiredBackup *metada } func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (map[string]string, error, bool) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}) - log.Debugf("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePart"}).Msg("start") tableRemoteFiles := make(map[string]string) // find same disk and part name archive if requiredBackup.DataFormat != DirectoryFormat { @@ -972,8 +977,7 @@ func (b *Backuper) findDiffOnePart(ctx context.Context, requiredBackup *metadata } func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}) - log.Debugf("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartDirectory"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, remoteDisk, part.Name) tableRemoteFile := path.Join(tableRemotePath, "checksums.txt") @@ -981,8 +985,7 @@ func (b *Backuper) findDiffOnePartDirectory(ctx context.Context, requiredBackup } func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *metadata.BackupMetadata, table metadata.TableMetadata, localDisk, remoteDisk string, part metadata.Part) (string, string, error) { - log := apexLog.WithFields(apexLog.Fields{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}) - log.Debugf("start") + log.Debug().Fields(map[string]interface{}{"database": table.Database, "table": table.Table, "part": part.Name, "logger": "findDiffOnePartArchive"}).Msg("start") dbAndTableDir := path.Join(common.TablePathEncode(table.Database), common.TablePathEncode(table.Table)) remoteExt := config.ArchiveExtensions[requiredBackup.DataFormat] tableRemotePath := path.Join(requiredBackup.BackupName, "shadow", dbAndTableDir, fmt.Sprintf("%s_%s.%s", remoteDisk, common.TablePathEncode(part.Name), remoteExt)) @@ -992,9 +995,8 @@ func (b *Backuper) findDiffOnePartArchive(ctx context.Context, requiredBackup *m func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metadata.BackupMetadata, tableRemoteFile string, tableRemotePath string, localDisk string, dbAndTableDir string, part metadata.Part) (string, string, error) { _, err := b.dst.StatFile(ctx, tableRemoteFile) - log := b.log.WithField("logger", "findDiffFileExist") if err != nil { - log.WithFields(apexLog.Fields{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": 
part.Name}).Debugf("findDiffFileExist not found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist not found") return "", "", err } tableLocalDir, diskExists := b.DiskToPathMap[localDisk] @@ -1011,7 +1013,7 @@ func (b *Backuper) findDiffFileExist(ctx context.Context, requiredBackup *metada } else { tableLocalDir = path.Join(tableLocalDir, "backup", requiredBackup.BackupName, "shadow", dbAndTableDir, localDisk) } - log.WithFields(apexLog.Fields{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Debugf("findDiffFileExist found") + log.Debug().Fields(map[string]interface{}{"tableRemoteFile": tableRemoteFile, "tableRemotePath": tableRemotePath, "part": part.Name}).Msg("findDiffFileExist found") return tableRemotePath, tableLocalDir, nil } @@ -1029,13 +1031,12 @@ func (b *Backuper) ReadBackupMetadataRemote(ctx context.Context, backupName stri } func (b *Backuper) makePartHardlinks(exists, new string) error { - log := apexLog.WithField("logger", "makePartHardlinks") _, err := os.Stat(exists) if err != nil { return err } if err = os.MkdirAll(new, 0750); err != nil { - log.Warnf("MkDirAll(%s) error: %v", new, err) + log.Warn().Msgf("MkDirAll(%s) error: %v", new, err) return err } if walkErr := filepath.Walk(exists, func(fPath string, fInfo os.FileInfo, err error) error { @@ -1047,7 +1048,7 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { newF := path.Join(new, fPath) if fInfo.IsDir() { if err = os.MkdirAll(newF, fInfo.Mode()); err != nil { - log.Warnf("MkdirAll(%s) error: %v", fPath, err) + log.Warn().Msgf("MkdirAll(%s) error: %v", fPath, err) return err } return nil @@ -1057,13 +1058,13 @@ func (b *Backuper) makePartHardlinks(exists, new string) error { existsFInfo, existsStatErr := os.Stat(existsF) newFInfo, newStatErr := os.Stat(newF) if existsStatErr != nil || newStatErr != nil || !os.SameFile(existsFInfo, newFInfo) { - log.Warnf("Link %s -> %s error: %v, existsStatErr: %v newStatErr: %v", existsF, newF, err, existsStatErr, newStatErr) + log.Warn().Msgf("Link %s -> %s error: %v, existsStatErr: %v newStatErr: %v", existsF, newF, err, existsStatErr, newStatErr) return err } } return nil }); walkErr != nil { - log.Warnf("Link recursively %s -> %s return error: %v", new, exists, walkErr) + log.Warn().Msgf("Link recursively %s -> %s return error: %v", new, exists, walkErr) return walkErr } return nil @@ -1077,7 +1078,6 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri return size, nil } } - log := b.log.WithField("logger", "downloadSingleBackupFile") retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { @@ -1088,7 +1088,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = remoteReader.Close() if err != nil { - log.Warnf("can't close remoteReader %s", remoteFile) + log.Warn().Msgf("can't close remoteReader %s", remoteFile) } }() localWriter, err := os.OpenFile(localFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0640) @@ -1099,7 +1099,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri defer func() { err = localWriter.Close() if err != nil { - log.Warnf("can't close localWriter %s", localFile) + log.Warn().Msgf("can't close localWriter %s", localFile) } }() diff --git a/pkg/backup/download_test.go 
b/pkg/backup/download_test.go index c36b8f9c..2e93324b 100644 --- a/pkg/backup/download_test.go +++ b/pkg/backup/download_test.go @@ -5,7 +5,6 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" "github.com/Altinity/clickhouse-backup/v2/pkg/storage" - apexLog "github.com/apex/log" "github.com/stretchr/testify/assert" "regexp" "testing" @@ -13,7 +12,6 @@ import ( ) var b = Backuper{ - log: &apexLog.Entry{}, DefaultDataPath: "/var/lib/clickhouse", DiskToPathMap: map[string]string{ "default": "/var/lib/clickhouse", @@ -67,7 +65,6 @@ var jbodDisks = []clickhouse.Disk{ IsBackup: false, }, } -var log = apexLog.WithField("logger", "test") var remoteBackup = storage.Backup{ BackupMetadata: metadata.BackupMetadata{ @@ -136,7 +133,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_Files_NoErrors(t *testing.T) { for i := range tableMetadataAfterDownload { tableMetadataAfterDownloadRepacked[i] = &tableMetadataAfterDownload[i] } - assert.NoError(t, b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, remoteBackup, log)) + assert.NoError(t, b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, remoteBackup)) //rebalanced table meta := tableMetadataAfterDownload[1] assert.Equal(t, 4, len(meta.RebalancedFiles), "expect 4 rebalanced files in %s.%s", meta.Database, meta.Table) @@ -191,7 +188,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_Parts_NoErrors(t *testing.T) { for i := range tableMetadataAfterDownload { tableMetadataAfterDownloadRepacked[i] = &tableMetadataAfterDownload[i] } - assert.NoError(t, b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, remoteBackup, log)) + assert.NoError(t, b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, remoteBackup)) // no files re-balance for _, meta := range tableMetadataAfterDownload { assert.Equal(t, 0, len(meta.RebalancedFiles)) @@ -237,7 +234,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_CheckErrors(t *testing.T) { for i := range tableMetadataAfterDownload { tableMetadataAfterDownloadRepacked[i] = &tableMetadataAfterDownload[i] } - err := b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup, log) + err := b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup) assert.Error(t, err) assert.Equal(t, "disk: hdd2 not found in disk_types section map[string]string{\"default\":\"local\", \"s3\":\"s3\", \"s3_disk2\":\"s3\"} in Test/metadata.json", @@ -250,7 +247,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_CheckErrors(t *testing.T) { for i := range tableMetadataAfterDownload { tableMetadataAfterDownloadRepacked[i] = &tableMetadataAfterDownload[i] } - err = b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup, log) + err = b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup) assert.Error(t, err) assert.Equal(t, "disk: hdd2, diskType: unknown not found in system.disks", err.Error()) @@ -266,7 +263,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_CheckErrors(t *testing.T) { invalidTable.Table = "test3" invalidTable.Query = "CREATE TABLE default.test3(id UInt64) ENGINE=MergeTree() ORDER BY id SETTINGS storage_policy='invalid'" tableMetadataAfterDownloadRepacked = []*metadata.TableMetadata{&invalidTable} - err = 
b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup, log) + err = b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, baseDisks, invalidRemoteBackup) assert.Error(t, err) matched, matchErr := regexp.MatchString(`storagePolicy: invalid with diskType: \w+ not found in system.disks`, err.Error()) assert.NoError(t, matchErr) @@ -282,7 +279,7 @@ func TestReBalanceTablesMetadataIfDiskNotExists_CheckErrors(t *testing.T) { "hdd2": {{Name: "part_3_3_0"}, {Name: "part_4_4_0"}}, } tableMetadataAfterDownloadRepacked = []*metadata.TableMetadata{&invalidTable} - err = b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, invalidDisks, invalidRemoteBackup, log) + err = b.reBalanceTablesMetadataIfDiskNotExists(tableMetadataAfterDownloadRepacked, invalidDisks, invalidRemoteBackup) assert.Error(t, err) assert.Equal(t, "250B free space, not found in system.disks with `local` type", err.Error()) diff --git a/pkg/backup/list.go b/pkg/backup/list.go index 2b93cde9..f1939a2f 100644 --- a/pkg/backup/list.go +++ b/pkg/backup/list.go @@ -20,7 +20,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" ) // List - list backups to stdout from command line @@ -38,7 +38,6 @@ func (b *Backuper) List(what, format string) error { return nil } func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) error { - log := apexLog.WithField("logger", "printBackupsRemote") switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -70,7 +69,7 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) size = "???" } if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, uploadDate, "remote", required, description); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } default: @@ -80,7 +79,6 @@ func printBackupsRemote(w io.Writer, backupList []storage.Backup, format string) } func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBackup, format string) error { - log := apexLog.WithField("logger", "printBackupsLocal") switch format { case "latest", "last", "l": if len(backupList) < 1 { @@ -116,7 +114,7 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku size = "???" 
} if bytes, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", backup.BackupName, size, creationDate, "local", required, description); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } } @@ -128,7 +126,6 @@ func printBackupsLocal(ctx context.Context, w io.Writer, backupList []LocalBacku // PrintLocalBackups - print all backups stored locally func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { - log := apexLog.WithField("logger", "PrintLocalBackups") if !b.ch.IsOpen { if err := b.ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %v", err) @@ -138,7 +135,7 @@ func (b *Backuper) PrintLocalBackups(ctx context.Context, format string) error { w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, _, err := b.GetLocalBackups(ctx, nil) @@ -157,7 +154,6 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } defer b.ch.Close() } - log := b.log.WithField("logger", "GetLocalBackups") if disks == nil { disks, err = b.ch.GetDisks(ctx, true) if err != nil { @@ -236,7 +232,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) backupMetadataBody, err := os.ReadFile(backupMetafilePath) if err != nil { if !os.IsNotExist(err) { - b.log.Warnf("list can't read %s error: %s", backupMetafilePath, err) + log.Warn().Msgf("list can't read %s error: %s", backupMetafilePath, err) } result = addBrokenBackupIfNotExists(result, name, info, "broken metadata.json not found") continue @@ -263,7 +259,7 @@ func (b *Backuper) GetLocalBackups(ctx context.Context, disks []clickhouse.Disk) } if closeErr := d.Close(); closeErr != nil { - log.Errorf("can't close %s error: %v", backupPath, closeErr) + log.Error().Msgf("can't close %s error: %v", backupPath, closeErr) } } } @@ -281,10 +277,9 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { } defer b.ch.Close() } - log := b.log.WithField("logger", "PrintAllBackups") defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } }() localBackups, _, err := b.GetLocalBackups(ctx, nil) @@ -292,7 +287,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsLocal(ctx, w, localBackups, format); err != nil { - log.Warnf("printBackupsLocal return error: %v", err) + log.Warn().Msgf("printBackupsLocal return error: %v", err) } if b.cfg.General.RemoteStorage != "none" { @@ -301,7 +296,7 @@ func (b *Backuper) PrintAllBackups(ctx context.Context, format string) error { return err } if err = printBackupsRemote(w, remoteBackups, format); err != nil { - log.Warnf("printBackupsRemote return error: %v", err) + log.Warn().Msgf("printBackupsRemote return error: %v", err) } } return nil @@ -315,11 +310,10 @@ func (b *Backuper) PrintRemoteBackups(ctx context.Context, format string) error } defer b.ch.Close() } - log := b.log.WithField("logger", "PrintRemoteBackups") w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', tabwriter.DiscardEmptyColumns) defer func() { if err := w.Flush(); err != nil { - log.Errorf("can't flush tabular writer error: %v", err) + 
log.Error().Msgf("can't flush tabular writer error: %v", err) } }() backupList, err := b.GetRemoteBackups(ctx, true) @@ -369,7 +363,7 @@ func (b *Backuper) GetRemoteBackups(ctx context.Context, parseMetadata bool) ([] } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() backupList, err := bd.BackupList(ctx, parseMetadata, "") @@ -427,13 +421,13 @@ func (b *Backuper) PrintTables(printAll bool, tablePattern, remoteBackup string) } if err := w.Flush(); err != nil { - b.log.Errorf("can't flush tabular writer error: %v", err) + log.Error().Msgf("can't flush tabular writer error: %v", err) } return nil } func (b *Backuper) printTablesLocal(ctx context.Context, tablePattern string, printAll bool, w *tabwriter.Writer) error { - log := b.log.WithField("logger", "PrintTablesLocal") + logger := log.With().Str("logger", "PrintTablesLocal").Logger() allTables, err := b.GetTables(ctx, tablePattern) if err != nil { return err @@ -452,12 +446,12 @@ func (b *Backuper) printTablesLocal(ctx context.Context, tablePattern string, pr } if table.Skip { if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\tskip\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ",")); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } continue } if bytes, err := fmt.Fprintf(w, "%s.%s\t%s\t%v\t%v\n", table.Database, table.Name, utils.FormatBytes(table.TotalBytes), strings.Join(tableDisks, ","), table.BackupType); err != nil { - log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + logger.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } return nil @@ -484,7 +478,7 @@ func (b *Backuper) GetTablesRemote(ctx context.Context, backupName string, table } defer func() { if err := bd.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -538,7 +532,7 @@ func (b *Backuper) printTablesRemote(ctx context.Context, backupName string, tab continue } if bytes, err := fmt.Fprintf(w, "%s.%s\tskip=%v\n", t.Database, t.Name, t.Skip); err != nil { - b.log.Errorf("fmt.Fprintf write %d bytes return error: %v", bytes, err) + log.Error().Msgf("fmt.Fprintf write %d bytes return error: %v", bytes, err) } } diff --git a/pkg/backup/restore.go b/pkg/backup/restore.go index 8e7c81e0..d075b6c1 100644 --- a/pkg/backup/restore.go +++ b/pkg/backup/restore.go @@ -5,6 +5,8 @@ import ( "context" "encoding/json" "fmt" + apexLog "github.com/apex/log" + "github.com/rs/zerolog" "io" "io/fs" "net/url" @@ -17,9 +19,9 @@ import ( "sync/atomic" "time" - apexLog "github.com/apex/log" "github.com/mattn/go-shellwords" recursiveCopy "github.com/otiai10/copy" + "github.com/rs/zerolog/log" "github.com/yargevad/filepathx" "golang.org/x/sync/errgroup" "golang.org/x/text/cases" @@ -56,10 +58,6 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab return err } - log := apexLog.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "restore", - }) doRestoreData := (!schemaOnly && !rbacOnly && !configsOnly) || dataOnly if err := b.ch.Connect(); err != nil { @@ -81,12 +79,12 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab } b.DefaultDataPath, err = b.ch.GetDefaultPath(disks) if 
err != nil { - log.Warnf("%v", err) + log.Warn().Msgf("%v", err) return ErrUnknownClickhouseDataPath } if b.cfg.General.RestoreSchemaOnCluster != "" { if b.cfg.General.RestoreSchemaOnCluster, err = b.ch.ApplyMacros(ctx, b.cfg.General.RestoreSchemaOnCluster); err != nil { - log.Warnf("%v", err) + log.Warn().Msgf("%v", err) return err } } @@ -130,10 +128,10 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab if !restoreRBAC && !rbacOnly && !restoreConfigs && !configsOnly { if !b.cfg.General.AllowEmptyBackups { err = fmt.Errorf("'%s' doesn't contains tables for restore, if you need it, you can setup `allow_empty_backups: true` in `general` config section", backupName) - log.Errorf("%v", err) + log.Error().Msgf("%v", err) return err } - log.Warnf("'%s' doesn't contains tables for restore", backupName) + log.Warn().Msgf("'%s' doesn't contains tables for restore", backupName) return nil } } @@ -142,19 +140,19 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab if err := b.restoreRBAC(ctx, backupName, disks, version, dropExists); err != nil { return err } - log.Infof("RBAC successfully restored") + log.Info().Msgf("RBAC successfully restored") needRestart = true } if configsOnly || restoreConfigs { if err := b.restoreConfigs(backupName, disks); err != nil { return err } - log.Infof("CONFIGS successfully restored") + log.Info().Msgf("CONFIGS successfully restored") needRestart = true } if needRestart { - if err := b.restartClickHouse(ctx, backupName, log); err != nil { + if err := b.restartClickHouse(ctx, backupName); err != nil { return err } if rbacOnly || configsOnly { @@ -178,7 +176,7 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab } defer func() { if err := b.dst.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() } @@ -231,10 +229,10 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab } } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "duration": utils.HumanizeDuration(time.Since(startRestore)), "version": backupVersion, - }).Info("done") + }).Msg("done") return nil } @@ -282,19 +280,19 @@ func (b *Backuper) getTablesForRestoreLocal(ctx context.Context, backupName stri return tablesForRestore, partitionsNames, nil } -func (b *Backuper) restartClickHouse(ctx context.Context, backupName string, log *apexLog.Entry) error { - log.Warnf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) +func (b *Backuper) restartClickHouse(ctx context.Context, backupName string) error { + log.Warn().Msgf("%s contains `access` or `configs` directory, so we need exec %s", backupName, b.ch.Config.RestartCommand) for _, cmd := range strings.Split(b.ch.Config.RestartCommand, ";") { cmd = strings.Trim(cmd, " \t\r\n") if strings.HasPrefix(cmd, "sql:") { cmd = strings.TrimPrefix(cmd, "sql:") if err := b.ch.QueryContext(ctx, cmd); err != nil { - log.Warnf("restart sql: %s, error: %v", cmd, err) + log.Warn().Msgf("restart sql: %s, error: %v", cmd, err) } } if strings.HasPrefix(cmd, "exec:") { cmd = strings.TrimPrefix(cmd, "exec:") - if err := b.executeShellCommandWithTimeout(ctx, cmd, log); err != nil { + if err := b.executeShellCommandWithTimeout(ctx, cmd); err != nil { return err } } @@ -312,30 +310,30 @@ breakByReconnect: if err := b.ch.Connect(); err == nil { break breakByReconnect } - 
log.Infof("wait 3 seconds") + log.Info().Msg("wait 3 seconds") time.Sleep(3 * time.Second) } } return nil } -func (b *Backuper) executeShellCommandWithTimeout(ctx context.Context, cmd string, log *apexLog.Entry) error { +func (b *Backuper) executeShellCommandWithTimeout(ctx context.Context, cmd string) error { shellCmd, err := shellwords.Parse(cmd) if err != nil { return err } shellCtx, shellCancel := context.WithTimeout(ctx, 180*time.Second) defer shellCancel() - log.Infof("run %s", cmd) + log.Info().Msgf("run %s", cmd) var out []byte if len(shellCmd) > 1 { out, err = exec.CommandContext(shellCtx, shellCmd[0], shellCmd[1:]...).CombinedOutput() } else { out, err = exec.CommandContext(shellCtx, shellCmd[0]).CombinedOutput() } - log.Debug(string(out)) + log.Debug().Msg(string(out)) if err != nil { - log.Warnf("restart exec: %s, error: %v", cmd, err) + log.Warn().Msgf("restart exec: %s, error: %v", cmd, err) } return nil } @@ -402,7 +400,6 @@ func (b *Backuper) prepareRestoreMapping(objectMapping []string, objectType stri // restoreRBAC - copy backup_name>/rbac folder to access_data_path func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []clickhouse.Disk, version int, dropExists bool) error { - log := b.log.WithField("logger", "restoreRBAC") accessPath, err := b.ch.GetAccessManagementPath(ctx, nil) if err != nil { return err @@ -410,7 +407,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c var k *keeper.Keeper replicatedUserDirectories := make([]clickhouse.UserDirectory, 0) if err = b.ch.SelectContext(ctx, &replicatedUserDirectories, "SELECT name FROM system.user_directories WHERE type='replicated'"); err == nil && len(replicatedUserDirectories) > 0 { - k = &keeper.Keeper{Log: b.log.WithField("logger", "keeper")} + k = &keeper.Keeper{} if connErr := k.Connect(ctx, b.ch); connErr != nil { return fmt.Errorf("but can't connect to keeper: %v", connErr) } @@ -424,7 +421,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c if err = b.restoreBackupRelatedDir(backupName, "access", accessPath, disks, []string{"*.jsonl"}); err == nil { markFile := path.Join(accessPath, "need_rebuild_lists.mark") - log.Infof("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) + log.Info().Msgf("create %s for properly rebuild RBAC after restart clickhouse-server", markFile) file, err := os.Create(markFile) if err != nil { return err @@ -432,7 +429,7 @@ func (b *Backuper) restoreRBAC(ctx context.Context, backupName string, disks []c _ = file.Close() _ = filesystemhelper.Chown(markFile, b.ch, disks, false) listFilesPattern := path.Join(accessPath, "*.list") - log.Infof("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) + log.Info().Msgf("remove %s for properly rebuild RBAC after restart clickhouse-server", listFilesPattern) if listFiles, err := filepathx.Glob(listFilesPattern); err != nil { return err } else { @@ -473,7 +470,7 @@ func (b *Backuper) restoreRBACResolveAllConflicts(ctx context.Context, backupNam if resolveErr := b.resolveRBACConflictIfExist(ctx, string(sql), accessPath, version, k, replicatedUserDirectories, dropExists); resolveErr != nil { return resolveErr } - b.log.Debugf("%s b.resolveRBACConflictIfExist(%s) no error", fPath, string(sql)) + log.Debug().Msgf("%s b.resolveRBACConflictIfExist(%s) no error", fPath, string(sql)) } if strings.HasSuffix(fPath, ".jsonl") { file, openErr := os.Open(fPath) @@ -487,14 +484,14 @@ func (b *Backuper)
restoreRBACResolveAllConflicts(ctx context.Context, backupNam data := keeper.DumpNode{} jsonErr := json.Unmarshal([]byte(line), &data) if jsonErr != nil { - b.log.Errorf("can't %s json.Unmarshal error: %v line: %s", fPath, line, jsonErr) + log.Error().Msgf("can't %s json.Unmarshal error: %v line: %s", fPath, line, jsonErr) continue } if strings.HasPrefix(data.Path, "uuid/") { if resolveErr := b.resolveRBACConflictIfExist(ctx, data.Value, accessPath, version, k, replicatedUserDirectories, dropExists); resolveErr != nil { return resolveErr } - b.log.Debugf("%s:%s b.resolveRBACConflictIfExist(%s) no error", fPath, data.Path, data.Value) + log.Debug().Msgf("%s:%s b.resolveRBACConflictIfExist(%s) no error", fPath, data.Path, data.Value) } } @@ -503,7 +500,7 @@ func (b *Backuper) restoreRBACResolveAllConflicts(ctx context.Context, backupNam } if closeErr := file.Close(); closeErr != nil { - b.log.Warnf("can't close %s error: %v", fPath, closeErr) + log.Warn().Msgf("can't close %s error: %v", fPath, closeErr) } } @@ -521,7 +518,7 @@ func (b *Backuper) resolveRBACConflictIfExist(ctx context.Context, sql string, a return detectErr } if isExists, existsRBACType, existsRBACObjectId := b.isRBACExists(ctx, kind, name, accessPath, version, k, replicatedUserDirectories); isExists { - b.log.Warnf("RBAC object kind=%s, name=%s already present, will %s", kind, name, b.cfg.General.RBACConflictResolution) + log.Warn().Msgf("RBAC object kind=%s, name=%s already present, will %s", kind, name, b.cfg.General.RBACConflictResolution) if b.cfg.General.RBACConflictResolution == "recreate" || dropExists { if dropErr := b.dropExistsRBAC(ctx, kind, name, accessPath, existsRBACType, existsRBACObjectId, k); dropErr != nil { return dropErr @@ -547,13 +544,13 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a } systemTable, systemTableExists := rbacSystemTableNames[kind] if !systemTableExists { - b.log.Errorf("unsupported RBAC object kind: %s", kind) + log.Error().Msgf("unsupported RBAC object kind: %s", kind) return false, "", "" } isRBACExistsSQL := fmt.Sprintf("SELECT toString(id) AS id, name FROM `system`.`%s` WHERE name=? 
LIMIT 1", systemTable) existsRBACRow := make([]clickhouse.RBACObject, 0) if err := b.ch.SelectContext(ctx, &existsRBACRow, isRBACExistsSQL, name); err != nil { - b.log.Warnf("RBAC object resolve failed, check SQL GRANTS or settings for user which you use to connect to clickhouse-server, kind: %s, name: %s, error: %v", kind, name, err) + log.Warn().Msgf("RBAC object resolve failed, check SQL GRANTS or settings for user which you use to connect to clickhouse-server, kind: %s, name: %s, error: %v", kind, name, err) return false, "", "" } if len(existsRBACRow) == 0 { @@ -565,7 +562,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a checkRBACExists := func(sql string) bool { existsKind, existsName, detectErr := b.detectRBACObject(sql) if detectErr != nil { - b.log.Warnf("isRBACExists error: %v", detectErr) + log.Warn().Msgf("isRBACExists error: %v", detectErr) return false } if existsKind == kind && existsName == name { @@ -579,7 +576,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a for _, f := range sqlFiles { sql, readErr := os.ReadFile(f) if readErr != nil { - b.log.Warnf("read %s error: %v", f, readErr) + log.Warn().Msgf("read %s error: %v", f, readErr) continue } if checkRBACExists(string(sql)) { @@ -587,7 +584,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a } } } else { - b.log.Warnf("access/*.sql error: %v", globErr) + log.Warn().Msgf("access/*.sql error: %v", globErr) } //search in keeper replicated user directory @@ -595,7 +592,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a for _, userDirectory := range replicatedUserDirectories { replicatedAccessPath, getAccessErr := k.GetReplicatedAccessPath(userDirectory.Name) if getAccessErr != nil { - b.log.Warnf("b.isRBACExists -> k.GetReplicatedAccessPath error: %v", getAccessErr) + log.Warn().Msgf("b.isRBACExists -> k.GetReplicatedAccessPath error: %v", getAccessErr) continue } isExists := false @@ -612,7 +609,7 @@ func (b *Backuper) isRBACExists(ctx context.Context, kind string, name string, a return false, nil }) if walkErr != nil { - b.log.Warnf("b.isRBACExists -> k.Walk error: %v", walkErr) + log.Warn().Msgf("b.isRBACExists -> k.Walk error: %v", walkErr) continue } if isExists { @@ -728,11 +725,10 @@ func (b *Backuper) restoreRBACReplicated(backupName string, backupPrefixDir stri if k == nil || len(replicatedUserDirectories) == 0 { return nil } - log := b.log.WithField("logger", "restoreRBACReplicated") srcBackupDir := path.Join(b.DefaultDataPath, "backup", backupName, backupPrefixDir) info, err := os.Stat(srcBackupDir) if err != nil { - log.Warnf("stat: %s error: %v", srcBackupDir, err) + log.Warn().Msgf("stat: %s error: %v", srcBackupDir, err) return err } @@ -762,7 +758,7 @@ func (b *Backuper) restoreRBACReplicated(backupName string, backupPrefixDir stri if err != nil { return err } - log.Infof("keeper.Restore(%s) -> %s", jsonLFile, replicatedAccessPath) + log.Info().Msgf("keeper.Restore(%s) -> %s", jsonLFile, replicatedAccessPath) if err := k.Restore(jsonLFile, replicatedAccessPath); err != nil { return err } @@ -780,22 +776,21 @@ func (b *Backuper) restoreConfigs(backupName string, disks []clickhouse.Disk) er } func (b *Backuper) restoreBackupRelatedDir(backupName, backupPrefixDir, destinationDir string, disks []clickhouse.Disk, skipPatterns []string) error { - log := b.log.WithField("logger", "restoreBackupRelatedDir") srcBackupDir := path.Join(b.DefaultDataPath, "backup", backupName, 
backupPrefixDir) info, err := os.Stat(srcBackupDir) if err != nil { - log.Warnf("stat: %s error: %v", srcBackupDir, err) + log.Warn().Msgf("stat: %s error: %v", srcBackupDir, err) return err } existsFiles, _ := os.ReadDir(destinationDir) for _, existsF := range existsFiles { existsI, _ := existsF.Info() - log.Debugf("%s %v %v", path.Join(destinationDir, existsF.Name()), existsI.Size(), existsI.ModTime()) + log.Debug().Msgf("%s %v %v", path.Join(destinationDir, existsF.Name()), existsI.Size(), existsI.ModTime()) } if !info.IsDir() { return fmt.Errorf("%s is not a dir", srcBackupDir) } - log.Debugf("copy %s -> %s", srcBackupDir, destinationDir) + log.Debug().Msgf("copy %s -> %s", srcBackupDir, destinationDir) copyOptions := recursiveCopy.Options{ OnDirExists: func(src, dst string) recursiveCopy.DirExistsAction { return recursiveCopy.Merge @@ -848,24 +843,24 @@ func (b *Backuper) dropExistPartitions(ctx context.Context, tablesForRestore Lis // RestoreSchema - restore schemas matched by tablePattern from backupName func (b *Backuper) RestoreSchema(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, disks []clickhouse.Disk, tablesForRestore ListOfTables, ignoreDependencies bool, version int) error { - log := apexLog.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "restore_schema", - }) startRestoreSchema := time.Now() - if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version, log); dropErr != nil { + if dropErr := b.dropExistsTables(tablesForRestore, ignoreDependencies, version); dropErr != nil { return dropErr } var restoreErr error if b.isEmbedded { restoreErr = b.restoreSchemaEmbedded(ctx, backupName, backupMetadata, disks, tablesForRestore, version) } else { - restoreErr = b.restoreSchemaRegular(tablesForRestore, version, log) + restoreErr = b.restoreSchemaRegular(tablesForRestore, version) } if restoreErr != nil { return restoreErr } - log.WithField("duration", utils.HumanizeDuration(time.Since(startRestoreSchema))).Info("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "restore_schema", + "duration": utils.HumanizeDuration(time.Since(startRestoreSchema)), + }).Msg("done") return nil } @@ -879,7 +874,7 @@ func (b *Backuper) restoreSchemaEmbedded(ctx context.Context, backupName string, if !b.cfg.General.AllowEmptyBackups { return fmt.Errorf("no tables for restore") } - b.log.Warnf("no tables for restore in embeddded backup %s/metadata.json", backupName) + log.Warn().Msgf("no tables for restore in embeddded backup %s/metadata.json", backupName) return nil } if b.cfg.ClickHouse.EmbeddedBackupDisk != "" { @@ -997,7 +992,7 @@ func (b *Backuper) fixEmbeddedMetadataSQLQuery(ctx context.Context, sqlBytes []b if UUIDWithMergeTreeRE.Match(sqlBytes) && version < 23009000 { sqlQuery = UUIDWithMergeTreeRE.ReplaceAllString(sqlQuery, "$1$2$3'$4'$5$4$7") } else { - apexLog.Warnf("%s contains `{uuid}` macro, will replace to `{database}/{table}` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath) + log.Warn().Msgf("%s contains `{uuid}` macro, will replace to `{database}/{table}` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath) filePathParts := strings.Split(filePath, "/") database, err := url.QueryUnescape(filePathParts[len(filePathParts)-3]) if err != nil { @@ -1023,9 +1018,9 @@ func (b *Backuper) fixEmbeddedMetadataSQLQuery(ctx context.Context, sqlBytes []b return "", false, err } if len(settings) != 2 { - apexLog.Fatalf("can't get %#v 
from preprocessed_configs/config.xml", replicaXMLSettings) + log.Fatal().Msgf("can't get %#v from preprocessed_configs/config.xml", replicaXMLSettings) } - apexLog.Warnf("%s contains `ReplicatedMergeTree()` without parameters, will replace to '%s` and `%s` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath, settings["default_replica_path"], settings["default_replica_name"]) + log.Warn().Msgf("%s contains `ReplicatedMergeTree()` without parameters, will replace to '%s` and `%s` see https://github.com/ClickHouse/ClickHouse/issues/42709 for details", filePath, settings["default_replica_path"], settings["default_replica_name"]) matches := emptyReplicatedMergeTreeRE.FindStringSubmatch(sqlQuery) substitution := fmt.Sprintf("$1$2('%s','%s')$4", settings["default_replica_path"], settings["default_replica_name"]) if matches[2] != "" { @@ -1037,7 +1032,7 @@ func (b *Backuper) fixEmbeddedMetadataSQLQuery(ctx context.Context, sqlBytes []b return sqlQuery, sqlMetadataChanged, nil } -func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int, log *apexLog.Entry) error { +func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version int) error { totalRetries := len(tablesForRestore) restoreRetries := 0 isDatabaseCreated := common.EmptyMap{} @@ -1066,7 +1061,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i // https://github.com/Altinity/clickhouse-backup/issues/466 if b.cfg.General.RestoreSchemaOnCluster == "" && strings.Contains(schema.Query, "{uuid}") && strings.Contains(schema.Query, "Replicated") { if !strings.Contains(schema.Query, "UUID") { - log.Warnf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") + log.Warn().Msgf("table query doesn't contains UUID, can't guarantee properly restore for ReplicatedMergeTree") } else { schema.Query = UUIDWithMergeTreeRE.ReplaceAllString(schema.Query, "$1$2$3'$4'$5$4$7") } @@ -1084,7 +1079,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i schema.Database, schema.Table, restoreErr, restoreRetries, ) } else { - log.Warnf( + log.Warn().Msgf( "can't create table '%s.%s': %v, will try again", schema.Database, schema.Table, restoreErr, ) } @@ -1099,7 +1094,7 @@ func (b *Backuper) restoreSchemaRegular(tablesForRestore ListOfTables, version i return nil } -func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int, log *apexLog.Entry) error { +func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependencies bool, version int) error { var dropErr error dropRetries := 0 totalRetries := len(tablesForDrop) @@ -1141,7 +1136,7 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci schema.Database, schema.Table, dropErr, dropRetries, ) } else { - log.Warnf( + log.Warn().Msgf( "can't drop table '%s.%s': %v, will try again", schema.Database, schema.Table, dropErr, ) } @@ -1160,11 +1155,6 @@ func (b *Backuper) dropExistsTables(tablesForDrop ListOfTables, ignoreDependenci func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, dataOnly bool, metadataPath, tablePattern string, partitions []string, disks []clickhouse.Disk, version int) error { var err error startRestoreData := time.Now() - log := apexLog.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "restore_data", - }) - diskMap := make(map[string]string, len(disks)) diskTypes := 
make(map[string]string, len(disks)) for _, disk := range disks { @@ -1182,28 +1172,31 @@ func (b *Backuper) RestoreData(ctx context.Context, backupName string, backupMet if err != nil { // fix https://github.com/Altinity/clickhouse-backup/issues/832 if b.cfg.General.AllowEmptyBackups && os.IsNotExist(err) { - log.Warnf("b.getTableListByPatternLocal return error: %v", err) + log.Warn().Msgf("b.getTableListByPatternLocal return error: %v", err) return nil } return err } if len(tablesForRestore) == 0 { if b.cfg.General.AllowEmptyBackups { - log.Warnf("not found schemas by %s in %s", tablePattern, backupName) + log.Warn().Msgf("not found schemas by %s in %s", tablePattern, backupName) return nil } return fmt.Errorf("not found schemas schemas by %s in %s", tablePattern, backupName) } - log.Debugf("found %d tables with data in backup", len(tablesForRestore)) + log.Debug().Msgf("found %d tables with data in backup", len(tablesForRestore)) if b.isEmbedded { err = b.restoreDataEmbedded(ctx, backupName, dataOnly, version, tablesForRestore, partitionsNameList) } else { - err = b.restoreDataRegular(ctx, backupName, backupMetadata, tablePattern, tablesForRestore, diskMap, diskTypes, disks, log) + err = b.restoreDataRegular(ctx, backupName, backupMetadata, tablePattern, tablesForRestore, diskMap, diskTypes, disks) } if err != nil { return err } - log.WithField("duration", utils.HumanizeDuration(time.Since(startRestoreData))).Info("done") + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "restore_data", + }).Str("duration", utils.HumanizeDuration(time.Since(startRestoreData))).Msg("done") return nil } @@ -1211,7 +1204,7 @@ func (b *Backuper) restoreDataEmbedded(ctx context.Context, backupName string, d return b.restoreEmbedded(ctx, backupName, false, dataOnly, version, tablesForRestore, partitionsNameList) } -func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, tablePattern string, tablesForRestore ListOfTables, diskMap, diskTypes map[string]string, disks []clickhouse.Disk) error { if len(b.cfg.General.RestoreDatabaseMapping) > 0 { tablePattern = b.changeTablePatternFromRestoreMapping(tablePattern, "database") } @@ -1257,7 +1250,7 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba tablesForRestore[i].Table = targetTable } } - log := log.WithField("table", fmt.Sprintf("%s.%s", dstDatabase, dstTableName)) + logger := log.With().Str("table", fmt.Sprintf("%s.%s", dstDatabase, dstTableName)).Logger() dstTable, ok := dstTablesMap[metadata.TableTitle{ Database: dstDatabase, Table: dstTableName}] @@ -1268,24 +1261,24 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba restoreBackupWorkingGroup.Go(func() error { // https://github.com/Altinity/clickhouse-backup/issues/529 if b.cfg.ClickHouse.RestoreAsAttach { - if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { + if restoreErr := b.restoreDataRegularByAttach(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, logger); restoreErr != nil { return restoreErr } } else { - if restoreErr := 
b.restoreDataRegularByParts(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, log); restoreErr != nil { + if restoreErr := b.restoreDataRegularByParts(restoreCtx, backupName, backupMetadata, table, diskMap, diskTypes, disks, dstTable, logger); restoreErr != nil { return restoreErr } } // https://github.com/Altinity/clickhouse-backup/issues/529 for _, mutation := range table.Mutations { if err := b.ch.ApplyMutation(restoreCtx, tablesForRestore[idx], mutation); err != nil { - log.Warnf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[idx].Database, tablesForRestore[idx].Table, err) + logger.Warn().Msgf("can't apply mutation %s for table `%s`.`%s` : %v", mutation.Command, tablesForRestore[idx].Database, tablesForRestore[idx].Table, err) } } - log.WithFields(apexLog.Fields{ + logger.Info().Fields(map[string]interface{}{ "duration": utils.HumanizeDuration(time.Since(tableRestoreStartTime)), "progress": fmt.Sprintf("%d/%d", idx+1, len(tablesForRestore)), - }).Info("done") + }).Msg("done") return nil }) } @@ -1295,11 +1288,11 @@ func (b *Backuper) restoreDataRegular(ctx context.Context, backupName string, ba return nil } -func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger) error { if err := filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, false); err != nil { return fmt.Errorf("can't copy data to storage '%s.%s': %v", table.Database, table.Table, err) } - log.Debug("data to 'storage' copied") + logger.Debug().Msg("data to 'storage' copied") var size int64 var err error start := time.Now() @@ -1307,7 +1300,7 @@ func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName st return fmt.Errorf("can't restore object_disk server-side copy data parts '%s.%s': %v", table.Database, table.Table, err) } if size > 0 { - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).WithField("size", utils.FormatBytes(uint64(size))).Info("download object_disks finish") + logger.Info().Str("duration", utils.HumanizeDuration(time.Since(start))).Str("size", utils.FormatBytes(uint64(size))).Msg("download object_disks finish") } if err := b.ch.AttachTable(ctx, table, dstTable); err != nil { return fmt.Errorf("can't attach table '%s.%s': %v", table.Database, table.Table, err) @@ -1315,19 +1308,19 @@ func (b *Backuper) restoreDataRegularByAttach(ctx context.Context, backupName st return nil } -func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, log *apexLog.Entry) error { +func (b *Backuper) restoreDataRegularByParts(ctx context.Context, backupName string, backupMetadata metadata.BackupMetadata, table metadata.TableMetadata, diskMap, diskTypes map[string]string, disks []clickhouse.Disk, dstTable clickhouse.Table, logger zerolog.Logger) error { if err :=
filesystemhelper.HardlinkBackupPartsToStorage(backupName, table, disks, diskMap, dstTable.DataPaths, b.ch, true); err != nil { return fmt.Errorf("can't copy data to detached '%s.%s': %v", table.Database, table.Table, err) } - log.Debug("data to 'detached' copied") - log.Info("download object_disks start") + logger.Debug().Msg("data to 'detached' copied") + logger.Info().Msg("download object_disks start") var size int64 var err error start := time.Now() if size, err = b.downloadObjectDiskParts(ctx, backupName, backupMetadata, table, diskMap, diskTypes, disks); err != nil { return fmt.Errorf("can't restore object_disk server-side copy data parts '%s.%s': %v", table.Database, table.Table, err) } - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).WithField("size", utils.FormatBytes(uint64(size))).Info("download object_disks finish") + log.Info().Str("duration", utils.HumanizeDuration(time.Since(start))).Str("size", utils.FormatBytes(uint64(size))).Msg("download object_disks finish") if err := b.ch.AttachDataParts(table, dstTable); err != nil { return fmt.Errorf("can't attach data parts for table '%s.%s': %v", table.Database, table.Table, err) } @@ -1482,7 +1475,7 @@ func (b *Backuper) findObjectDiskPartRecursive(ctx context.Context, backup metad return "", "", err } var requiredTable *metadata.TableMetadata - requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, log, metadata.TableTitle{Database: table.Database, Table: table.Table}) + requiredTable, err = b.downloadTableMetadataIfNotExists(ctx, requiredBackup.BackupName, metadata.TableTitle{Database: table.Database, Table: table.Table}) // @todo think about add check what if disk type could changed (should already restricted, cause upload seek part in the same disk name) for requiredDiskName, parts := range requiredTable.Parts { for _, requiredPart := range parts { diff --git a/pkg/backup/table_pattern.go b/pkg/backup/table_pattern.go index e60c908f..dfcf6196 100644 --- a/pkg/backup/table_pattern.go +++ b/pkg/backup/table_pattern.go @@ -13,14 +13,14 @@ import ( "sort" "strings" - apexLog "github.com/apex/log" - "github.com/google/uuid" - "github.com/Altinity/clickhouse-backup/v2/pkg/common" "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/filesystemhelper" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" "github.com/Altinity/clickhouse-backup/v2/pkg/partition" + + "github.com/google/uuid" + "github.com/rs/zerolog/log" ) type ListOfTables []metadata.TableMetadata @@ -51,7 +51,6 @@ func (b *Backuper) getTableListByPatternLocal(ctx context.Context, metadataPath result := ListOfTables{} resultPartitionNames := map[metadata.TableTitle][]string{} tablePatterns := []string{"*"} - log := apexLog.WithField("logger", "getTableListByPatternLocal") if tablePattern != "" { tablePatterns = strings.Split(tablePattern, ",") } @@ -93,7 +92,7 @@ func (b *Backuper) getTableListByPatternLocal(ctx context.Context, metadataPath } if isEmbeddedMetadata { // embedded backup to s3 disk could contain only s3 key names inside .sql file - t, err := prepareTableMetadataFromSQL(data, metadataPath, names, log, b.cfg, database, table) + t, err := prepareTableMetadataFromSQL(data, metadataPath, names, b.cfg, database, table) if err != nil { return err } @@ -152,31 +151,31 @@ func (b *Backuper) shouldSkipByTableEngine(t metadata.TableMetadata) bool { for _, engine := range b.cfg.ClickHouse.SkipTableEngines { //b.log.Debugf("engine=%s query=%s", engine, t.Query) if 
strings.ToLower(engine) == "dictionary" && (strings.HasPrefix(t.Query, "ATTACH DICTIONARY") || strings.HasPrefix(t.Query, "CREATE DICTIONARY")) { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } if strings.ToLower(engine) == "materializedview" && (strings.HasPrefix(t.Query, "ATTACH MATERIALIZED VIEW") || strings.HasPrefix(t.Query, "CREATE MATERIALIZED VIEW")) { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } if strings.ToLower(engine) == "view" && (strings.HasPrefix(t.Query, "ATTACH VIEW") || strings.HasPrefix(t.Query, "CREATE VIEW")) { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } if strings.ToLower(engine) == "liveview" && (strings.HasPrefix(t.Query, "ATTACH LIVE") || strings.HasPrefix(t.Query, "CREATE LIVE")) { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } if strings.ToLower(engine) == "windowview" && (strings.HasPrefix(t.Query, "ATTACH WINDOW") || strings.HasPrefix(t.Query, "CREATE WINDOW")) { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } if engine != "" { if shouldSkip, err := regexp.MatchString(fmt.Sprintf("(?mi)ENGINE\\s*=\\s*%s([\\(\\s]|\\s*)", engine), t.Query); err == nil && shouldSkip { - b.log.Warnf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s found in : %s", engine, t.Query) return true } else if err != nil { - b.log.Warnf("shouldSkipByTableEngine engine=%s return error: %v", engine, err) + log.Warn().Msgf("shouldSkipByTableEngine engine=%s return error: %v", engine, err) } } } @@ -199,7 +198,7 @@ func (b *Backuper) checkShallSkipped(p string, metadataPath string) ([]string, s return names, database, table, tableFullName, shallSkipped, true } -func prepareTableMetadataFromSQL(data []byte, metadataPath string, names []string, log *apexLog.Entry, cfg *config.Config, database string, table string) (metadata.TableMetadata, error) { +func prepareTableMetadataFromSQL(data []byte, metadataPath string, names []string, cfg *config.Config, database string, table string) (metadata.TableMetadata, error) { query := string(data) if strings.HasPrefix(query, "ATTACH") || strings.HasPrefix(query, "CREATE") { query = strings.Replace(query, "ATTACH", "CREATE", 1) @@ -213,7 +212,7 @@ func prepareTableMetadataFromSQL(data []byte, metadataPath string, names []strin } dataParts, err := os.ReadDir(dataPartsPath) if err != nil { - log.Warn(err.Error()) + log.Warn().Err(err).Send() } parts := map[string][]metadata.Part{ cfg.ClickHouse.EmbeddedBackupDisk: make([]metadata.Part, len(dataParts)), diff --git a/pkg/backup/upload.go b/pkg/backup/upload.go index a7195a6d..56841695 100644 --- a/pkg/backup/upload.go +++ b/pkg/backup/upload.go @@ -28,7 +28,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/filesystemhelper" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - apexLog "github.com/apex/log" + 
"github.com/rs/zerolog/log" "github.com/yargevad/filepathx" ) @@ -57,10 +57,6 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr if b.cfg.General.RemoteStorage == "custom" { return custom.Upload(ctx, b.cfg, backupName, diffFrom, diffFromRemote, tablePattern, partitions, schemaOnly) } - log := apexLog.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "upload", - }) if _, disks, err = b.getLocalBackup(ctx, backupName, nil); err != nil { return fmt.Errorf("can't find local backup: %v", err) } @@ -69,7 +65,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr } defer func() { if err := b.dst.Close(ctx); err != nil { - b.log.Warnf("can't close BackupDestination error: %v", err) + log.Warn().Msgf("can't close BackupDestination error: %v", err) } }() @@ -82,7 +78,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr if !b.resume { return fmt.Errorf("'%s' already exists on remote storage", backupName) } else { - log.Warnf("'%s' already exists on remote, will try to resume upload", backupName) + log.Warn().Msgf("'%s' already exists on remote, will try to resume upload", backupName) } } } @@ -131,7 +127,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr compressedDataSize := int64(0) metadataSize := int64(0) - log.Debugf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) + log.Debug().Msgf("prepare table concurrent semaphore with concurrency=%d len(tablesForUpload)=%d", b.cfg.General.UploadConcurrency, len(tablesForUpload)) uploadGroup, uploadCtx := errgroup.WithContext(ctx) uploadGroup.SetLimit(int(b.cfg.General.UploadConcurrency)) @@ -165,13 +161,13 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr return err } atomic.AddInt64(&metadataSize, tableMetadataSize) - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "table": fmt.Sprintf("%s.%s", tablesForUpload[idx].Database, tablesForUpload[idx].Table), "progress": fmt.Sprintf("%d/%d", idx+1, len(tablesForUpload)), "duration": utils.HumanizeDuration(time.Since(start)), "size": utils.FormatBytes(uint64(uploadedBytes + tableMetadataSize)), "version": backupVersion, - }).Info("done") + }).Msg("done") return nil }) } @@ -233,12 +229,14 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr if b.resume { b.resumableState.Close() } - log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "upload", "duration": utils.HumanizeDuration(time.Since(startUpload)), "upload_size": utils.FormatBytes(uint64(compressedDataSize) + uint64(metadataSize) + uint64(len(newBackupMetadataBody)) + backupMetadata.RBACSize + backupMetadata.ConfigSize), "object_disk_size": utils.FormatBytes(backupMetadata.ObjectDiskSize), "version": backupVersion, - }).Info("done") + }).Msg("done") // Remote old backup retention if err = b.RemoveOldBackupsRemote(ctx); err != nil { @@ -269,28 +267,28 @@ func (b *Backuper) RemoveOldBackupsRemote(ctx context.Context) error { return err } backupsToDelete := storage.GetBackupsToDeleteRemote(backupList, b.cfg.General.BackupsToKeepRemote) - b.dst.Log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackupsRemote", "duration": utils.HumanizeDuration(time.Since(start)), - }).Info("calculate backup list for delete remote") + 
}).Msg("calculate backup list for delete remote") for _, backupToDelete := range backupsToDelete { startDelete := time.Now() - err = b.cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx, backupToDelete, b.dst.Log) + err = b.cleanEmbeddedAndObjectDiskRemoteIfSameLocalNotPresent(ctx, backupToDelete) if err != nil { return err } if err := b.dst.RemoveBackupRemote(ctx, backupToDelete); err != nil { - b.dst.Log.Warnf("can't deleteKey %s return error : %v", backupToDelete.BackupName, err) + log.Warn().Msgf("can't deleteKey %s return error : %v", backupToDelete.BackupName, err) } - b.dst.Log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "operation": "RemoveOldBackupsRemote", "location": "remote", "backup": backupToDelete.BackupName, "duration": utils.HumanizeDuration(time.Since(startDelete)), - }).Info("done") + }).Msg("done") } - b.dst.Log.WithFields(apexLog.Fields{"operation": "RemoveOldBackupsRemote", "duration": utils.HumanizeDuration(time.Since(start))}).Info("done") + log.Info().Fields(map[string]interface{}{"operation": "RemoveOldBackupsRemote", "duration": utils.HumanizeDuration(time.Since(start))}).Msg("done") return nil } @@ -300,14 +298,13 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote return size, nil } } - log := b.log.WithField("logger", "uploadSingleBackupFile") f, err := os.Open(localFile) if err != nil { return 0, fmt.Errorf("can't open %s: %v", localFile, err) } defer func() { if err := f.Close(); err != nil { - log.Warnf("can't close %v: %v", f, err) + log.Warn().Msgf("can't close %v: %v", f, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -432,7 +429,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate if !b.cfg.General.RBACBackupAlways { return 0, fmt.Errorf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err) } - b.log.Warnf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err) + log.Warn().Msgf("list %s return list=%v with err=%v", localFilesGlobPattern, localFiles, err) return 0, nil } @@ -484,8 +481,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet for disk := range table.Parts { capacity += len(table.Parts[disk]) } - log := b.log.WithField("logger", "uploadTableData") - log.Debugf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) + log.Debug().Msgf("start %s.%s with concurrency=%d len(table.Parts[...])=%d", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity) ctx, cancel := context.WithCancel(ctx) defer cancel() dataGroup, ctx := errgroup.WithContext(ctx) @@ -526,9 +522,9 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet return nil } } - log.Debugf("start upload %d files to %s", len(partFiles), remotePath) + log.Debug().Msgf("start upload %d files to %s", len(partFiles), remotePath) if uploadPathBytes, err := b.dst.UploadPath(ctx, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.UploadMaxBytesPerSecond); err != nil { - log.Errorf("UploadPath return error: %v", err) + log.Error().Msgf("UploadPath return error: %v", err) return fmt.Errorf("can't upload: %v", err) } else { atomic.AddInt64(&uploadedBytes, uploadPathBytes) @@ -558,13 +554,13 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, 
delet return nil } } - log.Debugf("start upload %d files to %s", len(localFiles), remoteDataFile) + log.Debug().Msgf("start upload %d files to %s", len(localFiles), remoteDataFile) retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) err := retry.RunCtx(ctx, func(ctx context.Context) error { return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile, b.cfg.General.UploadMaxBytesPerSecond) }) if err != nil { - log.Errorf("UploadCompressedStream return error: %v", err) + log.Error().Msgf("UploadCompressedStream return error: %v", err) return fmt.Errorf("can't upload: %v", err) } @@ -589,7 +585,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet } } } - log.Debugf("finish upload to %s", remoteDataFile) + log.Debug().Msgf("finish upload to %s", remoteDataFile) return nil }) } @@ -598,7 +594,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet if err := dataGroup.Wait(); err != nil { return nil, 0, fmt.Errorf("one of uploadTableData go-routine return error: %v", err) } - log.Debugf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) + log.Debug().Msgf("finish %s.%s with concurrency=%d len(table.Parts[...])=%d uploadedFiles=%v, uploadedBytes=%v", table.Database, table.Table, b.cfg.General.UploadConcurrency, capacity, uploadedFiles, uploadedBytes) return uploadedFiles, uploadedBytes, nil } @@ -648,7 +644,6 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s return processedSize, nil } } - log := b.log.WithField("logger", "uploadTableMetadataEmbedded") localTableMetaFile := path.Join(b.EmbeddedBackupDataPath, backupName, "metadata", common.TablePathEncode(tableMetadata.Database), fmt.Sprintf("%s.sql", common.TablePathEncode(tableMetadata.Table))) var info os.FileInfo var localReader *os.File @@ -657,7 +652,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s if err != nil { err = fmt.Errorf("can't open %s: %v", localTableMetaFile, err) if requiredBackupName != "" { - log.Warnf("%v", err) + log.Warn().Err(err).Send() return 0, nil } else { return 0, err @@ -668,7 +663,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } defer func() { if err := localReader.Close(); err != nil { - log.Warnf("can't close %v: %v", localReader, err) + log.Warn().Msgf("can't close %v: %v", localReader, err) } }() retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), nil) @@ -685,7 +680,6 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s } func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTable *metadata.TableMetadata, newTable *metadata.TableMetadata, checkLocal bool) { - log := b.log.WithField("logger", "markDuplicatedParts") for disk, newParts := range newTable.Parts { if _, diskExists := existsTable.Parts[disk]; diskExists { if len(existsTable.Parts[disk]) == 0 { @@ -705,7 +699,7 @@ func (b *Backuper) markDuplicatedParts(backup *metadata.BackupMetadata, existsTa newPath := path.Join(b.DiskToPathMap[disk], "backup", backup.BackupName, "shadow", dbAndTablePath, disk, newParts[i].Name) if err := filesystemhelper.IsDuplicatedParts(existsPath, newPath); err != nil { - log.Debugf("part '%s' and '%s' must be the same: 
%v", existsPath, newPath, err) + log.Debug().Msgf("part '%s' and '%s' must be the same: %v", existsPath, newPath, err) continue } } @@ -765,7 +759,6 @@ func (b *Backuper) splitPartFiles(basePath string, parts []metadata.Part) ([]met } func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - log := b.log.WithField("logger", "splitFilesByName") result := make([]metadata.SplitPartFiles, 0) for i := range parts { if parts[i].Required { @@ -785,7 +778,7 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - log.Warnf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } result = append(result, metadata.SplitPartFiles{ Prefix: parts[i].Name, @@ -796,7 +789,6 @@ func (b *Backuper) splitFilesByName(basePath string, parts []metadata.Part) ([]m } func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]metadata.SplitPartFiles, error) { - log := b.log.WithField("logger", "splitFilesBySize") var size int64 var files []string maxSize := b.cfg.General.MaxFileSize @@ -829,7 +821,7 @@ func (b *Backuper) splitFilesBySize(basePath string, parts []metadata.Part) ([]m return nil }) if err != nil { - log.Warnf("filepath.Walk return error: %v", err) + log.Warn().Msgf("filepath.Walk return error: %v", err) } } if len(files) > 0 { diff --git a/pkg/backup/watch.go b/pkg/backup/watch.go index 0065827a..2560f50e 100644 --- a/pkg/backup/watch.go +++ b/pkg/backup/watch.go @@ -6,7 +6,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/server/metrics" "github.com/Altinity/clickhouse-backup/v2/pkg/status" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "github.com/urfave/cli" "regexp" "strings" @@ -106,17 +106,13 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t if cfg, err := config.LoadConfig(config.GetConfigPath(cliCtx)); err == nil { b.cfg = cfg } else { - b.log.Warnf("watch config.LoadConfig error: %v", err) + log.Warn().Msgf("watch config.LoadConfig error: %v", err) } if err := b.ValidateWatchParams(watchInterval, fullInterval, watchBackupNameTemplate); err != nil { return err } } backupName, err := b.NewBackupWatchName(ctx, backupType) - log := b.log.WithFields(apexLog.Fields{ - "backup": backupName, - "operation": "watch", - }) if err != nil { return err } @@ -158,14 +154,17 @@ func (b *Backuper) Watch(watchInterval, fullInterval, watchBackupNameTemplate, t cmd += " --skip-check-parts-columns" } cmd += " " + backupName - log.Errorf("%s return error: %v", cmd, createRemoteErr) + log.Error().Msgf("%s return error: %v", cmd, createRemoteErr) createRemoteErrCount += 1 } else { createRemoteErrCount = 0 } deleteLocalErr = b.RemoveBackupLocal(ctx, backupName, nil) if deleteLocalErr != nil { - log.Errorf("delete local %s return error: %v", backupName, deleteLocalErr) + log.Error().Fields(map[string]interface{}{ + "backup": backupName, + "operation": "watch", + }).Msgf("delete local %s return error: %v", backupName, deleteLocalErr) deleteLocalErrCount += 1 } else { deleteLocalErrCount = 0 @@ -237,10 +236,10 @@ func (b *Backuper) calculatePrevBackupNameAndType(ctx context.Context, prevBacku now := time.Now() timeBeforeDoBackup := int(b.cfg.General.WatchDuration.Seconds() - now.Sub(lastBackup).Seconds()) timeBeforeDoFullBackup := int(b.cfg.General.FullDuration.Seconds() - now.Sub(lastFullBackup).Seconds()) - b.log.Infof("Time before 
do backup %v", timeBeforeDoBackup) - b.log.Infof("Time before do full backup %v", timeBeforeDoFullBackup) + log.Info().Msgf("Time before do backup %v", timeBeforeDoBackup) + log.Info().Msgf("Time before do full backup %v", timeBeforeDoFullBackup) if timeBeforeDoBackup > 0 && timeBeforeDoFullBackup > 0 { - b.log.Infof("Waiting %d seconds until continue doing backups due watch interval", timeBeforeDoBackup) + log.Info().Msgf("Waiting %d seconds until continue doing backups due watch interval", timeBeforeDoBackup) select { case <-ctx.Done(): return "", "", time.Time{}, time.Time{}, "", ctx.Err() diff --git a/pkg/clickhouse/clickhouse.go b/pkg/clickhouse/clickhouse.go index 3382e0f6..e32fc434 100644 --- a/pkg/clickhouse/clickhouse.go +++ b/pkg/clickhouse/clickhouse.go @@ -18,18 +18,20 @@ import ( "github.com/ClickHouse/clickhouse-go/v2" "github.com/ClickHouse/clickhouse-go/v2/lib/driver" "github.com/antchfx/xmlquery" - apexLog "github.com/apex/log" "github.com/ricochet2200/go-disk-usage/du" "github.com/Altinity/clickhouse-backup/v2/pkg/common" "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + ) // ClickHouse - provide type ClickHouse struct { Config *config.ClickHouseConfig - Log *apexLog.Entry conn driver.Conn version int isPartsColumnPresent int8 @@ -40,7 +42,7 @@ type ClickHouse struct { func (ch *ClickHouse) Connect() error { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Log.Errorf("close previous connection error: %v", err) + log.Error().Msgf("close previous connection error: %v", err) } } ch.IsOpen = false @@ -82,7 +84,7 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCert != "" || ch.Config.TLSKey != "" { cert, err := tls.LoadX509KeyPair(ch.Config.TLSCert, ch.Config.TLSKey) if err != nil { - ch.Log.Errorf("tls.LoadX509KeyPair error: %v", err) + log.Error().Msgf("tls.LoadX509KeyPair error: %v", err) return err } tlsConfig.Certificates = []tls.Certificate{cert} @@ -90,12 +92,12 @@ func (ch *ClickHouse) Connect() error { if ch.Config.TLSCa != "" { caCert, err := os.ReadFile(ch.Config.TLSCa) if err != nil { - ch.Log.Errorf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) + log.Error().Msgf("read `tls_ca` file %s return error: %v ", ch.Config.TLSCa, err) return err } caCertPool := x509.NewCertPool() if caCertPool.AppendCertsFromPEM(caCert) != true { - ch.Log.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) + log.Error().Msgf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) return fmt.Errorf("AppendCertsFromPEM %s return false", ch.Config.TLSCa) } tlsConfig.RootCAs = caCertPool @@ -107,9 +109,9 @@ func (ch *ClickHouse) Connect() error { opt.Settings["log_queries"] = 0 } - logFunc := ch.Log.Infof + logLevel := zerolog.InfoLevel if !ch.Config.LogSQLQueries { - logFunc = ch.Log.Debugf + logLevel = zerolog.DebugLevel } // infinite reconnect until success, fix https://github.com/Altinity/clickhouse-backup/issues/857 for { @@ -118,17 +120,17 @@ func (ch *ClickHouse) Connect() error { if err == nil { break } - ch.Log.Warnf("clickhouse connection: %s, sql.Open return error: %v, will wait 5 second to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Warn().Msgf("clickhouse connection: %s, sql.Open return error: %v, will wait 5 second to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) time.Sleep(5 * time.Second) } - logFunc("clickhouse connection prepared: %s run 
ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + log.WithLevel(logLevel).Msgf("clickhouse connection prepared: %s run ping", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) err = ch.conn.Ping(context.Background()) if err == nil { - logFunc("clickhouse connection success: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) + log.WithLevel(logLevel).Msgf("clickhouse connection success: %s", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port)) ch.IsOpen = true break } - ch.Log.Warnf("clickhouse connection ping: %s return error: %v, will wait 5 second to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) + log.Warn().Msgf("clickhouse connection ping: %s return error: %v, will wait 5 second to reconnect", fmt.Sprintf("tcp://%v:%v", ch.Config.Host, ch.Config.Port), err) time.Sleep(5 * time.Second) } @@ -311,13 +313,13 @@ func (ch *ClickHouse) getDisksFromSystemDisks(ctx context.Context) ([]Disk, erro func (ch *ClickHouse) Close() { if ch.IsOpen { if err := ch.conn.Close(); err != nil { - ch.Log.Warnf("can't close clickhouse connection: %v", err) + log.Warn().Msgf("can't close clickhouse connection: %v", err) } } if ch.Config.LogSQLQueries { - ch.Log.Info("clickhouse connection closed") + log.Info().Msg("clickhouse connection closed") } else { - ch.Log.Debug("clickhouse connection closed") + log.Debug().Msg("clickhouse connection closed") } ch.IsOpen = false } @@ -581,7 +583,7 @@ func (ch *ClickHouse) GetDatabases(ctx context.Context, cfg *config.Config, tabl var result string // 19.4 doesn't have /var/lib/clickhouse/metadata/default.sql if err := ch.SelectSingleRow(ctx, &result, showDatabaseSQL); err != nil { - ch.Log.Warnf("can't get create database query: %v", err) + log.Warn().Msgf("can't get create database query: %v", err) allDatabases[i].Query = fmt.Sprintf("CREATE DATABASE `%s` ENGINE = %s", db.Name, db.Engine) } else { // 23.3+ masked secrets https://github.com/Altinity/clickhouse-backup/issues/640 @@ -606,7 +608,7 @@ func (ch *ClickHouse) getTableSizeFromParts(ctx context.Context, table Table) ui } query := fmt.Sprintf("SELECT sum(bytes_on_disk) as size FROM system.parts WHERE active AND database='%s' AND table='%s' GROUP BY database, table", table.Database, table.Name) if err := ch.SelectContext(ctx, &tablesSize, query); err != nil { - ch.Log.Warnf("error parsing tablesSize: %v", err) + log.Warn().Msgf("error parsing tablesSize: %v", err) } if len(tablesSize) > 0 { return tablesSize[0].Size @@ -637,7 +639,7 @@ func (ch *ClickHouse) fixVariousVersions(ctx context.Context, t Table, metadataP if strings.Contains(t.CreateTableQuery, "'[HIDDEN]'") { tableSQLPath := path.Join(metadataPath, common.TablePathEncode(t.Database), common.TablePathEncode(t.Name)+".sql") if attachSQL, err := os.ReadFile(tableSQLPath); err != nil { - ch.Log.Warnf("can't read %s: %v", tableSQLPath, err) + log.Warn().Msgf("can't read %s: %v", tableSQLPath, err) } else { t.CreateTableQuery = strings.Replace(string(attachSQL), "ATTACH", "CREATE", 1) t.CreateTableQuery = strings.Replace(t.CreateTableQuery, " _ ", " `"+t.Database+"`.`"+t.Name+"` ", 1) @@ -656,7 +658,7 @@ func (ch *ClickHouse) GetVersion(ctx context.Context) (int, error) { var err error query := "SELECT value FROM `system`.`build_options` where name='VERSION_INTEGER'" if err = ch.SelectSingleRow(ctx, &result, query); err != nil { - ch.Log.Warnf("can't get ClickHouse version: %v", err) + log.Warn().Msgf("can't get ClickHouse version: %v", err) return 0, nil } ch.version, err = 
strconv.Atoi(result) @@ -687,7 +689,7 @@ func (ch *ClickHouse) FreezeTableByParts(ctx context.Context, table *Table, name withNameQuery = fmt.Sprintf("WITH NAME '%s'", name) } for _, item := range partitions { - ch.Log.Debugf(" partition '%v'", item.PartitionID) + log.Debug().Msgf(" partition '%v'", item.PartitionID) query := fmt.Sprintf( "ALTER TABLE `%v`.`%v` FREEZE PARTITION ID '%v' %s;", table.Database, @@ -705,7 +707,7 @@ func (ch *ClickHouse) FreezeTableByParts(ctx context.Context, table *Table, name } if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Log.Warnf("can't freeze partition: %v", err) + log.Warn().Msgf("can't freeze partition: %v", err) } else { return fmt.Errorf("can't freeze partition '%s': %w", item.PartitionID, err) } @@ -724,9 +726,9 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string if strings.HasPrefix(table.Engine, "Replicated") && ch.Config.SyncReplicatedTables { query := fmt.Sprintf("SYSTEM SYNC REPLICA `%s`.`%s`;", table.Database, table.Name) if err := ch.QueryContext(ctx, query); err != nil { - ch.Log.Warnf("can't sync replica: %v", err) + log.Warn().Msgf("can't sync replica: %v", err) } else { - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Debugf("replica synced") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Name)).Msg("replica synced") } } if version < 19001005 || ch.Config.FreezeByPart { @@ -739,7 +741,7 @@ func (ch *ClickHouse) FreezeTable(ctx context.Context, table *Table, name string query := fmt.Sprintf("ALTER TABLE `%s`.`%s` FREEZE %s;", table.Database, table.Name, withNameQuery) if err := ch.QueryContext(ctx, query); err != nil { if (strings.Contains(err.Error(), "code: 60") || strings.Contains(err.Error(), "code: 81") || strings.Contains(err.Error(), "code: 218")) && ch.Config.IgnoreNotExistsErrorDuringFreeze { - ch.Log.Warnf("can't freeze table: %v", err) + log.Warn().Msgf("can't freeze table: %v", err) return nil } return fmt.Errorf("can't freeze table: %v", err) @@ -769,7 +771,7 @@ func (ch *ClickHouse) AttachDataParts(table metadata.TableMetadata, dstTable Tab if err := ch.Query(query); err != nil { return err } - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).WithField("disk", disk).WithField("part", part.Name).Debug("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Str("disk", disk).Str("part", part.Name).Msg("attached") } } } @@ -782,7 +784,7 @@ var uuidRE = regexp.MustCompile(`UUID '([^']+)'`) // AttachTable - execute ATTACH TABLE command for specific table func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetadata, dstTable Table) error { if len(table.Parts) == 0 { - apexLog.Warnf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) + log.Warn().Msgf("no data parts for restore for `%s`.`%s`", table.Database, table.Table) return nil } if dstTable.Database != "" && dstTable.Database != table.Database { @@ -840,7 +842,7 @@ func (ch *ClickHouse) AttachTable(ctx context.Context, table metadata.TableMetad return err } } - ch.Log.WithField("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Debug("attached") + log.Debug().Str("table", fmt.Sprintf("%s.%s", table.Database, table.Table)).Msg("attached") return nil } func (ch *ClickHouse) ShowCreateTable(ctx context.Context, database, name string) 
string { @@ -976,7 +978,7 @@ func (ch *ClickHouse) CreateTable(table Table, query string, dropTable, ignoreDe if onCluster != "" && distributedRE.MatchString(query) { matches := distributedRE.FindAllStringSubmatch(query, -1) if onCluster != strings.Trim(matches[0][2], "'\" ") { - apexLog.Warnf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) + log.Warn().Msgf("Will replace cluster ENGINE=Distributed %s -> %s", matches[0][2], onCluster) query = distributedRE.ReplaceAllString(query, fmt.Sprintf("${1}(%s,${3})", onCluster)) } } @@ -1060,16 +1062,14 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args } func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string { - var logF func(msg string) + level := zerolog.InfoLevel if !ch.Config.LogSQLQueries { - logF = ch.Log.Debug - } else { - logF = ch.Log.Info + level = zerolog.DebugLevel } if len(args) > 0 { - logF(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) + log.WithLevel(level).Msgf(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args))) } else { - logF(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) + log.WithLevel(level).Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(query)) } return query } @@ -1091,7 +1091,7 @@ func (ch *ClickHouse) GetAccessManagementPath(ctx context.Context, disks []Disk) if err := ch.SelectContext(ctx, &rows, "SELECT JSONExtractString(params,'path') AS access_path FROM system.user_directories WHERE type='local directory'"); err != nil || len(rows) == 0 { configFile, doc, err := ch.ParseXML(ctx, "config.xml") if err != nil { - ch.Log.Warnf("can't parse config.xml from %s, error: %v", configFile, err) + log.Warn().Msgf("can't parse config.xml from %s, error: %v", configFile, err) } if err == nil { accessControlPathNode := doc.SelectElement("access_control_path") @@ -1224,7 +1224,7 @@ func (ch *ClickHouse) CheckReplicationInProgress(table metadata.TableMetadata) ( if existsReplicas[0].LogPointer > 1 || existsReplicas[0].LogMaxIndex > 1 || existsReplicas[0].AbsoluteDelay > 0 || existsReplicas[0].QueueSize > 0 { return false, fmt.Errorf("%s.%s can't restore cause system.replicas entries already exists and replication in progress from another replica, log_pointer=%d, log_max_index=%d, absolute_delay=%d, queue_size=%d", table.Database, table.Table, existsReplicas[0].LogPointer, existsReplicas[0].LogMaxIndex, existsReplicas[0].AbsoluteDelay, existsReplicas[0].QueueSize) } else { - ch.Log.Infof("replication_in_progress status = %+v", existsReplicas) + log.Info().Msgf("replication_in_progress status = %+v", existsReplicas) } } return true, nil @@ -1274,7 +1274,7 @@ func (ch *ClickHouse) CheckTypesConsistency(table *Table, partColumnsDataTypes [ uniqTypes[dataType] = struct{}{} } if len(uniqTypes) > 1 { - ch.Log.Errorf("`%s`.`%s` have incompatible data types %#v for \"%s\" column", table.Database, table.Name, partColumnsDataTypes[i].Types, partColumnsDataTypes[i].Column) + log.Error().Msgf("`%s`.`%s` have incompatible data types %#v for \"%s\" column", table.Database, table.Name, partColumnsDataTypes[i].Types, partColumnsDataTypes[i].Column) return fmt.Errorf("`%s`.`%s` have inconsistent data types for active data part in system.parts_columns", table.Database, table.Name) } } @@ -1371,6 +1371,6 @@ func (ch *ClickHouse) ExtractStoragePolicy(query string) string { if len(matches) > 0 { storagePolicy = matches[1] } - 
apexLog.Debugf("extract storage_policy: %s, query: %s", storagePolicy, query) + log.Debug().Msgf("extract storage_policy: %s, query: %s", storagePolicy, query) return storagePolicy } diff --git a/pkg/clickhouse/clickhouse_test.go b/pkg/clickhouse/clickhouse_test.go index 13eb7802..ac24f334 100644 --- a/pkg/clickhouse/clickhouse_test.go +++ b/pkg/clickhouse/clickhouse_test.go @@ -2,16 +2,13 @@ package clickhouse import ( "fmt" - apexLog "github.com/apex/log" "testing" "github.com/stretchr/testify/assert" ) func TestCheckTypesConsistency(t *testing.T) { - ch := ClickHouse{ - Log: apexLog.WithField("logger", "test"), - } + ch := ClickHouse{} table := &Table{ Database: "mydb", Name: "mytable", @@ -110,9 +107,7 @@ func TestCheckTypesConsistency(t *testing.T) { } func TestExtractStoragePolicy(t *testing.T) { - ch := ClickHouse{ - Log: apexLog.WithField("logger", "test"), - } + ch := ClickHouse{} testCases := map[string]string{ "CREATE TABLE `_test.ДБ_atomic__TestIntegrationS3`.test_s3_TestIntegrationS3 UUID '8135780b-0c9a-46a7-94fd-2aebb701eff6' (`id` UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/_test.ДБ_atomic__TestIntegrationS3/test_s3_TestIntegrationS3', '{replica}') ORDER BY id SETTINGS storage_policy = 's3_only', index_granularity = 8192": "s3_only", diff --git a/pkg/config/config.go b/pkg/config/config.go index 246e281d..b4556c18 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -10,9 +10,10 @@ import ( "strings" "time" - "github.com/apex/log" + "github.com/Altinity/clickhouse-backup/v2/pkg/log_helper" s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/kelseyhightower/envconfig" + "github.com/rs/zerolog/log" "github.com/urfave/cli" "gopkg.in/yaml.v3" ) @@ -347,7 +348,7 @@ func LoadConfig(configLocation string) (*Config, error) { } - log.SetLevelFromString(cfg.General.LogLevel) + log_helper.SetLogLevelFromString(cfg.General.LogLevel) if err = ValidateConfig(cfg); err != nil { return cfg, err @@ -533,8 +534,8 @@ func DefaultConfig() *Config { FullInterval: "24h", FullDuration: 24 * time.Hour, WatchBackupNameTemplate: "shard{shard}-{type}-{time:20060102150405}", - RestoreDatabaseMapping: make(map[string]string, 0), - RestoreTableMapping: make(map[string]string, 0), + RestoreDatabaseMapping: make(map[string]string), + RestoreTableMapping: make(map[string]string), IONicePriority: "idle", CPUNicePriority: 15, RBACBackupAlways: true, @@ -632,7 +633,7 @@ func GetConfigFromCli(ctx *cli.Context) *Config { configPath := GetConfigPath(ctx) cfg, err := LoadConfig(configPath) if err != nil { - log.Fatal(err.Error()) + log.Fatal().Stack().Err(err).Send() } RestoreEnvVars(oldEnvValues) return cfg @@ -665,14 +666,14 @@ func OverrideEnvVars(ctx *cli.Context) map[string]oldEnvValues { if len(envVariable) < 2 { envVariable = append(envVariable, "true") } - log.Infof("override %s=%s", envVariable[0], envVariable[1]) + log.Info().Msgf("override %s=%s", envVariable[0], envVariable[1]) oldValue, wasPresent := os.LookupEnv(envVariable[0]) oldValues[envVariable[0]] = oldEnvValues{ OldValue: oldValue, WasPresent: wasPresent, } if err := os.Setenv(envVariable[0], envVariable[1]); err != nil { - log.Warnf("can't override %s=%s, error: %v", envVariable[0], envVariable[1], err) + log.Warn().Msgf("can't override %s=%s, error: %v", envVariable[0], envVariable[1], err) } } } @@ -683,11 +684,11 @@ func RestoreEnvVars(envVars map[string]oldEnvValues) { for name, oldEnv := range envVars { if oldEnv.WasPresent { if err := os.Setenv(name, oldEnv.OldValue); err != nil 
{ - log.Warnf("RestoreEnvVars can't restore %s=%s, error: %v", name, oldEnv.OldValue, err) + log.Warn().Msgf("RestoreEnvVars can't restore %s=%s, error: %v", name, oldEnv.OldValue, err) } } else { if err := os.Unsetenv(name); err != nil { - log.Warnf("RestoreEnvVars can't delete %s, error: %v", name, err) + log.Warn().Msgf("RestoreEnvVars can't delete %s, error: %v", name, err) } } } diff --git a/pkg/custom/delete_custom.go b/pkg/custom/delete_custom.go index 62817d30..7c06d26a 100644 --- a/pkg/custom/delete_custom.go +++ b/pkg/custom/delete_custom.go @@ -5,7 +5,7 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - "github.com/apex/log" + "github.com/rs/zerolog/log" "time" ) @@ -25,17 +25,17 @@ func DeleteRemote(ctx context.Context, cfg *config.Config, backupName string) er args := ApplyCommandTemplate(cfg.Custom.DeleteCommand, templateData) err := utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) if err == nil { - log.WithFields(log.Fields{ + log.Info().Fields(map[string]interface{}{ "backup": backupName, "operation": "delete_custom", "duration": utils.HumanizeDuration(time.Since(startCustomDelete)), - }).Info("done") + }).Msg("done") return nil } else { - log.WithFields(log.Fields{ + log.Error().Fields(map[string]interface{}{ "backup": backupName, "operation": "delete_custom", - }).Error(err.Error()) + }).Msg(err.Error()) return err } diff --git a/pkg/custom/download_custom.go b/pkg/custom/download_custom.go index ef0b32cb..a774ec3b 100644 --- a/pkg/custom/download_custom.go +++ b/pkg/custom/download_custom.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - "github.com/apex/log" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog/log" "time" ) @@ -39,15 +39,15 @@ func Download(ctx context.Context, cfg *config.Config, backupName string, tableP return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) }) if err == nil { - log. - WithField("operation", "download_custom"). - WithField("duration", utils.HumanizeDuration(time.Since(startCustomDownload))). - Info("done") + log.Info(). + Str("operation", "download_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomDownload))). + Msg("done") return nil } else { - log. - WithField("operation", "download_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "download_custom"). + Err(err).Send() return err } } diff --git a/pkg/custom/list_custom.go b/pkg/custom/list_custom.go index cd418a8b..b740add6 100644 --- a/pkg/custom/list_custom.go +++ b/pkg/custom/list_custom.go @@ -7,7 +7,7 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - "github.com/apex/log" + "github.com/rs/zerolog/log" "strings" "time" ) @@ -37,15 +37,15 @@ func List(ctx context.Context, cfg *config.Config) ([]storage.Backup, error) { } } } - log. - WithField("operation", "list_custom"). - WithField("duration", utils.HumanizeDuration(time.Since(startCustomList))). - Info("done") + log.Info(). + Str("operation", "list_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomList))). + Msg("done") return backupList, nil } else { - log. - WithField("operation", "list_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "list_custom"). 
+ Err(err).Send() return nil, err } } diff --git a/pkg/custom/upload_custom.go b/pkg/custom/upload_custom.go index 91d7dbdc..f3383b81 100644 --- a/pkg/custom/upload_custom.go +++ b/pkg/custom/upload_custom.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" - "github.com/apex/log" "github.com/eapache/go-resiliency/retrier" + "github.com/rs/zerolog/log" "time" ) @@ -45,15 +45,15 @@ func Upload(ctx context.Context, cfg *config.Config, backupName, diffFrom, diffF return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...) }) if err == nil { - log. - WithField("operation", "upload_custom"). - WithField("duration", utils.HumanizeDuration(time.Since(startCustomUpload))). - Info("done") + log.Info(). + Str("operation", "upload_custom"). + Str("duration", utils.HumanizeDuration(time.Since(startCustomUpload))). + Msg("done") return nil } else { - log. - WithField("operation", "upload_custom"). - Error(err.Error()) + log.Error(). + Str("operation", "upload_custom"). + Err(err).Send() return err } } diff --git a/pkg/custom/utils.go b/pkg/custom/utils.go index 245dd5f5..498ae3b7 100644 --- a/pkg/custom/utils.go +++ b/pkg/custom/utils.go @@ -2,8 +2,8 @@ package custom import ( "bytes" - "github.com/apex/log" "github.com/google/shlex" + "github.com/rs/zerolog/log" "text/template" ) @@ -11,18 +11,18 @@ func ApplyCommandTemplate(command string, templateData interface{}) []string { var b bytes.Buffer tpl, err := template.New("").Parse(command) if err != nil { - log.Warnf("custom command template.Parse error: %v", err) + log.Warn().Msgf("custom command template.Parse error: %v", err) return []string{command} } err = tpl.Execute(&b, templateData) if err != nil { - log.Warnf("custom command template.Execute error: %v", err) + log.Warn().Msgf("custom command template.Execute error: %v", err) return []string{command} } args, err := shlex.Split(b.String()) if err != nil { - log.Warnf("parse shell command %s error: %v", b.String(), err) + log.Warn().Msgf("parse shell command %s error: %v", b.String(), err) return []string{command} } return args diff --git a/pkg/filesystemhelper/filesystemhelper.go b/pkg/filesystemhelper/filesystemhelper.go index 304d4d5b..26a7245e 100644 --- a/pkg/filesystemhelper/filesystemhelper.go +++ b/pkg/filesystemhelper/filesystemhelper.go @@ -2,7 +2,6 @@ package filesystemhelper import ( "fmt" - "github.com/Altinity/clickhouse-backup/v2/pkg/utils" "net/url" "os" "path" @@ -15,7 +14,8 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/v2/pkg/common" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" - apexLog "github.com/apex/log" + "github.com/Altinity/clickhouse-backup/v2/pkg/utils" + "github.com/rs/zerolog/log" ) var ( @@ -116,7 +116,6 @@ func MkdirAll(path string, ch *clickhouse.ClickHouse, disks []clickhouse.Disk) e // HardlinkBackupPartsToStorage - copy partitions for specific table to detached folder func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableMetadata, disks []clickhouse.Disk, diskMap map[string]string, tableDataPaths []string, ch *clickhouse.ClickHouse, toDetached bool) error { - log := apexLog.WithFields(apexLog.Fields{"operation": "HardlinkBackupPartsToStorage"}) start := time.Now() dstDataPaths := clickhouse.GetDisksByPaths(disks, tableDataPaths) dbAndTableDir := path.Join(common.TablePathEncode(backupTable.Database), common.TablePathEncode(backupTable.Table)) @@ -159,9 
+158,9 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM info, err := os.Stat(dstPartPath) if err != nil { if os.IsNotExist(err) { - log.Debugf("MkDirAll %s", dstPartPath) + log.Debug().Msgf("MkDirAll %s", dstPartPath) if mkdirErr := MkdirAll(dstPartPath, ch, disks); mkdirErr != nil { - log.Warnf("error during Mkdir %+v", mkdirErr) + log.Warn().Msgf("error during Mkdir %+v", mkdirErr) } } else { return err @@ -181,14 +180,14 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM filename := strings.Trim(strings.TrimPrefix(filePath, srcPartPath), "/") dstFilePath := filepath.Join(dstPartPath, filename) if info.IsDir() { - log.Debugf("MkDir %s", dstFilePath) + log.Debug().Msgf("MkDir %s", dstFilePath) return Mkdir(dstFilePath, ch, disks) } if !info.Mode().IsRegular() { - log.Debugf("'%s' is not a regular file, skipping.", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping.", filePath) return nil } - log.Debugf("Link %s -> %s", filePath, dstFilePath) + log.Debug().Msgf("Link %s -> %s", filePath, dstFilePath) if err := os.Link(filePath, dstFilePath); err != nil { if !os.IsExist(err) { return fmt.Errorf("failed to create hard link '%s' -> '%s': %w", filePath, dstFilePath, err) @@ -200,7 +199,7 @@ func HardlinkBackupPartsToStorage(backupName string, backupTable metadata.TableM } } } - log.WithField("duration", utils.HumanizeDuration(time.Since(start))).Debugf("done") + log.Debug().Str("duration", utils.HumanizeDuration(time.Since(start))).Msg("done") return nil } @@ -213,8 +212,8 @@ func IsPartInPartition(partName string, partitionsBackupMap common.EmptyMap) boo if matched, err := filepath.Match(pattern, partitionId); err == nil && matched { return true } else if err != nil { - apexLog.Warnf("error filepath.Match(%s, %s) error: %v", pattern, partitionId, err) - apexLog.Debugf("%s not found in %s, file will filtered", partitionId, partitionsBackupMap) + log.Warn().Msgf("error filepath.Match(%s, %s) error: %v", pattern, partitionId, err) + log.Debug().Msgf("%s not found in %s, file will filtered", partitionId, partitionsBackupMap) return false } } @@ -227,8 +226,8 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa if strings.Contains(fileName, "%") { decodedFileName, err := url.QueryUnescape(fileName) if err != nil { - apexLog.Warnf("error decoding %s: %v", fileName, err) - apexLog.Debugf("%s not found in %s, file will filtered", fileName, partitionsBackupMap) + log.Warn().Msgf("error decoding %s: %v", fileName, err) + log.Debug().Msgf("%s not found in %s, file will filtered", fileName, partitionsBackupMap) return false } fileName = decodedFileName @@ -240,8 +239,8 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa if matched, err := filepath.Match(pattern, fileName); err == nil && matched { return true } else if err != nil { - apexLog.Warnf("error filepath.Match(%s, %s) error: %v", pattern, fileName, err) - apexLog.Debugf("%s not found in %s, file will filtered", fileName, partitionsBackupMap) + log.Warn().Msgf("error filepath.Match(%s, %s) error: %v", pattern, fileName, err) + log.Debug().Msgf("%s not found in %s, file will filtered", fileName, partitionsBackupMap) return false } } @@ -249,7 +248,6 @@ func IsFileInPartition(disk, fileName string, partitionsBackupMap common.EmptyMa } func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap common.EmptyMap, tableDiffFromRemote metadata.TableMetadata, disk clickhouse.Disk, version 
int) ([]metadata.Part, int64, error) { - log := apexLog.WithField("logger", "MoveShadowToBackup") size := int64(0) parts := make([]metadata.Part, 0) err := filepath.Walk(shadowPath, func(filePath string, info os.FileInfo, err error) error { @@ -288,7 +286,7 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap return os.MkdirAll(dstFilePath, 0750) } if !info.Mode().IsRegular() { - log.Debugf("'%s' is not a regular file, skipping", filePath) + log.Debug().Msgf("'%s' is not a regular file, skipping", filePath) return nil } size += info.Size() @@ -325,14 +323,13 @@ func addRequiredPartIfNotExists(parts []metadata.Part, relativePath string, tabl } func IsDuplicatedParts(part1, part2 string) error { - log := apexLog.WithField("logger", "IsDuplicatedParts") p1, err := os.Open(part1) if err != nil { return err } defer func() { if err = p1.Close(); err != nil { - log.Warnf("Can't close %s", part1) + log.Warn().Msgf("Can't close %s", part1) } }() p2, err := os.Open(part2) @@ -341,7 +338,7 @@ func IsDuplicatedParts(part1, part2 string) error { } defer func() { if err = p2.Close(); err != nil { - log.Warnf("Can't close %s", part2) + log.Warn().Msgf("Can't close %s", part2) } }() pf1, err := p1.Readdirnames(-1) diff --git a/pkg/keeper/keeper.go b/pkg/keeper/keeper.go index 20083732..20cc42af 100644 --- a/pkg/keeper/keeper.go +++ b/pkg/keeper/keeper.go @@ -6,7 +6,8 @@ import ( "encoding/json" "fmt" "github.com/antchfx/xmlquery" - "github.com/apex/log" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "os" "path" "strconv" @@ -18,21 +19,21 @@ import ( ) type LogKeeperToApexLogAdapter struct { - apexLog *log.Logger + log zerolog.Logger } -func newKeeperLogger(log *log.Entry) LogKeeperToApexLogAdapter { +func newKeeperLogger() LogKeeperToApexLogAdapter { return LogKeeperToApexLogAdapter{ - apexLog: log.Logger, + log: log.Logger, } } func (KeeperLogToApexLogAdapter LogKeeperToApexLogAdapter) Printf(msg string, args ...interface{}) { msg = fmt.Sprintf("[keeper] %s", msg) if len(args) > 0 { - KeeperLogToApexLogAdapter.apexLog.Debugf(msg, args...) + KeeperLogToApexLogAdapter.log.Debug().Msgf(msg, args...) 
} else { - KeeperLogToApexLogAdapter.apexLog.Debug(msg) + KeeperLogToApexLogAdapter.log.Debug().Msg(msg) } } @@ -43,7 +44,6 @@ type DumpNode struct { type Keeper struct { conn *zk.Conn - Log *log.Entry root string doc *xmlquery.Node xmlConfigFile string @@ -66,7 +66,7 @@ func (k *Keeper) Connect(ctx context.Context, ch *clickhouse.ClickHouse) error { if sessionTimeoutMs, err := strconv.ParseInt(sessionTimeoutMsNode.InnerText(), 10, 64); err == nil { sessionTimeout = time.Duration(sessionTimeoutMs) * time.Millisecond } else { - k.Log.Warnf("can't parse /zookeeper/session_timeout_ms in %s, value: %v, error: %v ", configFile, sessionTimeoutMsNode.InnerText(), err) + log.Warn().Msgf("can't parse /zookeeper/session_timeout_ms in %s, value: %v, error: %v ", configFile, sessionTimeoutMsNode.InnerText(), err) } } nodeList := zookeeperNode.SelectElements("node") @@ -86,7 +86,7 @@ func (k *Keeper) Connect(ctx context.Context, ch *clickhouse.ClickHouse) error { } keeperHosts[i] = fmt.Sprintf("%s:%s", hostNode.InnerText(), port) } - conn, _, err := zk.Connect(keeperHosts, sessionTimeout, zk.WithLogger(newKeeperLogger(k.Log))) + conn, _, err := zk.Connect(keeperHosts, sessionTimeout, zk.WithLogger(newKeeperLogger())) if err != nil { return err } @@ -118,7 +118,7 @@ func (k *Keeper) Dump(prefix, dumpFile string) (int, error) { } defer func() { if err = f.Close(); err != nil { - k.Log.Warnf("can't close %s: %v", dumpFile, err) + log.Warn().Msgf("can't close %s: %v", dumpFile, err) } }() if !strings.HasPrefix(prefix, "/") && k.root != "" { @@ -179,7 +179,7 @@ func (k *Keeper) Restore(dumpFile, prefix string) error { } defer func() { if err = f.Close(); err != nil { - k.Log.Warnf("can't close %s: %v", dumpFile, err) + log.Warn().Msgf("can't close %s: %v", dumpFile, err) } }() if !strings.HasPrefix(prefix, "/") && k.root != "" { @@ -216,7 +216,7 @@ type WalkCallBack = func(node DumpNode) (bool, error) func (k *Keeper) Walk(prefix, relativePath string, recursive bool, callback WalkCallBack) error { nodePath := path.Join(prefix, relativePath) value, stat, err := k.conn.Get(nodePath) - k.Log.Debugf("Walk->get(%s) = %v, err = %v", nodePath, string(value), err) + log.Debug().Msgf("Walk->get(%s) = %v, err = %v", nodePath, string(value), err) if err != nil { return err } diff --git a/pkg/log_helper/log_level.go b/pkg/log_helper/log_level.go new file mode 100644 index 00000000..f9fbbaf8 --- /dev/null +++ b/pkg/log_helper/log_level.go @@ -0,0 +1,22 @@ +package log_helper + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func SetLogLevelFromString(logLevel string) { + allLogLevels := map[string]zerolog.Level{ + "error": zerolog.ErrorLevel, + "warning": zerolog.WarnLevel, + "info": zerolog.InfoLevel, + "debug": zerolog.DebugLevel, + } + level := zerolog.InfoLevel + var ok bool + if level, ok = allLogLevels[logLevel]; !ok { + log.Warn().Msgf("unexpected log_level=%v, will apply `info`", logLevel) + level = zerolog.InfoLevel + } + zerolog.SetGlobalLevel(level) +} diff --git a/pkg/logcli/cli.go b/pkg/logcli/cli.go deleted file mode 100644 index bba80cfb..00000000 --- a/pkg/logcli/cli.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package logcli implements a colored text handler suitable for command-line interfaces. -package logcli - -import ( - "fmt" - "github.com/apex/log" - "io" - "os" - "sync" -) - -// Strings mapping. 
-var Strings = [...]string{ - log.DebugLevel: "debug", - log.InfoLevel: " info", - log.WarnLevel: " warn", - log.ErrorLevel: "error", - log.FatalLevel: "error", -} - -// Handler implementation. -type Handler struct { - mu sync.Mutex - Writer io.Writer - Padding int -} - -// New handler. -func New(w io.Writer) *Handler { - if f, ok := w.(*os.File); ok { - return &Handler{ - Writer: f, - Padding: 3, - } - } - - return &Handler{ - Writer: w, - Padding: 3, - } -} - -// HandleLog implements log.Handler. -func (h *Handler) HandleLog(e *log.Entry) error { - level := Strings[e.Level] - names := e.Fields.Names() - - h.mu.Lock() - defer h.mu.Unlock() - - _, _ = fmt.Fprintf(h.Writer, "%s %-5s %-25s", e.Timestamp.Format("2006/01/02 15:04:05.000000"), level, e.Message) - - for _, name := range names { - if name == "source" { - continue - } - _, _ = fmt.Fprintf(h.Writer, " %s=%v", name, e.Fields.Get(name)) - } - - _, _ = fmt.Fprintln(h.Writer) - - return nil -} diff --git a/pkg/logfmt/logfmt.go b/pkg/logfmt/logfmt.go deleted file mode 100644 index e92ddbd2..00000000 --- a/pkg/logfmt/logfmt.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package logfmt implements a "logfmt" format handler. -package logfmt - -import ( - "io" - "sync" - - "github.com/apex/log" - "github.com/go-logfmt/logfmt" -) - -// Handler implementation. -type Handler struct { - mu sync.Mutex - enc *logfmt.Encoder -} - -// New handler. -func New(w io.Writer) *Handler { - return &Handler{ - enc: logfmt.NewEncoder(w), - } -} - -// HandleLog implements log.Handler. -func (h *Handler) HandleLog(e *log.Entry) error { - names := e.Fields.Names() - - h.mu.Lock() - defer h.mu.Unlock() - - _ = h.enc.EncodeKeyval("ts", e.Timestamp) - _ = h.enc.EncodeKeyval("lvl", e.Level.String()) - _ = h.enc.EncodeKeyval("msg", e.Message) - - for _, name := range names { - _ = h.enc.EncodeKeyval(name, e.Fields.Get(name)) - } - - _ = h.enc.EndRecord() - - return nil -} diff --git a/pkg/logfmt/logfmt_test.go b/pkg/logfmt/logfmt_test.go deleted file mode 100644 index 601ed5e2..00000000 --- a/pkg/logfmt/logfmt_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package logfmt_test - -import ( - "bytes" - "github.com/Altinity/clickhouse-backup/v2/pkg/logfmt" - "io" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/apex/log" -) - -func init() { - log.Now = func() time.Time { - return time.Unix(0, 0).UTC() - } -} - -func TestLogFmt(t *testing.T) { - var buf bytes.Buffer - - log.SetHandler(logfmt.New(&buf)) - log.WithField("user", "tj").WithField("id", "123").Info("hello") - log.Info("world") - log.Error("boom") - - expected := `ts=1970-01-01T00:00:00Z lvl=info msg=hello id=123 user=tj -ts=1970-01-01T00:00:00Z lvl=info msg=world -ts=1970-01-01T00:00:00Z lvl=error msg=boom -` - - assert.Equal(t, expected, buf.String()) -} - -func Benchmark(b *testing.B) { - log.SetHandler(logfmt.New(io.Discard)) - ctx := log.WithField("user", "tj").WithField("id", "123") - - for i := 0; i < b.N; i++ { - ctx.Info("hello") - } -} diff --git a/pkg/partition/partition.go b/pkg/partition/partition.go index 0357d150..eda15102 100644 --- a/pkg/partition/partition.go +++ b/pkg/partition/partition.go @@ -6,9 +6,9 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/v2/pkg/common" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" - apexLog "github.com/apex/log" "github.com/google/uuid" "github.com/pkg/errors" + "github.com/rs/zerolog/log" "path/filepath" "regexp" "sort" @@ -117,11 +117,11 @@ func GetPartitionIdAndName(ctx 
context.Context, ch *clickhouse.ClickHouse, datab } defer func() { if dropErr := dropPartitionIdTable(ch, database, partitionIdTable); dropErr != nil { - apexLog.Warnf("partition.GetPartitionId can't drop `%s`.`%s`: %v", database, partitionIdTable, dropErr) + log.Warn().Msgf("partition.GetPartitionId can't drop `%s`.`%s`: %v", database, partitionIdTable, dropErr) } }() if len(columns) == 0 { - apexLog.Warnf("is_in_partition_key=1 fields not found in system.columns for table `%s`.`%s`", database, partitionIdTable) + log.Warn().Msgf("is_in_partition_key=1 fields not found in system.columns for table `%s`.`%s`", database, partitionIdTable) return "", "", nil } partitionInsert := splitAndParsePartition(partition) @@ -216,7 +216,7 @@ func ConvertPartitionsToIdsMapAndNamesList(ctx context.Context, ch *clickhouse.C for _, t := range tablesFromClickHouse { createIdMapAndNameListIfNotExists(t.Database, t.Name, partitionsIdMap, partitionsNameList) if partitionId, partitionName, err := GetPartitionIdAndName(ctx, ch, t.Database, t.Name, t.CreateTableQuery, partitionTuple); err != nil { - apexLog.Fatalf("partition.GetPartitionIdAndName error: %v", err) + log.Fatal().Msgf("partition.GetPartitionIdAndName error: %v", err) } else if partitionId != "" { addItemToIdMapAndNameListIfNotExists(partitionId, partitionName, t.Database, t.Name, partitionsIdMap, partitionsNameList, tablePattern) } @@ -224,7 +224,7 @@ func ConvertPartitionsToIdsMapAndNamesList(ctx context.Context, ch *clickhouse.C for _, t := range tablesFromMetadata { createIdMapAndNameListIfNotExists(t.Database, t.Table, partitionsIdMap, partitionsNameList) if partitionId, partitionName, err := GetPartitionIdAndName(ctx, ch, t.Database, t.Table, t.Query, partitionTuple); err != nil { - apexLog.Fatalf("partition.GetPartitionIdAndName error: %v", err) + log.Fatal().Msgf("partition.GetPartitionIdAndName error: %v", err) } else if partitionId != "" { addItemToIdMapAndNameListIfNotExists(partitionId, partitionName, t.Database, t.Table, partitionsIdMap, partitionsNameList, tablePattern) } @@ -263,13 +263,13 @@ func addItemToIdMapAndNameListIfNotExists(partitionId, partitionName, database, }], partitionName) } } else if err != nil { - apexLog.Errorf("wrong --partitions table specific pattern matching: %v", err) + log.Error().Msgf("wrong --partitions table specific pattern matching: %v", err) } } func createIdMapAndNameListIfNotExists(database, table string, partitionsIdsMap map[metadata.TableTitle]common.EmptyMap, partitionsNameList map[metadata.TableTitle][]string) { if _, exists := partitionsIdsMap[metadata.TableTitle{Database: database, Table: table}]; !exists { - partitionsIdsMap[metadata.TableTitle{Database: database, Table: table}] = make(common.EmptyMap, 0) + partitionsIdsMap[metadata.TableTitle{Database: database, Table: table}] = make(common.EmptyMap) } if _, exists := partitionsNameList[metadata.TableTitle{Database: database, Table: table}]; !exists { partitionsNameList[metadata.TableTitle{Database: database, Table: table}] = make([]string, 0) diff --git a/pkg/resumable/state.go b/pkg/resumable/state.go index 04034cfb..47689992 100644 --- a/pkg/resumable/state.go +++ b/pkg/resumable/state.go @@ -3,19 +3,19 @@ package resumable import ( "encoding/json" "fmt" - apexLog "github.com/apex/log" "os" "path" "strconv" "strings" "sync" + + "github.com/rs/zerolog/log" ) type State struct { stateFile string currentState string params map[string]interface{} - log *apexLog.Entry fp *os.File mx *sync.RWMutex } @@ -25,11 +25,10 @@ func 
NewState(defaultDiskPath, backupName, command string, params map[string]int stateFile: path.Join(defaultDiskPath, "backup", backupName, fmt.Sprintf("%s.state", command)), currentState: "", mx: &sync.RWMutex{}, - log: apexLog.WithField("logger", "resumable"), } fp, err := os.OpenFile(s.stateFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644) if err != nil && !os.IsNotExist(err) { - s.log.Warnf("can't open %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't open %s error: %v", s.stateFile, err) } s.fp = fp s.LoadState() @@ -55,7 +54,7 @@ func (s *State) LoadParams() { //size 0 during write lines[0] = strings.TrimSuffix(lines[0], ":0") if err := json.Unmarshal([]byte(lines[0]), &s.params); err != nil { - apexLog.Errorf("can't parse state file line 0 as []interface{}: %s", lines[0]) + log.Error().Msgf("can't parse state file line 0 as []interface{}: %s", lines[0]) } } @@ -67,9 +66,9 @@ func (s *State) LoadState() { } else { s.currentState = "" if !os.IsNotExist(err) { - s.log.Warnf("can't read %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't read %s error: %v", s.stateFile, err) } else { - s.log.Warnf("%s empty, will continue from scratch error: %v", s.stateFile, err) + log.Warn().Msgf("%s empty, will continue from scratch error: %v", s.stateFile, err) } } s.mx.Unlock() @@ -81,11 +80,11 @@ func (s *State) AppendToState(path string, size int64) { if s.fp != nil { _, err := s.fp.WriteString(path + "\n") if err != nil { - s.log.Warnf("can't write %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't write %s error: %v", s.stateFile, err) } err = s.fp.Sync() if err != nil { - s.log.Warnf("can't sync %s error: %v", s.stateFile, err) + log.Warn().Msgf("can't sync %s error: %v", s.stateFile, err) } } s.currentState += path + "\n" @@ -102,12 +101,13 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) { s.mx.RLock() res := strings.Index(s.currentState, path+":") if res >= 0 { - s.log.Infof("%s already processed", path) + // a shared struct-level logger is not thread-safe, see https://github.com/rs/zerolog/issues/242 + log.Info().Msgf("%s already processed", path) sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")] sSize = sSize[strings.Index(sSize, ":")+1:] size, err = strconv.ParseInt(sSize, 10, 64) if err != nil { - s.log.Warnf("invalid size %s in upload state: %v", sSize, err) + log.Warn().Msgf("invalid size %s in upload state: %v", sSize, err) } } s.mx.RUnlock() diff --git a/pkg/server/callback_test.go b/pkg/server/callback_test.go index b630dd4e..2e8fdcd1 100644 --- a/pkg/server/callback_test.go +++ b/pkg/server/callback_test.go @@ -28,7 +28,7 @@ func TestParseCallback(t *testing.T) { goodChan2 := make(chan *payload, 5) passToChanHandler := func(ch chan *payload) http.HandlerFunc { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { defer func() { if err := r.Body.Close(); err != nil { t.Fatalf("can't close r.Body: %v", err) } }() @@ -43,7 +43,7 @@ func TestParseCallback(t *testing.T) { if _, err := w.Write(nil); err != nil { t.Fatalf("unexpected error while writing response from test server: %v", err) } - }) + } } returnErrHandler := http.HandlerFunc( func(w http.ResponseWriter, _ *http.Request) { diff --git a/pkg/server/metrics/metrics.go b/pkg/server/metrics/metrics.go index 5809c414..597886a6 100644 --- a/pkg/server/metrics/metrics.go +++ b/pkg/server/metrics/metrics.go @@ -2,9 +2,10 @@ package metrics import ( "fmt" - apexLog "github.com/apex/log" - 
"github.com/prometheus/client_golang/prometheus" "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog/log" ) type APIMetricsInterface interface { @@ -33,7 +34,6 @@ type APIMetrics struct { InProgressCommands prometheus.Gauge SubCommands map[string][]string - log *apexLog.Entry } func NewAPIMetrics() *APIMetrics { @@ -42,7 +42,6 @@ func NewAPIMetrics() *APIMetrics { "create_remote": {"create", "upload"}, "restore_remote": {"download", "restore"}, }, - log: apexLog.WithField("logger", "metrics"), } return metrics } @@ -183,7 +182,7 @@ func (m *APIMetrics) Start(command string, startTime time.Time) { } } } else { - m.log.Warnf("%s not found in LastStart metrics", command) + log.Warn().Msgf("%s not found in LastStart metrics", command) } } @@ -201,19 +200,19 @@ func (m *APIMetrics) Finish(command string, startTime time.Time) { } } } else { - m.log.Warnf("%s not found in LastFinish", command) + log.Warn().Msgf("%s not found in LastFinish", command) } } func (m *APIMetrics) Success(command string) { if _, exists := m.SuccessfulCounter[command]; exists { m.SuccessfulCounter[command].Inc() } else { - m.log.Warnf("%s not found in SuccessfulCounter metrics", command) + log.Warn().Msgf("%s not found in SuccessfulCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(1) } else { - m.log.Warnf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -221,12 +220,12 @@ func (m *APIMetrics) Failure(command string) { if _, exists := m.FailedCounter[command]; exists { m.FailedCounter[command].Inc() } else { - m.log.Warnf("%s not found in FailedCounter metrics", command) + log.Warn().Msgf("%s not found in FailedCounter metrics", command) } if _, exists := m.LastStatus[command]; exists { m.LastStatus[command].Set(0) } else { - m.log.Warnf("%s not found in LastStatus metrics", command) + log.Warn().Msgf("%s not found in LastStatus metrics", command) } } @@ -236,7 +235,7 @@ func (m *APIMetrics) ExecuteWithMetrics(command string, errCounter int, f func() err := f() m.Finish(command, startTime) if err != nil { - m.log.Errorf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) + log.Error().Msgf("metrics.ExecuteWithMetrics(%s) return error: %v", command, err) errCounter += 1 m.Failure(command) } else { diff --git a/pkg/server/server.go b/pkg/server/server.go index 6f332a41..11cd3257 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -22,10 +22,10 @@ import ( "syscall" "time" - apexLog "github.com/apex/log" "github.com/google/shlex" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/zerolog/log" "github.com/urfave/cli" "github.com/Altinity/clickhouse-backup/v2/pkg/backup" @@ -47,7 +47,6 @@ type APIServer struct { restart chan struct{} stop chan struct{} metrics *metrics.APIMetrics - log *apexLog.Entry routes []string clickhouseBackupVersion string } @@ -58,25 +57,23 @@ var ( // Run - expose CLI commands as REST API func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBackupVersion string) error { - log := apexLog.WithField("logger", "server.Run") var ( cfg *config.Config err error ) - log.Debug("Wait for ClickHouse") + log.Debug().Msg("Wait for ClickHouse") for { cfg, err = config.LoadConfig(configPath) if err != nil { - log.Error(err.Error()) + log.Error().Stack().Err(err).Send() time.Sleep(5 * time.Second) continue } ch := clickhouse.ClickHouse{ Config: &cfg.ClickHouse, - Log: 
apexLog.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { - log.Error(err.Error()) + log.Error().Stack().Err(err).Send() time.Sleep(5 * time.Second) continue } @@ -91,17 +88,16 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack restart: make(chan struct{}), clickhouseBackupVersion: clickhouseBackupVersion, metrics: metrics.NewAPIMetrics(), - log: apexLog.WithField("logger", "server"), stop: make(chan struct{}), } if cfg.API.CreateIntegrationTables { if err := api.CreateIntegrationTables(); err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() } } api.metrics.RegisterMetrics() - log.Infof("Starting API server %s on %s", api.cliApp.Version, api.config.API.ListenAddr) + log.Info().Msgf("Starting API server %s on %s", api.cliApp.Version, api.config.API.ListenAddr) sigterm := make(chan os.Signal, 1) signal.Notify(sigterm, os.Interrupt, syscall.SIGTERM) sighup := make(chan os.Signal, 1) @@ -112,14 +108,14 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack if api.config.API.CompleteResumableAfterRestart { go func() { if err := api.ResumeOperationsAfterRestart(); err != nil { - log.Errorf("ResumeOperationsAfterRestart return error: %v", err) + log.Error().Msgf("ResumeOperationsAfterRestart return error: %v", err) } }() } go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), false); metricsErr != nil { - log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() @@ -131,21 +127,21 @@ func Run(cliCtx *cli.Context, cliApp *cli.App, configPath string, clickhouseBack select { case <-api.restart: if err := api.Restart(); err != nil { - log.Errorf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - log.Infof("Reloaded by HTTP") + log.Info().Msgf("Reloaded by HTTP") case <-sighup: if err := api.Restart(); err != nil { - log.Errorf("Failed to restarting API server: %v", err) + log.Error().Msgf("Failed to restarting API server: %v", err) continue } - log.Info("Reloaded by SIGHUP") + log.Info().Msg("Reloaded by SIGHUP") case <-sigterm: - log.Info("Stopping API server") + log.Info().Msg("Stopping API server") return api.Stop() case <-api.stop: - log.Info("Stopping API server. Stopped from the inside of the application") + log.Info().Msg("Stopping API server. 
Stopped from the inside of the application") return api.Stop() } } @@ -156,7 +152,7 @@ func (api *APIServer) GetMetrics() *metrics.APIMetrics { } func (api *APIServer) RunWatch(cliCtx *cli.Context) { - api.log.Info("Starting API Server in watch mode") + log.Info().Msg("Starting API Server in watch mode") b := backup.NewBackuper(api.config) commandId, _ := status.Current.Start("watch") err := b.Watch( @@ -174,7 +170,6 @@ func (api *APIServer) Stop() error { } func (api *APIServer) Restart() error { - log := apexLog.WithField("logger", "server.Restart").WithField("version", api.cliApp.Version) _, err := api.ReloadConfig(nil, "restart") if err != nil { return err @@ -190,9 +185,9 @@ func (api *APIServer) Restart() error { err = api.server.ListenAndServeTLS(api.config.API.CertificateFile, api.config.API.PrivateKeyFile) if err != nil { if errors.Is(err, http.ErrServerClosed) { - log.Warnf("ListenAndServeTLS get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServeTLS get signal: %s", err.Error()) } else { - log.Fatalf("ListenAndServeTLS error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServeTLS error: %s", err.Error()) } } }() @@ -201,9 +196,9 @@ func (api *APIServer) Restart() error { go func() { if err = api.server.ListenAndServe(); err != nil { if errors.Is(err, http.ErrServerClosed) { - log.Warnf("ListenAndServe get signal: %s", err.Error()) + log.Warn().Msgf("ListenAndServe get signal: %s", err.Error()) } else { - log.Fatalf("ListenAndServe error: %s", err.Error()) + log.Fatal().Stack().Msgf("ListenAndServe error: %s", err.Error()) } } }() @@ -213,7 +208,6 @@ func (api *APIServer) Restart() error { // registerHTTPHandlers - resister API routes func (api *APIServer) registerHTTPHandlers() *http.Server { - log := apexLog.WithField("logger", "registerHTTPHandlers") r := mux.NewRouter() r.Use(api.basicAuthMiddleware) r.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -254,7 +248,7 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { routes = append(routes, t) return nil }); err != nil { - log.Errorf("mux.Router.Walk return error: %v", err) + log.Error().Msgf("mux.Router.Walk return error: %v", err) return nil } @@ -267,7 +261,7 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { if api.config.API.CACertFile != "" { caCert, err := os.ReadFile(api.config.API.CACertFile) if err != nil { - api.log.Fatalf("api initialization error %s: %v", api.config.API.CAKeyFile, err) + log.Fatal().Stack().Msgf("api initialization error %s: %v", api.config.API.CAKeyFile, err) } caCertPool := x509.NewCertPool() caCertPool.AppendCertsFromPEM(caCert) @@ -283,9 +277,9 @@ func (api *APIServer) registerHTTPHandlers() *http.Server { func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path != "/metrics" { - api.log.Infof("API call %s %s", r.Method, r.URL.Path) + log.Info().Msgf("API call %s %s", r.Method, r.URL.Path) } else { - api.log.Debugf("API call %s %s", r.Method, r.URL.Path) + log.Debug().Msgf("API call %s %s", r.Method, r.URL.Path) } user, pass, _ := r.BasicAuth() query := r.URL.Query() @@ -296,11 +290,11 @@ func (api *APIServer) basicAuthMiddleware(next http.Handler) http.Handler { pass = p[0] } if (user != api.config.API.Username) || (pass != api.config.API.Password) { - api.log.Warnf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) + log.Warn().Msgf("%s %s Authorization failed %s:%s", r.Method, r.URL, user, pass) 
w.Header().Set("WWW-Authenticate", "Basic realm=\"Provide username and password\"") w.WriteHeader(http.StatusUnauthorized) if _, err := w.Write([]byte("401 Unauthorized\n")); err != nil { - api.log.Errorf("RequestWriter.Write return error: %v", err) + log.Error().Msgf("RequestWriter.Write return error: %v", err) } return } @@ -337,7 +331,7 @@ func (api *APIServer) actions(w http.ResponseWriter, r *http.Request) { api.writeError(w, http.StatusBadRequest, string(line), err) return } - api.log.WithField("version", api.cliApp.Version).Infof("/backup/actions call: %s", row.Command) + log.Info().Str("version", api.cliApp.Version).Msgf("/backup/actions call: %s", row.Command) args, err := shlex.Split(row.Command) if err != nil { api.writeError(w, http.StatusBadRequest, "", err) @@ -400,16 +394,16 @@ func (api *APIServer) actionsDeleteHandler(row status.ActionRow, args []string, if err != nil { return actionsResults, err } - api.log.Info("DELETED") go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), args[1] == "local"); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() actionsResults = append(actionsResults, actionsResultsRow{ Status: "success", Operation: row.Command, }) + log.Info().Msg("DELETED") return actionsResults, nil } @@ -425,12 +419,12 @@ func (api *APIServer) actionsAsyncCommandsHandler(command string, args []string, }) status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("API /backup/actions error: %v", err) + log.Error().Msgf("API /backup/actions error: %v", err) return } go func() { if err := api.UpdateBackupMetrics(context.Background(), command == "create" || strings.HasPrefix(command, "restore") || command == "download"); err != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", err) + log.Error().Msgf("UpdateBackupMetrics return error: %v", err) } }() }() @@ -461,7 +455,7 @@ func (api *APIServer) actionsKillHandler(row status.ActionRow, args []string, ac func (api *APIServer) actionsCleanHandler(w http.ResponseWriter, row status.ActionRow, command string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn(ErrAPILocked.Error()) + log.Warn().Msgf(ErrAPILocked.Error()) return actionsResults, ErrAPILocked } commandId, ctx := status.Current.Start(command) @@ -473,14 +467,14 @@ func (api *APIServer) actionsCleanHandler(w http.ResponseWriter, row status.Acti b := backup.NewBackuper(cfg) err = b.Clean(ctx) if err != nil { - api.log.Errorf("actions Clean error: %v", err) + log.Error().Msgf("actions Clean error: %v", err) status.Current.Stop(commandId, err) return actionsResults, err } - api.log.Info("CLEANED") + log.Info().Msg("CLEANED") go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), true); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() status.Current.Stop(commandId, nil) @@ -493,7 +487,7 @@ func (api *APIServer) actionsCleanHandler(w http.ResponseWriter, row status.Acti func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row status.ActionRow, command string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Warn(ErrAPILocked.Error()) + 
log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } commandId, _ := status.Current.Start(command) @@ -505,14 +499,14 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Errorf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) status.Current.Stop(commandId, err) return actionsResults, err } - api.log.Info("CLEANED") + log.Info().Msg("CLEANED") go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), false); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() status.Current.Stop(commandId, nil) @@ -525,7 +519,7 @@ func (api *APIServer) actionsCleanRemoteBrokenHandler(w http.ResponseWriter, row func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.ActionRow, args []string, actionsResults []actionsResultsRow) ([]actionsResultsRow, error) { if (!api.config.API.AllowParallel && status.Current.InProgress()) || status.Current.CheckCommandInProgress(row.Command) { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() return actionsResults, ErrAPILocked } cfg, err := api.ReloadConfig(w, "watch") @@ -614,14 +608,14 @@ func (api *APIServer) actionsWatchHandler(w http.ResponseWriter, row status.Acti func (api *APIServer) handleWatchResponse(watchCommandId int, err error) { status.Current.Stop(watchCommandId, err) if err != nil { - api.log.Errorf("Watch error: %v", err) + log.Error().Msgf("Watch error: %v", err) } if api.config.API.WatchIsMainProcess { // Do not stop server if 'watch' was canceled by the user command if errors.Is(err, context.Canceled) { return } - api.log.Info("Stopping server since watch command is stopped") + log.Info().Msg("Stopping server since watch command is stopped") api.stop <- struct{}{} } } @@ -637,7 +631,7 @@ func (api *APIServer) actionsLog(w http.ResponseWriter, r *http.Request) { if q.Get("last") != "" { last, err = strconv.ParseInt(q.Get("last"), 10, 16) if err != nil { - api.log.Warn(err.Error()) + log.Warn().Err(err).Send() api.writeError(w, http.StatusInternalServerError, "actions", err) return } @@ -862,7 +856,7 @@ func (api *APIServer) httpListHandler(w http.ResponseWriter, r *http.Request) { // httpCreateHandler - create a backup func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "create", ErrAPILocked) return } @@ -922,7 +916,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "create", err) return } @@ -934,14 +928,14 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) return b.CreateBackup(backupName, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, createRBAC, false, createConfigs, false, checkPartsColumns, api.clickhouseBackupVersion, commandId) }) if err != nil { - api.log.Errorf("API /backup/create error: %v", err) + log.Error().Msgf("API /backup/create error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(context.Background(), err, callback) return 
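Illustrative sketch (not part of the patch): these handlers show the two zerolog idioms the patch uses for the same situation — logging ErrAPILocked when parallel execution is disabled. A short sketch contrasting them, with a stand-in error value:

package example

import (
	"errors"

	"github.com/rs/zerolog/log"
)

// errLocked stands in for ErrAPILocked; the real error text is defined elsewhere in pkg/server.
var errLocked = errors.New("operation already in progress")

func warnLocked() {
	// Attaches the error to the structured "error" field; Send() emits the event with an empty message.
	log.Warn().Err(errLocked).Send()

	// Formats the error text into the message instead. Msgf with a non-constant format string
	// can trip go vet's printf check, so Msg(errLocked.Error()) is the safer spelling.
	log.Warn().Msg(errLocked.Error())
}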
} go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), true); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() @@ -962,7 +956,7 @@ func (api *APIServer) httpCreateHandler(w http.ResponseWriter, r *http.Request) // httpWatchHandler - run watch command go routine, can't run the same watch command twice func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "watch", ErrAPILocked) return } @@ -1027,7 +1021,7 @@ func (api *APIServer) httpWatchHandler(w http.ResponseWriter, r *http.Request) { } if status.Current.CheckCommandInProgress(fullCommand) { - api.log.Warnf("%s error: %v", fullCommand, ErrAPILocked) + log.Warn().Msgf("%s error: %v", fullCommand, ErrAPILocked) api.writeError(w, http.StatusLocked, "watch", ErrAPILocked) return } @@ -1058,7 +1052,7 @@ func (api *APIServer) httpCleanHandler(w http.ResponseWriter, _ *http.Request) { err = b.Clean(ctx) defer status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("Clean error: %v", err) + log.Error().Msgf("Clean error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean", err) return } @@ -1083,13 +1077,13 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt b := backup.NewBackuper(cfg) err = b.CleanRemoteBroken(commandId) if err != nil { - api.log.Errorf("Clean remote broken error: %v", err) + log.Error().Msgf("Clean remote broken error: %v", err) api.writeError(w, http.StatusInternalServerError, "clean_remote_broken", err) return } go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), false); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() @@ -1105,7 +1099,7 @@ func (api *APIServer) httpCleanRemoteBrokenHandler(w http.ResponseWriter, _ *htt // httpUploadHandler - upload a backup to remote storage func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "upload", ErrAPILocked) return } @@ -1159,7 +1153,7 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "upload", err) return } @@ -1171,14 +1165,14 @@ func (api *APIServer) httpUploadHandler(w http.ResponseWriter, r *http.Request) return b.Upload(name, deleteSource, diffFrom, diffFromRemote, tablePattern, partitionsToBackup, schemaOnly, resume, api.cliApp.Version, commandId) }) if err != nil { - api.log.Errorf("Upload error: %v", err) + log.Error().Msgf("Upload error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(context.Background(), err, callback) return } go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), false); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() status.Current.Stop(commandId, nil) @@ -1205,7 +1199,7 @@ var 
tableMappingRE = regexp.MustCompile(`[\w+]:[\w+]`) // httpRestoreHandler - restore a backup from local storage func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "restore", ErrAPILocked) return } @@ -1302,7 +1296,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "restore", err) return } @@ -1315,7 +1309,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) }) status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("API /backup/restore error: %v", err) + log.Error().Msgf("API /backup/restore error: %v", err) api.errorCallback(context.Background(), err, callback) return } @@ -1335,7 +1329,7 @@ func (api *APIServer) httpRestoreHandler(w http.ResponseWriter, r *http.Request) // httpDownloadHandler - download a backup from remote to local storage func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "download", ErrAPILocked) return } @@ -1373,7 +1367,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request callback, err := parseCallback(query) if err != nil { - api.log.Error(err.Error()) + log.Error().Err(err).Send() api.writeError(w, http.StatusBadRequest, "download", err) return } @@ -1385,14 +1379,14 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request return b.Download(name, tablePattern, partitionsToBackup, schemaOnly, resume, api.cliApp.Version, commandId) }) if err != nil { - api.log.Errorf("API /backup/download error: %v", err) + log.Error().Msgf("API /backup/download error: %v", err) status.Current.Stop(commandId, err) api.errorCallback(context.Background(), err, callback) return } go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), true); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", metricsErr) } }() status.Current.Stop(commandId, nil) @@ -1412,7 +1406,7 @@ func (api *APIServer) httpDownloadHandler(w http.ResponseWriter, r *http.Request // httpDeleteHandler - delete a backup from local or remote storage func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) { if !api.config.API.AllowParallel && status.Current.InProgress() { - api.log.Info(ErrAPILocked.Error()) + log.Warn().Err(ErrAPILocked).Send() api.writeError(w, http.StatusLocked, "delete", ErrAPILocked) return } @@ -1434,13 +1428,13 @@ func (api *APIServer) httpDeleteHandler(w http.ResponseWriter, r *http.Request) } status.Current.Stop(commandId, err) if err != nil { - api.log.Errorf("delete backup error: %v", err) + log.Error().Msgf("delete backup error: %v", err) api.writeError(w, http.StatusInternalServerError, "delete", err) return } go func() { if metricsErr := api.UpdateBackupMetrics(context.Background(), vars["where"] == "local"); metricsErr != nil { - api.log.Errorf("UpdateBackupMetrics return error: %v", metricsErr) + log.Error().Msgf("UpdateBackupMetrics return error: %v", 
metricsErr) } }() api.sendJSONEachRow(w, http.StatusOK, struct { @@ -1472,7 +1466,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e numberBackupsRemote := 0 numberBackupsRemoteBroken := 0 - api.log.Infof("Update backup metrics start (onlyLocal=%v)", onlyLocal) + log.Info().Msgf("Update backup metrics start (onlyLocal=%v)", onlyLocal) if !api.config.API.EnableMetrics { return nil } @@ -1534,7 +1528,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e api.metrics.LastFinish["create_remote"].Set(float64(lastBackupUpload.Unix())) } } - api.log.WithFields(apexLog.Fields{ + log.Info().Fields(map[string]interface{}{ "duration": utils.HumanizeDuration(time.Since(startTime)), "LastBackupCreateLocal": lastBackupCreateLocal, "LastBackupCreateRemote": lastBackupCreateRemote, @@ -1543,7 +1537,7 @@ func (api *APIServer) UpdateBackupMetrics(ctx context.Context, onlyLocal bool) e "LastBackupSizeLocal": lastSizeLocal, "NumberBackupsLocal": numberBackupsLocal, "NumberBackupsRemote": numberBackupsRemote, - }).Info("Update backup metrics finish") + }).Msg("Update backup metrics finish") return nil } @@ -1573,10 +1567,9 @@ func (api *APIServer) registerMetricsHandlers(r *mux.Router, enableMetrics bool, } func (api *APIServer) CreateIntegrationTables() error { - api.log.Infof("Create integration tables") + log.Info().Msgf("Create integration tables") ch := &clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Log: api.log.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { return fmt.Errorf("can't connect to clickhouse: %w", err) @@ -1635,14 +1628,13 @@ func (api *APIServer) CreateIntegrationTables() error { func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*config.Config, error) { cfg, err := config.LoadConfig(api.configPath) if err != nil { - api.log.Errorf("config.LoadConfig(%s) return error: %v", api.configPath, err) + log.Error().Msgf("config.LoadConfig(%s) return error: %v", api.configPath, err) if w != nil { api.writeError(w, http.StatusInternalServerError, command, err) } return nil, err } api.config = cfg - api.log = apexLog.WithField("logger", "server") api.metrics.NumberBackupsRemoteExpected.Set(float64(cfg.General.BackupsToKeepRemote)) api.metrics.NumberBackupsLocalExpected.Set(float64(cfg.General.BackupsToKeepLocal)) return cfg, nil @@ -1651,14 +1643,13 @@ func (api *APIServer) ReloadConfig(w http.ResponseWriter, command string) (*conf func (api *APIServer) ResumeOperationsAfterRestart() error { ch := clickhouse.ClickHouse{ Config: &api.config.ClickHouse, - Log: apexLog.WithField("logger", "clickhouse"), } if err := ch.Connect(); err != nil { return err } defer func() { if err := ch.GetConn().Close(); err != nil { - api.log.Errorf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) + log.Error().Msgf("ResumeOperationsAfterRestart can't close clickhouse connection: %v", err) } }() disks, err := ch.GetDisks(context.Background(), true) @@ -1717,7 +1708,7 @@ func (api *APIServer) ResumeOperationsAfterRestart() error { } args = append(args, "--resumable=1", backupName) fullCommand := strings.Join(args, " ") - api.log.WithField("operation", "ResumeOperationsAfterRestart").Info(fullCommand) + log.Info().Str("operation", "ResumeOperationsAfterRestart").Send() commandId, _ := status.Current.Start(fullCommand) err, _ = api.metrics.ExecuteWithMetrics(command, 0, func() error { return api.cliApp.Run(append([]string{"clickhouse-backup", "-c", api.configPath, 
"--command-id", strconv.FormatInt(int64(commandId), 10)}, args...)) @@ -1731,7 +1722,7 @@ func (api *APIServer) ResumeOperationsAfterRestart() error { if api.config.General.BackupsToKeepLocal >= 0 { return err } - api.log.WithField("operation", "ResumeOperationsAfterRestart").Warnf("remove %s return error: ", err) + log.Warn().Str("operation", "ResumeOperationsAfterRestart").Msgf("remove %s return error: ", err) } default: return fmt.Errorf("unkown command for state file %s", stateFile) diff --git a/pkg/server/utils.go b/pkg/server/utils.go index d32fe1d3..caa1d82b 100644 --- a/pkg/server/utils.go +++ b/pkg/server/utils.go @@ -4,18 +4,19 @@ import ( "context" "encoding/json" "fmt" + "github.com/rs/zerolog/log" "net/http" "reflect" ) func (api *APIServer) flushOutput(w http.ResponseWriter, out string) { if _, err := fmt.Fprintln(w, out); err != nil { - api.log.Warnf("can't write to http.ResponseWriter: %v", err) + log.Warn().Msgf("can't write to http.ResponseWriter: %v", err) } } func (api *APIServer) writeError(w http.ResponseWriter, statusCode int, operation string, err error) { - api.log.Errorf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) + log.Error().Msgf("api.writeError status=%d operation=%s err=%v", statusCode, operation, err) w.WriteHeader(statusCode) w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Header().Set("Cache-Control", "no-store, no-cache, must-revalidate") @@ -45,7 +46,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warnf("sendJSONEachRow json.Marshal error: %v", err) + log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } default: @@ -53,7 +54,7 @@ func (api *APIServer) sendJSONEachRow(w http.ResponseWriter, statusCode int, v i api.flushOutput(w, string(out)) } else { api.flushOutput(w, err.Error()) - api.log.Warnf("sendJSONEachRow json.Marshal error: %v", err) + log.Warn().Msgf("sendJSONEachRow json.Marshal error: %v", err) } } } @@ -71,7 +72,7 @@ func (api *APIServer) errorCallback(ctx context.Context, err error, callback cal Error: err.Error(), } for _, e := range callback(ctx, payload) { - api.log.Error(e.Error()) + log.Error().Err(e).Send() } } @@ -82,6 +83,6 @@ func (api *APIServer) successCallback(ctx context.Context, callback callbackFn) Error: "", } for _, e := range callback(ctx, payload) { - api.log.Error(e.Error()) + log.Error().Err(e).Send() } } diff --git a/pkg/status/status.go b/pkg/status/status.go index 6a8b0c6e..d7b3f186 100644 --- a/pkg/status/status.go +++ b/pkg/status/status.go @@ -3,11 +3,12 @@ package status import ( "context" "fmt" - "github.com/Altinity/clickhouse-backup/v2/pkg/common" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "strings" "sync" "time" + + "github.com/Altinity/clickhouse-backup/v2/pkg/common" ) const ( @@ -17,15 +18,12 @@ const ( ErrorStatus = "error" ) -var Current = &AsyncStatus{ - log: apexLog.WithField("logger", "status"), -} +var Current = &AsyncStatus{} const NotFromAPI = int(-1) type AsyncStatus struct { commands []ActionRow - log *apexLog.Entry sync.RWMutex } @@ -57,7 +55,7 @@ func (status *AsyncStatus) Start(command string) (int, context.Context) { Cancel: cancel, }) lastCommandId := len(status.commands) - 1 - status.log.Debugf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, status.commands[lastCommandId]) + log.Debug().Msgf("api.status.Start -> status.commands[%d] == %+v", lastCommandId, 
status.commands[lastCommandId]) return lastCommandId, ctx } @@ -78,12 +76,12 @@ func (status *AsyncStatus) InProgress() bool { defer status.RUnlock() for n := range status.commands { if status.commands[n].Status == InProgressStatus { - status.log.Debugf("api.status.inProgress -> status.commands[%d].Status == %s, inProgress=%v", n, status.commands[n].Status, status.commands[n].Status == InProgressStatus) + log.Debug().Msgf("api.status.inProgress -> status.commands[%d].Status == %s, inProgress=%v", n, status.commands[n].Status, status.commands[n].Status == InProgressStatus) return true } } - status.log.Debugf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) + log.Debug().Msgf("api.status.inProgress -> len(status.commands)=%d, inProgress=false", len(status.commands)) return false } @@ -119,7 +117,7 @@ func (status *AsyncStatus) Stop(commandId int, err error) { status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) status.commands[commandId].Ctx = nil status.commands[commandId].Cancel = nil - status.log.Debugf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.stop -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } func (status *AsyncStatus) Cancel(command string, err error) error { @@ -127,7 +125,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { defer status.Unlock() if len(status.commands) == 0 { err = fmt.Errorf("empty command list") - status.log.Warnf(err.Error()) + log.Warn().Err(err).Send() return err } commandId := -1 @@ -148,11 +146,11 @@ func (status *AsyncStatus) Cancel(command string, err error) error { } if commandId == -1 { err = fmt.Errorf("command `%s` not found", command) - status.log.Warnf(err.Error()) + log.Warn().Err(err).Send() return err } if status.commands[commandId].Status != InProgressStatus { - status.log.Warnf("found `%s` with status=%s", command, status.commands[commandId].Status) + log.Warn().Msgf("found `%s` with status=%s", command, status.commands[commandId].Status) } if status.commands[commandId].Ctx != nil { status.commands[commandId].Cancel() @@ -162,7 +160,7 @@ func (status *AsyncStatus) Cancel(command string, err error) error { status.commands[commandId].Error = err.Error() status.commands[commandId].Status = CancelStatus status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - status.log.Debugf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) return nil } @@ -178,7 +176,7 @@ func (status *AsyncStatus) CancelAll(cancelMsg string) { status.commands[commandId].Status = CancelStatus status.commands[commandId].Error = cancelMsg status.commands[commandId].Finish = time.Now().Format(common.TimeFormat) - status.log.Debugf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) + log.Debug().Msgf("api.status.cancel -> status.commands[%d] == %+v", commandId, status.commands[commandId]) } } diff --git a/pkg/storage/azblob.go b/pkg/storage/azblob.go index 300bfcc1..e15010ac 100644 --- a/pkg/storage/azblob.go +++ b/pkg/storage/azblob.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "fmt" "github.com/Altinity/clickhouse-backup/v2/pkg/config" - apexLog "github.com/apex/log" "io" "net/url" "path" @@ -20,6 +19,7 @@ import ( "github.com/Azure/go-autorest/autorest/adal" 
"github.com/Azure/go-autorest/autorest/azure" "github.com/pkg/errors" + "github.com/rs/zerolog/log" ) // AzureBlob - presents methods for manipulate data on Azure @@ -28,14 +28,13 @@ type AzureBlob struct { Pipeline pipeline.Pipeline CPK azblob.ClientProvidedKeyOptions Config *config.AzureBlobConfig - Log *apexLog.Entry } func (a *AzureBlob) logf(msg string, args ...interface{}) { if a.Config.Debug { - a.Log.Infof(msg, args...) + log.Info().Msgf(msg, args...) } else { - a.Log.Debugf(msg, args...) + log.Debug().Msgf(msg, args...) } } func (a *AzureBlob) Kind() string { diff --git a/pkg/storage/ftp.go b/pkg/storage/ftp.go index c4643cb9..c3b4adcf 100644 --- a/pkg/storage/ftp.go +++ b/pkg/storage/ftp.go @@ -4,8 +4,6 @@ import ( "context" "crypto/tls" "fmt" - "github.com/Altinity/clickhouse-backup/v2/pkg/config" - apexLog "github.com/apex/log" "io" "os" "path" @@ -13,14 +11,15 @@ import ( "sync" "time" + "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/jlaffaye/ftp" "github.com/jolestar/go-commons-pool/v2" + "github.com/rs/zerolog/log" ) type FTP struct { clients *pool.ObjectPool Config *config.FTPConfig - Log *apexLog.Entry dirCache map[string]bool dirCacheMutex sync.RWMutex } @@ -64,21 +63,21 @@ func (f *FTP) Close(ctx context.Context) error { // getConnectionFromPool *ftp.ServerConn is not thread-safe, so we need implements connection pool func (f *FTP) getConnectionFromPool(ctx context.Context, where string) (*ftp.ServerConn, error) { - f.Log.Debugf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + log.Debug().Msgf("getConnectionFromPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) client, err := f.clients.BorrowObject(ctx) if err != nil { - f.Log.Errorf("can't BorrowObject from FTP Connection Pool: %v", err) + log.Error().Msgf("can't BorrowObject from FTP Connection Pool: %v", err) return nil, err } return client.(*ftp.ServerConn), nil } func (f *FTP) returnConnectionToPool(ctx context.Context, where string, client *ftp.ServerConn) { - f.Log.Debugf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) + log.Debug().Msgf("returnConnectionToPool(%s) active=%d idle=%d", where, f.clients.GetNumActive(), f.clients.GetNumIdle()) if client != nil { err := f.clients.ReturnObject(ctx, client) if err != nil { - f.Log.Errorf("can't ReturnObject to FTP Connection Pool: %v", err) + log.Error().Msgf("can't ReturnObject to FTP Connection Pool: %v", err) } } } @@ -182,7 +181,7 @@ func (f *FTP) GetFileReader(ctx context.Context, key string) (io.ReadCloser, err return f.GetFileReaderAbsolute(ctx, path.Join(f.Config.Path, key)) } func (f *FTP) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { - f.Log.Debugf("GetFileReaderAbsolute key=%s", key) + log.Debug().Msgf("GetFileReaderAbsolute key=%s", key) client, err := f.getConnectionFromPool(ctx, "GetFileReader") if err != nil { return nil, err @@ -205,7 +204,7 @@ func (f *FTP) PutFile(ctx context.Context, key string, r io.ReadCloser) error { } func (f *FTP) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { - f.Log.Debugf("PutFileAbsolute key=%s", key) + log.Debug().Msgf("PutFileAbsolute key=%s", key) client, err := f.getConnectionFromPool(ctx, "PutFile") defer f.returnConnectionToPool(ctx, "PutFile", client) if err != nil { @@ -257,7 +256,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.RLock() if _, exists := 
f.dirCache[d]; exists { f.dirCacheMutex.RUnlock() - f.Log.Debugf("MkdirAll %s exists in dirCache", d) + log.Debug().Msgf("MkdirAll %s exists in dirCache", d) continue } f.dirCacheMutex.RUnlock() @@ -265,7 +264,7 @@ func (f *FTP) MkdirAll(key string, client *ftp.ServerConn) error { f.dirCacheMutex.Lock() err = client.MakeDir(d) if err != nil { - f.Log.Warnf("MkdirAll MakeDir(%s) return error: %v", d, err) + log.Warn().Msgf("MkdirAll MakeDir(%s) return error: %v", d, err) } else { f.dirCache[d] = true } diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go index 28af58f4..6553daec 100644 --- a/pkg/storage/gcs.go +++ b/pkg/storage/gcs.go @@ -20,7 +20,7 @@ import ( "google.golang.org/api/option/internaloption" "cloud.google.com/go/storage" - apexLog "github.com/apex/log" + "github.com/rs/zerolog/log" "google.golang.org/api/option" googleHTTPTransport "google.golang.org/api/transport/http" ) @@ -47,11 +47,11 @@ func (w debugGCSTransport) RoundTrip(r *http.Request) (*http.Response, error) { logMsg += fmt.Sprintf("%v: %v\n", h, v) } } - apexLog.Info(logMsg) + log.Info().Msg(logMsg) resp, err := w.base.RoundTrip(r) if err != nil { - apexLog.Errorf("GCS_ERROR: %v", err) + log.Error().Msgf("GCS_ERROR: %v", err) return resp, err } logMsg = fmt.Sprintf("<<< [GCS_RESPONSE] <<< %v %v\n", r.Method, r.URL.String()) @@ -60,7 +60,7 @@ func (w debugGCSTransport) RoundTrip(r *http.Request) (*http.Response, error) { logMsg += fmt.Sprintf("%v: %v\n", h, v) } } - apexLog.Info(logMsg) + log.Info().Msg(logMsg) return resp, err } @@ -232,7 +232,7 @@ func (gcs *GCS) GetFileReader(ctx context.Context, key string) (io.ReadCloser, e func (gcs *GCS) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadCloser, error) { pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { - apexLog.Errorf("gcs.GetFileReader: gcs.clientPool.BorrowObject error: %+v", err) + log.Error().Msgf("gcs.GetFileReader: gcs.clientPool.BorrowObject error: %+v", err) return nil, err } pClient := pClientObj.(*clientObject).Client @@ -240,12 +240,12 @@ func (gcs *GCS) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadC reader, err := obj.NewReader(ctx) if err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.GetFileReader: gcs.clientPool.InvalidateObject error: %v ", pErr) + log.Warn().Msgf("gcs.GetFileReader: gcs.clientPool.InvalidateObject error: %v ", pErr) } return nil, err } if pErr := gcs.clientPool.ReturnObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.GetFileReader: gcs.clientPool.ReturnObject error: %v ", pErr) + log.Warn().Msgf("gcs.GetFileReader: gcs.clientPool.ReturnObject error: %v ", pErr) } return reader, nil } @@ -261,7 +261,7 @@ func (gcs *GCS) PutFile(ctx context.Context, key string, r io.ReadCloser) error func (gcs *GCS) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser) error { pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { - apexLog.Errorf("gcs.PutFile: gcs.clientPool.BorrowObject error: %+v", err) + log.Error().Msgf("gcs.PutFile: gcs.clientPool.BorrowObject error: %+v", err) return err } pClient := pClientObj.(*clientObject).Client @@ -275,17 +275,17 @@ func (gcs *GCS) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser } defer func() { if err := gcs.clientPool.ReturnObject(ctx, pClientObj); err != nil { - apexLog.Warnf("gcs.PutFile: gcs.clientPool.ReturnObject error: %+v", err) + log.Warn().Msgf("gcs.PutFile: gcs.clientPool.ReturnObject error: %+v", err) } }() buffer := make([]byte, 
128*1024) _, err = io.CopyBuffer(writer, r, buffer) if err != nil { - apexLog.Warnf("gcs.PutFile: can't copy buffer: %+v", err) + log.Warn().Msgf("gcs.PutFile: can't copy buffer: %+v", err) return err } if err = writer.Close(); err != nil { - apexLog.Warnf("gcs.PutFile: can't close writer: %+v", err) + log.Warn().Msgf("gcs.PutFile: can't close writer: %+v", err) return err } return nil @@ -309,7 +309,7 @@ func (gcs *GCS) StatFile(ctx context.Context, key string) (RemoteFile, error) { func (gcs *GCS) deleteKey(ctx context.Context, key string) error { pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { - apexLog.Errorf("gcs.deleteKey: gcs.clientPool.BorrowObject error: %+v", err) + log.Error().Msgf("gcs.deleteKey: gcs.clientPool.BorrowObject error: %+v", err) return err } pClient := pClientObj.(*clientObject).Client @@ -317,12 +317,12 @@ func (gcs *GCS) deleteKey(ctx context.Context, key string) error { err = object.Delete(ctx) if err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.deleteKey: gcs.clientPool.InvalidateObject error: %+v", pErr) + log.Warn().Msgf("gcs.deleteKey: gcs.clientPool.InvalidateObject error: %+v", pErr) } return err } if pErr := gcs.clientPool.ReturnObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.deleteKey: gcs.clientPool.ReturnObject error: %+v", pErr) + log.Warn().Msgf("gcs.deleteKey: gcs.clientPool.ReturnObject error: %+v", pErr) } return nil } @@ -339,10 +339,10 @@ func (gcs *GCS) DeleteFileFromObjectDiskBackup(ctx context.Context, key string) func (gcs *GCS) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, dstKey string) (int64, error) { dstKey = path.Join(gcs.Config.ObjectDiskPath, dstKey) - apexLog.Debugf("GCS->CopyObject %s/%s -> %s/%s", srcBucket, srcKey, gcs.Config.Bucket, dstKey) + log.Debug().Msgf("GCS->CopyObject %s/%s -> %s/%s", srcBucket, srcKey, gcs.Config.Bucket, dstKey) pClientObj, err := gcs.clientPool.BorrowObject(ctx) if err != nil { - apexLog.Errorf("gcs.CopyObject: gcs.clientPool.BorrowObject error: %+v", err) + log.Error().Msgf("gcs.CopyObject: gcs.clientPool.BorrowObject error: %+v", err) return 0, err } pClient := pClientObj.(*clientObject).Client @@ -351,18 +351,18 @@ func (gcs *GCS) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey attrs, err := src.Attrs(ctx) if err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.CopyObject: gcs.clientPool.InvalidateObject error: %+v", pErr) + log.Warn().Msgf("gcs.CopyObject: gcs.clientPool.InvalidateObject error: %+v", pErr) } return 0, err } if _, err = dst.CopierFrom(src).Run(ctx); err != nil { if pErr := gcs.clientPool.InvalidateObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.CopyObject: gcs.clientPool.InvalidateObject error: %+v", pErr) + log.Warn().Msgf("gcs.CopyObject: gcs.clientPool.InvalidateObject error: %+v", pErr) } return 0, err } if pErr := gcs.clientPool.ReturnObject(ctx, pClientObj); pErr != nil { - apexLog.Warnf("gcs.CopyObject: gcs.clientPool.ReturnObject error: %+v", pErr) + log.Warn().Msgf("gcs.CopyObject: gcs.clientPool.ReturnObject error: %+v", pErr) } return attrs.Size, nil } diff --git a/pkg/storage/general.go b/pkg/storage/general.go index adea2154..2d9a78af 100644 --- a/pkg/storage/general.go +++ b/pkg/storage/general.go @@ -5,9 +5,6 @@ import ( "context" "encoding/json" "fmt" - "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" - "github.com/Altinity/clickhouse-backup/v2/pkg/config" - 
"github.com/eapache/go-resiliency/retrier" "io" "os" "path" @@ -17,13 +14,16 @@ import ( "sync" "time" + "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" + "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/metadata" - "golang.org/x/sync/errgroup" - apexLog "github.com/apex/log" "github.com/djherbis/buffer" "github.com/djherbis/nio/v3" + "github.com/eapache/go-resiliency/retrier" "github.com/mholt/archiver/v4" + "github.com/rs/zerolog/log" + "golang.org/x/sync/errgroup" ) const ( @@ -45,7 +45,6 @@ type Backup struct { type BackupDestination struct { RemoteStorage - Log *apexLog.Entry compressionFormat string compressionLevel int } @@ -72,17 +71,17 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) listCache := map[string]Backup{} if info, err := os.Stat(listCacheFile); os.IsNotExist(err) || info.IsDir() { - bd.Log.Debugf("%s not found, load %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s not found, load %d elements", listCacheFile, len(listCache)) return listCache, nil } f, err := os.Open(listCacheFile) if err != nil { - bd.Log.Warnf("can't open %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return listCache, nil } defer func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() select { @@ -91,15 +90,15 @@ func (bd *BackupDestination) loadMetadataCache(ctx context.Context) (map[string] default: body, err := io.ReadAll(f) if err != nil { - bd.Log.Warnf("can't read %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't read %s return error %v", listCacheFile, err) return listCache, nil } if string(body) != "" { if err := json.Unmarshal(body, &listCache); err != nil { - bd.Log.Fatalf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) + log.Fatal().Stack().Msgf("can't parse %s to map[string]Backup\n\n%s\n\nreturn error %v", listCacheFile, body, err) } } - bd.Log.Debugf("%s load %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s load %d elements", listCacheFile, len(listCache)) return listCache, nil } } @@ -108,12 +107,12 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma listCacheFile := path.Join(os.TempDir(), fmt.Sprintf(".clickhouse-backup-metadata.cache.%s", bd.Kind())) f, err := os.OpenFile(listCacheFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { - bd.Log.Warnf("can't open %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't open %s return error %v", listCacheFile, err) return nil } defer func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't close %s return error %v", listCacheFile, err) } }() for backupName := range listCache { @@ -139,15 +138,15 @@ func (bd *BackupDestination) saveMetadataCache(ctx context.Context, listCache ma default: body, err := json.MarshalIndent(&listCache, "", "\t") if err != nil { - bd.Log.Warnf("can't json marshal %s return error %v", listCacheFile, err) + log.Warn().Msgf("can't json marshal %s return error %v", listCacheFile, err) return nil } _, err = f.Write(body) if err != nil { - bd.Log.Warnf("can't write to %s return error %v", listCacheFile, 
err) + log.Warn().Msgf("can't write to %s return error %v", listCacheFile, err) return nil } - bd.Log.Debugf("%s save %d elements", listCacheFile, len(listCache)) + log.Debug().Msgf("%s save %d elements", listCacheFile, len(listCache)) return nil } } @@ -237,7 +236,7 @@ func (bd *BackupDestination) BackupList(ctx context.Context, parseMetadata bool, return nil }) if err != nil { - bd.Log.Warnf("BackupList bd.Walk return error: %v", err) + log.Warn().Msgf("BackupList bd.Walk return error: %v", err) } // sort by name for the same not parsed metadata.json sort.SliceStable(result, func(i, j int) bool { @@ -270,13 +269,13 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot } defer func() { if err := reader.Close(); err != nil { - bd.Log.Warnf("can't close GetFileReader descriptor %v", reader) + log.Warn().Msgf("can't close GetFileReader descriptor %v", reader) } switch reader.(type) { case *os.File: fileName := reader.(*os.File).Name() if err := os.Remove(fileName); err != nil { - bd.Log.Warnf("can't remove %s", fileName) + log.Warn().Msgf("can't remove %s", fileName) } } }() @@ -285,7 +284,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot bufReader := nio.NewReader(reader, buf) compressionFormat := bd.compressionFormat if !checkArchiveExtension(path.Ext(remotePath), compressionFormat) { - bd.Log.Warnf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) + log.Warn().Msgf("remote file backup extension %s not equal with %s", remotePath, compressionFormat) compressionFormat = strings.Replace(path.Ext(remotePath), ".", "", -1) } z, err := getArchiveReader(compressionFormat) @@ -326,7 +325,7 @@ func (bd *BackupDestination) DownloadCompressedStream(ctx context.Context, remot if err := f.Close(); err != nil { return err } - //bd.Log.Debugf("extract %s", extractFile) + //log.Debug().Msgf("extract %s", extractFile) return nil }); err != nil { return err @@ -355,11 +354,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if writerErr != nil { if err := w.CloseWithError(writerErr); err != nil { - bd.Log.Errorf("can't close after error %v pipe writer error: %v", writerErr, err) + log.Error().Msgf("can't close after error %v pipe writer error: %v", writerErr, err) } } else { if err := w.Close(); err != nil { - bd.Log.Errorf("can't close pipe writer: %v", err) + log.Error().Msgf("can't close pipe writer: %v", err) } } }() @@ -386,7 +385,7 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc }, } archiveFiles = append(archiveFiles, file) - //bd.Log.Debugf("add %s to archive %s", filePath, remotePath) + //log.Debug().Msgf("add %s to archive %s", filePath, remotePath) } if writerErr = z.Archive(ctx, w, archiveFiles); writerErr != nil { return writerErr @@ -397,11 +396,11 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc defer func() { if readerErr != nil { if err := body.CloseWithError(readerErr); err != nil { - bd.Log.Errorf("can't close after error %v pipe reader error: %v", writerErr, err) + log.Error().Msgf("can't close after error %v pipe reader error: %v", writerErr, err) } } else { if err := body.Close(); err != nil { - bd.Log.Errorf("can't close pipe reader: %v", err) + log.Error().Msgf("can't close pipe reader: %v", err) } } }() @@ -416,10 +415,6 @@ func (bd *BackupDestination) UploadCompressedStream(ctx context.Context, baseLoc } func (bd *BackupDestination) DownloadPath(ctx 
context.Context, remotePath string, localPath string, RetriesOnFailure int, RetriesDuration time.Duration, maxSpeed uint64) error { - log := bd.Log.WithFields(apexLog.Fields{ - "path": remotePath, - "operation": "download", - }) return bd.Walk(ctx, remotePath, true, func(ctx context.Context, f RemoteFile) error { if bd.Kind() == "SFTP" && (f.Name() == "." || f.Name() == "..") { return nil @@ -429,30 +424,30 @@ func (bd *BackupDestination) DownloadPath(ctx context.Context, remotePath string startTime := time.Now() r, err := bd.GetFileReader(ctx, path.Join(remotePath, f.Name())) if err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } dstFilePath := path.Join(localPath, f.Name()) dstDirPath, _ := path.Split(dstFilePath) if err := os.MkdirAll(dstDirPath, 0750); err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } dst, err := os.Create(dstFilePath) if err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } if _, err := io.Copy(dst, r); err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } if err := dst.Close(); err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } if err := r.Close(); err != nil { - log.Error(err.Error()) + log.Error().Err(err).Send() return err } @@ -488,7 +483,7 @@ func (bd *BackupDestination) UploadPath(ctx context.Context, baseLocalPath strin } closeFile := func() { if err := f.Close(); err != nil { - bd.Log.Warnf("can't close UploadPath file descriptor %v: %v", f, err) + log.Warn().Msgf("can't close UploadPath file descriptor %v: %v", f, err) } } retry := retrier.New(retrier.ConstantBackoff(RetriesOnFailure, RetriesDuration), nil) @@ -522,7 +517,6 @@ func (bd *BackupDestination) throttleSpeed(startTime time.Time, size int64, maxS } func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhouse.ClickHouse, calcMaxSize bool, backupName string) (*BackupDestination, error) { - log := apexLog.WithField("logger", "NewBackupDestination") var err error // https://github.com/Altinity/clickhouse-backup/issues/404 if calcMaxSize { @@ -531,7 +525,7 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous return nil, err } if cfg.General.MaxFileSize > 0 && cfg.General.MaxFileSize < maxFileSize { - log.Warnf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) + log.Warn().Msgf("MAX_FILE_SIZE=%d is less than actual %d, please remove general->max_file_size section from your config", cfg.General.MaxFileSize, maxFileSize) } if cfg.General.MaxFileSize <= 0 || cfg.General.MaxFileSize < maxFileSize { cfg.General.MaxFileSize = maxFileSize @@ -541,7 +535,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous case "azblob": azblobStorage := &AzureBlob{ Config: &cfg.AzureBlob, - Log: log.WithField("logger", "AZBLOB"), } azblobStorage.Config.Path, err = ch.ApplyMacros(ctx, azblobStorage.Config.Path) if err != nil { @@ -569,7 +562,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous azblobStorage.Config.BufferSize = bufferSize return &BackupDestination{ azblobStorage, - log.WithField("logger", "azure"), cfg.AzureBlob.CompressionFormat, cfg.AzureBlob.CompressionLevel, }, nil @@ -592,7 +584,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous Concurrency: cfg.S3.Concurrency, BufferSize: 128 * 1024, PartSize: partSize, - Log: 
log.WithField("logger", "S3"), } s3Storage.Config.Path, err = ch.ApplyMacros(ctx, s3Storage.Config.Path) if err != nil { @@ -613,7 +604,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ s3Storage, - log.WithField("logger", "s3"), cfg.S3.CompressionFormat, cfg.S3.CompressionLevel, }, nil @@ -638,7 +628,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ googleCloudStorage, - log.WithField("logger", "gcs"), cfg.GCS.CompressionFormat, cfg.GCS.CompressionLevel, }, nil @@ -650,14 +639,12 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ tencentStorage, - log.WithField("logger", "cos"), cfg.COS.CompressionFormat, cfg.COS.CompressionLevel, }, nil case "ftp": ftpStorage := &FTP{ Config: &cfg.FTP, - Log: log.WithField("logger", "FTP"), } ftpStorage.Config.Path, err = ch.ApplyMacros(ctx, ftpStorage.Config.Path) if err != nil { @@ -665,7 +652,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ ftpStorage, - log.WithField("logger", "FTP"), cfg.FTP.CompressionFormat, cfg.FTP.CompressionLevel, }, nil @@ -679,7 +665,6 @@ func NewBackupDestination(ctx context.Context, cfg *config.Config, ch *clickhous } return &BackupDestination{ sftpStorage, - log.WithField("logger", "SFTP"), cfg.SFTP.CompressionFormat, cfg.SFTP.CompressionLevel, }, nil diff --git a/pkg/storage/object_disk/object_disk.go b/pkg/storage/object_disk/object_disk.go index 933b9bae..2f43810a 100644 --- a/pkg/storage/object_disk/object_disk.go +++ b/pkg/storage/object_disk/object_disk.go @@ -18,8 +18,8 @@ import ( "github.com/Altinity/clickhouse-backup/v2/pkg/config" "github.com/Altinity/clickhouse-backup/v2/pkg/storage" "github.com/antchfx/xmlquery" - apexLog "github.com/apex/log" "github.com/puzpuzpuz/xsync" + "github.com/rs/zerolog/log" ) type MetadataVersion uint32 @@ -207,7 +207,7 @@ func (c *ObjectStorageConnection) GetRemoteStorage() storage.RemoteStorage { case "azure", "azure_blob_storage": return c.AzureBlob } - apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) + log.Fatal().Stack().Msgf("invalid ObjectStorageConnection.type %s", c.Type) return nil } @@ -218,7 +218,7 @@ func (c *ObjectStorageConnection) GetRemoteBucket() string { case "azure", "azure_blob_storage": return c.AzureBlob.Config.Container } - apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) + log.Fatal().Stack().Msgf("invalid ObjectStorageConnection.type %s", c.Type) return "" } @@ -229,7 +229,7 @@ func (c *ObjectStorageConnection) GetRemotePath() string { case "azure", "azure_blob_storage": return c.AzureBlob.Config.Path } - apexLog.Fatalf("invalid ObjectStorageConnection.type %s", c.Type) + log.Fatal().Stack().Msgf("invalid ObjectStorageConnection.type %s", c.Type) return "" } @@ -269,7 +269,7 @@ func ReadMetadataFromFile(path string) (*Metadata, error) { func ReadMetadataFromReader(metadataFile io.ReadCloser, path string) (*Metadata, error) { defer func() { if err := metadataFile.Close(); err != nil { - apexLog.Warnf("can't close reader %s: %v", path, err) + log.Warn().Msgf("can't close reader %s: %v", path, err) } }() @@ -288,7 +288,7 @@ func WriteMetadataToFile(metadata *Metadata, path string) error { } defer func() { if err = metadataFile.Close(); err != nil { - apexLog.Warnf("can't close %s: %v", path, err) + log.Warn().Msgf("can't close %s: %v", path, err) } }() return 
metadata.writeToFile(metadataFile) @@ -347,7 +347,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) e creds.S3AccessKey = strings.Trim(accessKeyNode.InnerText(), "\r\n \t") creds.S3SecretKey = strings.Trim(secretKeyNode.InnerText(), "\r\n \t") } else { - apexLog.Warnf("%s -> /%s/storage_configuration/disks/%s doesn't contains and environment variables will use", configFile, root.Data, diskName) + log.Warn().Msgf("%s -> /%s/storage_configuration/disks/%s doesn't contains and environment variables will use", configFile, root.Data, diskName) creds.S3AssumeRole = os.Getenv("AWS_ROLE_ARN") if useEnvironmentCredentials != nil { creds.S3AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") @@ -398,7 +398,7 @@ func getObjectDisksCredentials(ctx context.Context, ch *clickhouse.ClickHouse) e if childCreds, childExists := DisksCredentials.Load(childDiskName); childExists { DisksCredentials.Store(diskName, childCreds) } else { - apexLog.Warnf("disk %s with type %s, reference to childDisk %s which not contains DiskCredentials", diskName, diskType, childDiskName) + log.Warn().Msgf("disk %s with type %s, reference to childDisk %s which not contains DiskCredentials", diskName, diskType, childDiskName) } } } @@ -487,7 +487,7 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf // need for CopyObject s3cfg.ObjectDiskPath = s3cfg.Path s3cfg.Debug = cfg.S3.Debug - connection.S3 = &storage.S3{Config: &s3cfg, Log: apexLog.WithField("logger", "S3")} + connection.S3 = &storage.S3{Config: &s3cfg} if err = connection.S3.Connect(ctx); err != nil { return nil, err } @@ -526,7 +526,7 @@ func makeObjectDiskConnection(ctx context.Context, ch *clickhouse.ClickHouse, cf azureCfg.Container = creds.AzureContainerName } azureCfg.Debug = cfg.AzureBlob.Debug - connection.AzureBlob = &storage.AzureBlob{Config: &azureCfg, Log: apexLog.WithField("logger", "AZBLOB")} + connection.AzureBlob = &storage.AzureBlob{Config: &azureCfg} if err = connection.AzureBlob.Connect(ctx); err != nil { return nil, err } diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go index e45986f1..fa73c4ac 100644 --- a/pkg/storage/s3.go +++ b/pkg/storage/s3.go @@ -14,7 +14,6 @@ import ( "time" "github.com/Altinity/clickhouse-backup/v2/pkg/config" - apexLog "github.com/apex/log" "github.com/aws/aws-sdk-go-v2/aws" v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" awsV2Config "github.com/aws/aws-sdk-go-v2/config" @@ -28,25 +27,27 @@ import ( awsV2Logging "github.com/aws/smithy-go/logging" awsV2http "github.com/aws/smithy-go/transport/http" "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" ) -type S3LogToApexLogAdapter struct { - apexLog *apexLog.Logger +type S3LogToZeroLogAdapter struct { + logger zerolog.Logger } -func newS3Logger(log *apexLog.Entry) S3LogToApexLogAdapter { - return S3LogToApexLogAdapter{ - apexLog: log.Logger, +func newS3Logger(logger zerolog.Logger) S3LogToZeroLogAdapter { + return S3LogToZeroLogAdapter{ + logger: logger, } } -func (S3LogToApexLogAdapter S3LogToApexLogAdapter) Logf(severity awsV2Logging.Classification, msg string, args ...interface{}) { +func (S3LogToApexLogAdapter S3LogToZeroLogAdapter) Logf(severity awsV2Logging.Classification, msg string, args ...interface{}) { msg = fmt.Sprintf("[s3:%s] %s", severity, msg) if len(args) > 0 { - S3LogToApexLogAdapter.apexLog.Infof(msg, args...) + S3LogToApexLogAdapter.logger.Info().Msgf(msg, args...) 
} else { - S3LogToApexLogAdapter.apexLog.Info(msg) + S3LogToApexLogAdapter.logger.Info().Msg(msg) } } @@ -89,7 +90,6 @@ type S3 struct { uploader *s3manager.Uploader downloader *s3manager.Downloader Config *config.S3Config - Log *apexLog.Entry PartSize int64 Concurrency int BufferSize int @@ -140,7 +140,7 @@ func (s *S3) Connect(ctx context.Context) error { } if s.Config.Debug { - awsConfig.Logger = newS3Logger(s.Log) + awsConfig.Logger = newS3Logger(log.Logger) awsConfig.ClientLogMode = aws.LogRetries | aws.LogRequest | aws.LogResponse } @@ -214,13 +214,13 @@ func (s *S3) GetFileReaderAbsolute(ctx context.Context, key string) (io.ReadClos var stateErr *s3types.InvalidObjectState if errors.As(httpErr, &stateErr) { if strings.Contains(string(stateErr.StorageClass), "GLACIER") { - s.Log.Warnf("GetFileReader %s, storageClass %s receive error: %s", key, stateErr.StorageClass, stateErr.Error()) + log.Warn().Msgf("GetFileReader %s, storageClass %s receive error: %s", key, stateErr.StorageClass, stateErr.Error()) if restoreErr := s.restoreObject(ctx, key); restoreErr != nil { - s.Log.Warnf("restoreObject %s, return error: %v", key, restoreErr) + log.Warn().Msgf("restoreObject %s, return error: %v", key, restoreErr) return nil, err } if resp, err = s.client.GetObject(ctx, params); err != nil { - s.Log.Warnf("second GetObject %s, return error: %v", key, err) + log.Warn().Msgf("second GetObject %s, return error: %v", key, err) return nil, err } return resp.Body, nil @@ -466,7 +466,7 @@ func (s *S3) remotePager(ctx context.Context, s3Path string, recursive bool, pro func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, dstKey string) (int64, error) { dstKey = path.Join(s.Config.ObjectDiskPath, dstKey) - s.Log.Debugf("S3->CopyObject %s/%s -> %s/%s", srcBucket, srcKey, s.Config.Bucket, dstKey) + log.Debug().Msgf("S3->CopyObject %s/%s -> %s/%s", srcBucket, srcKey, s.Config.Bucket, dstKey) // just copy object without multipart if srcSize < 5*1024*1024*1024 || strings.Contains(s.Config.Endpoint, "storage.googleapis.com") { params := &s3.CopyObjectInput{ @@ -704,7 +704,7 @@ func (s *S3) restoreObject(ctx context.Context, key string) error { if res.Restore != nil && *res.Restore == "ongoing-request=\"true\"" { i += 1 - s.Log.Warnf("%s still not restored, will wait %d seconds", key, i*5) + log.Warn().Msgf("%s still not restored, will wait %d seconds", key, i*5) time.Sleep(time.Duration(i*5) * time.Second) } else { return nil diff --git a/pkg/storage/sftp.go b/pkg/storage/sftp.go index 7bad272e..fba1ee3b 100644 --- a/pkg/storage/sftp.go +++ b/pkg/storage/sftp.go @@ -13,8 +13,8 @@ import ( "syscall" "time" - "github.com/apex/log" libSFTP "github.com/pkg/sftp" + "github.com/rs/zerolog/log" "golang.org/x/crypto/ssh" ) @@ -27,7 +27,7 @@ type SFTP struct { func (sftp *SFTP) Debug(msg string, v ...interface{}) { if sftp.Config.Debug { - log.Infof(msg, v...) + log.Info().Msgf(msg, v...) 
} } @@ -140,7 +140,7 @@ func (sftp *SFTP) DeleteDirectory(ctx context.Context, dirPath string) error { sftp.Debug("[SFTP_DEBUG] DeleteDirectory %s", dirPath) defer func() { if err := sftp.sftpClient.RemoveDirectory(dirPath); err != nil { - log.Warnf("RemoveDirectory err=%v", err) + log.Warn().Msgf("RemoveDirectory err=%v", err) } }() @@ -153,11 +153,11 @@ func (sftp *SFTP) DeleteDirectory(ctx context.Context, dirPath string) error { filePath := path.Join(dirPath, file.Name()) if file.IsDir() { if err := sftp.DeleteDirectory(ctx, filePath); err != nil { - log.Warnf("sftp.DeleteDirectory(%s) err=%v", filePath, err) + log.Warn().Msgf("sftp.DeleteDirectory(%s) err=%v", filePath, err) } } else { if err := sftp.sftpClient.Remove(filePath); err != nil { - log.Warnf("sftp.Remove(%s) err=%v", filePath, err) + log.Warn().Msgf("sftp.Remove(%s) err=%v", filePath, err) } } } @@ -231,7 +231,7 @@ func (sftp *SFTP) PutFile(ctx context.Context, key string, localFile io.ReadClos func (sftp *SFTP) PutFileAbsolute(ctx context.Context, key string, localFile io.ReadCloser) error { if err := sftp.sftpClient.MkdirAll(path.Dir(key)); err != nil { - log.Warnf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(key), err) + log.Warn().Msgf("sftp.sftpClient.MkdirAll(%s) err=%v", path.Dir(key), err) } remoteFile, err := sftp.sftpClient.Create(key) if err != nil { @@ -239,7 +239,7 @@ func (sftp *SFTP) PutFileAbsolute(ctx context.Context, key string, localFile io. } defer func() { if err := remoteFile.Close(); err != nil { - log.Warnf("can't close %s err=%v", key, err) + log.Warn().Msgf("can't close %s err=%v", key, err) } }() if _, err = remoteFile.ReadFrom(localFile); err != nil { diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go index 0e332e5c..3ffada57 100644 --- a/pkg/storage/utils.go +++ b/pkg/storage/utils.go @@ -2,9 +2,9 @@ package storage import ( "fmt" - "github.com/apex/log" "github.com/klauspost/compress/zstd" "github.com/mholt/archiver/v4" + "github.com/rs/zerolog/log" "sort" "strings" "time" @@ -23,12 +23,12 @@ func GetBackupsToDeleteRemote(backups []Backup, keep int) []Backup { deletedBackups := make([]Backup, len(backups)-keep) copied := copy(deletedBackups, backups[keep:]) if copied != len(backups)-keep { - log.Warnf("copied wrong items from backup list expected=%d, actual=%d", len(backups)-keep, copied) + log.Warn().Msgf("copied wrong items from backup list expected=%d, actual=%d", len(backups)-keep, copied) } keepBackups := make([]Backup, keep) copied = copy(keepBackups, backups[:keep]) if copied != keep { - log.Warnf("copied wrong items from backup list expected=%d, actual=%d", keep, copied) + log.Warn().Msgf("copied wrong items from backup list expected=%d, actual=%d", keep, copied) } var findRequiredBackup func(b Backup) findRequiredBackup = func(b Backup) { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 3028df6a..9e09afcd 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -3,7 +3,7 @@ package utils import ( "context" "fmt" - "github.com/apex/log" + "github.com/rs/zerolog/log" "os/exec" "regexp" "strings" @@ -47,27 +47,27 @@ func HumanizeDuration(d time.Duration) string { if d >= year { years := d / year if _, err := fmt.Fprintf(&b, "%dy", years); err != nil { - log.Warnf("HumanizeDuration error: %v", err) + log.Warn().Msgf("HumanizeDuration error: %v", err) } d -= years * year } days := d / day d -= days * day if _, err := fmt.Fprintf(&b, "%dd%s", days, d); err != nil { - log.Warnf("HumanizeDuration error: %v", err) + log.Warn().Msgf("HumanizeDuration error: %v", err) } 
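Illustrative sketch (not part of the patch): AzureBlob.logf and SFTP.Debug above keep their debug-gated behavior after the migration, only switching from an injected apex entry to the global zerolog logger. A condensed sketch of that gating pattern; debugf here is a hypothetical free function, whereas the patch keeps per-storage methods:

package example

import "github.com/rs/zerolog/log"

// debugf logs verbose storage traffic at Info level when the storage backend has Debug
// enabled in its config, and at Debug level otherwise, as AzureBlob.logf does above.
func debugf(debugEnabled bool, format string, args ...interface{}) {
	evt := log.Debug()
	if debugEnabled {
		evt = log.Info()
	}
	evt.Msgf(format, args...)
}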
return b.String() } func ExecCmd(ctx context.Context, timeout time.Duration, cmd string, args ...string) error { out, err := ExecCmdOut(ctx, timeout, cmd, args...) - log.Debug(out) + log.Debug().Msg(out) return err } func ExecCmdOut(ctx context.Context, timeout time.Duration, cmd string, args ...string) (string, error) { ctx, cancel := context.WithTimeout(ctx, timeout) - log.Debugf("%s %s", cmd, strings.Join(args, " ")) + log.Debug().Msgf("%s %s", cmd, strings.Join(args, " ")) out, err := exec.CommandContext(ctx, cmd, args...).CombinedOutput() cancel() return string(out), err diff --git a/test/integration/install_delve.sh b/test/integration/install_delve.sh index e91037a4..227c314c 100755 --- a/test/integration/install_delve.sh +++ b/test/integration/install_delve.sh @@ -18,6 +18,7 @@ ln -nsfv /usr/lib/go-1.22/bin/go /usr/bin/go CGO_ENABLED=0 GO111MODULE=on go install -ldflags "-s -w -extldflags '-static'" github.com/go-delve/delve/cmd/dlv@latest # GO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -tags osusergo,netgo -gcflags "all=-N -l" -ldflags "-extldflags '-static' -X 'main.version=debug'" -o build/linux/amd64/clickhouse-backup ./cmd/clickhouse-backup +# /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-backup/config-azblob.yml download --partitions=test_partitions_TestIntegrationAzure.t?:(0,'2022-01-02'),(0,'2022-01-03') full_backup_3691696362844433277 # /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-backup/config-azblob.yml restore --schema TestIntegrationAzure_full_6516689450475708573 # /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-server/config.d/ch-backup.yaml upload debug_upload --table # USE_RESUMABLE_STATE=0 CLICKHOUSE_SKIP_TABLES=*.test_memory /root/go/bin/dlv --listen=:40001 --headless=true --api-version=2 --accept-multiclient exec /bin/clickhouse-backup -- -c /etc/clickhouse-backup/config-s3.yml download test_skip_full_backup diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index 6425895c..5c99534a 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -22,16 +22,20 @@ import ( "testing" "time" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/rs/zerolog/pkgerrors" + "golang.org/x/mod/semver" + stdlog "log" + _ "github.com/ClickHouse/clickhouse-go/v2" - "github.com/apex/log" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/mod/semver" "github.com/Altinity/clickhouse-backup/v2/pkg/clickhouse" "github.com/Altinity/clickhouse-backup/v2/pkg/config" - "github.com/Altinity/clickhouse-backup/v2/pkg/logcli" + "github.com/Altinity/clickhouse-backup/v2/pkg/log_helper" "github.com/Altinity/clickhouse-backup/v2/pkg/partition" "github.com/Altinity/clickhouse-backup/v2/pkg/status" "github.com/Altinity/clickhouse-backup/v2/pkg/utils" @@ -42,7 +46,11 @@ var dockerPool *pool.ObjectPool // setup log level func init() { - log.SetHandler(logcli.New(os.Stderr)) + zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs + zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack + consoleWriter := zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"} + log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Logger() + 
stdlog.SetOutput(log.Logger) logLevel := "info" if os.Getenv("LOG_LEVEL") != "" && os.Getenv("LOG_LEVEL") != "info" { logLevel = os.Getenv("LOG_LEVEL") @@ -50,7 +58,7 @@ func init() { if os.Getenv("TEST_LOG_LEVEL") != "" && os.Getenv("TEST_LOG_LEVEL") != "info" { logLevel = os.Getenv("TEST_LOG_LEVEL") } - log.SetLevelFromString(logLevel) + log_helper.SetLogLevelFromString(logLevel) runParallel, isExists := os.LookupEnv("RUN_PARALLEL") if !isExists { @@ -58,7 +66,7 @@ func init() { } runParallelInt, err := strconv.Atoi(runParallel) if err != nil { - log.Fatalf("invalid RUN_PARALLEL environment variable value %s", runParallel) + log.Fatal().Msgf("invalid RUN_PARALLEL environment variable value %s", runParallel) } ctx := context.Background() factory := pool.NewPooledObjectFactorySimple( func(context.Context) (interface{}, error) { @@ -105,16 +113,16 @@ var defaultTestData = []TestDataStruct{ Database: dbNameOrdinary, DatabaseEngine: "Ordinary", // 24.8 shall resolve https://github.com/ClickHouse/ClickHouse/issues/67669 Name: ".inner.table1", - Schema: "(Date Date, TimeStamp DateTime, Log String) ENGINE = MergeTree(Date, (TimeStamp, Log), 8192)", + Schema: "(Date Date, TimeStamp DateTime, Logger String) ENGINE = MergeTree(Date, (TimeStamp, Logger), 8192)", Rows: []map[string]interface{}{ - {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:14"), "Log": "One"}, - {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:15"), "Log": "Two"}, - {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:16"), "Log": "Three"}, - {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:17"), "Log": "Four"}, - {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:18"), "Log": "Five"}, - {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:19"), "Log": "Six"}, + {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:14"), "Logger": "One"}, + {"Date": toDate("2018-10-23"), "TimeStamp": toTS("2018-10-23 07:37:15"), "Logger": "Two"}, + {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:16"), "Logger": "Three"}, + {"Date": toDate("2018-10-24"), "TimeStamp": toTS("2018-10-24 07:37:17"), "Logger": "Four"}, + {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:18"), "Logger": "Five"}, + {"Date": toDate("2019-10-25"), "TimeStamp": toTS("2019-01-25 07:37:19"), "Logger": "Six"}, }, - Fields: []string{"Date", "TimeStamp", "Log"}, + Fields: []string{"Date", "TimeStamp", "Logger"}, OrderBy: "TimeStamp", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -353,11 +361,11 @@ var defaultIncrementData = []TestDataStruct{ Database: dbNameOrdinary, DatabaseEngine: "Ordinary", // 24.8 shall resolve https://github.com/ClickHouse/ClickHouse/issues/67669 Name: ".inner.table1", - Schema: "(Date Date, TimeStamp DateTime, Log String) ENGINE = MergeTree(Date, (TimeStamp, Log), 8192)", + Schema: "(Date Date, TimeStamp DateTime, Logger String) ENGINE = MergeTree(Date, (TimeStamp, Logger), 8192)", Rows: []map[string]interface{}{ - {"Date": toDate("2019-10-26"), "TimeStamp": toTS("2019-01-26 07:37:19"), "Log": "Seven"}, + {"Date": toDate("2019-10-26"), "TimeStamp": toTS("2019-01-26 07:37:19"), "Logger": "Seven"}, }, - Fields: []string{"Date", "TimeStamp", "Log"}, + Fields: []string{"Date", "TimeStamp", "Logger"}, OrderBy: "TimeStamp", }, { Database: dbNameOrdinary, DatabaseEngine: "Ordinary", @@ -536,7 +544,7 @@ func TestLongListRemote(t *testing.T) { cacheClearDuration := time.Since(startCacheClear) 
r.Greater(cacheClearDuration, cachedDuration, "cacheClearDuration=%s shall be greater cachedDuration=%s", cacheClearDuration.String(), cachedDuration.String()) - log.Debugf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) + log.Debug().Msgf("noCacheDuration=%s cachedDuration=%s cacheClearDuration=%s", noCacheDuration.String(), cachedDuration.String(), cacheClearDuration.String()) testListRemoteAllBackups := make([]string, totalCacheCount) for i := 0; i < totalCacheCount; i++ { @@ -756,14 +764,14 @@ func TestRBAC(t *testing.T) { createRBACObjects := func(drop bool) { if drop { - log.Debug("drop all RBAC related objects") + log.Debug().Msg("drop all RBAC related objects") env.queryWithNoError(r, "DROP SETTINGS PROFILE `test.rbac-name`") env.queryWithNoError(r, "DROP QUOTA `test.rbac-name`") env.queryWithNoError(r, "DROP ROW POLICY `test.rbac-name` ON default.test_rbac") env.queryWithNoError(r, "DROP ROLE `test.rbac-name`") env.queryWithNoError(r, "DROP USER `test.rbac-name`") } - log.Debug("create RBAC related objects") + log.Debug().Msg("create RBAC related objects") env.queryWithNoError(r, "CREATE SETTINGS PROFILE `test.rbac-name` SETTINGS max_execution_time=60") env.queryWithNoError(r, "CREATE ROLE `test.rbac-name` SETTINGS PROFILE `test.rbac-name`") env.queryWithNoError(r, "CREATE USER `test.rbac-name` IDENTIFIED BY 'test_rbac_password' DEFAULT ROLE `test.rbac-name`") @@ -777,21 +785,21 @@ func TestRBAC(t *testing.T) { env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", config, "delete", "local", "test_rbac_backup") env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") - log.Debug("create conflicted RBAC objects") + log.Debug().Msg("create conflicted RBAC objects") createRBACObjects(true) env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") - log.Debug("download+restore RBAC") + log.Debug().Msg("download+restore RBAC") env.DockerExecNoError(r, "clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 CLICKHOUSE_BACKUP_CONFIG="+config+" clickhouse-backup download test_rbac_backup") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac test_rbac_backup") - log.Debug(out) + log.Debug().Msg(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-xec", "ALLOW_EMPTY_BACKUPS=1 clickhouse-backup -c "+config+" restore --rm --rbac-only test_rbac_backup") - log.Debug(out) + log.Debug().Msg(out) r.Contains(out, "RBAC successfully restored") r.NoError(err) env.DockerExecNoError(r, "clickhouse", "ls", "-lah", "/var/lib/clickhouse/access") @@ -817,7 +825,7 @@ func TestRBAC(t *testing.T) { r.NoError(err) found := false for _, row := range rbacRows { - log.Debugf("rbacType=%s expectedValue=%s row.Name=%s", rbacType, expectedValue, row.Name) + log.Debug().Msgf("rbacType=%s expectedValue=%s row.Name=%s", rbacType, expectedValue, row.Name) if expectedValue == row.Name { found = true break @@ -938,7 +946,7 @@ func TestServerAPI(t *testing.T) { randFields := 10 fillDatabaseForAPIServer(maxTables, minFields, randFields, env, r, fieldTypes) - log.Debug("Run `clickhouse-backup server --watch` in background") + log.Debug().Msg("Run `clickhouse-backup server --watch` in background") env.DockerExecBackgroundNoError(r,"clickhouse-backup", "bash", "-ce", "clickhouse-backup server --watch 
&>>/tmp/clickhouse-backup-server.log") time.Sleep(1 * time.Second) @@ -952,7 +960,7 @@ func TestServerAPI(t *testing.T) { testAPIBackupTablesRemote(r, env) - log.Debug("Check /backup/actions") + log.Debug().Msg("Check /backup/actions") env.queryWithNoError(r, "SELECT count() FROM system.backup_actions") testAPIBackupList(t, r, env) @@ -978,7 +986,7 @@ func TestServerAPI(t *testing.T) { func testAPIRestart(r *require.Assertions, env *TestEnvironment) { out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL -XPOST 'http://localhost:7171/restart'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "acknowledged") @@ -993,7 +1001,7 @@ func testAPIRestart(r *require.Assertions, env *TestEnvironment) { func runClickHouseClientInsertSystemBackupActions(r *require.Assertions, env *TestEnvironment, commands []string, needWait bool) { sql := "INSERT INTO system.backup_actions(command) " + "VALUES ('" + strings.Join(commands, "'),('") + "')" out, err := env.DockerExecOut("clickhouse", "bash", "-ce", fmt.Sprintf("clickhouse client --echo -mn -q \"%s\"", sql)) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) if needWait { for _, command := range commands { @@ -1043,10 +1051,10 @@ func testAPIBackupActions(r *require.Assertions, env *TestEnvironment) { } func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/watch + /backup/kill") + log.Debug().Msg("Check /backup/watch + /backup/kill") runKillCommand := func(command string) { out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL 'http://localhost:7171/backup/kill?command=%s'", command)) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) } checkWatchBackup := func(expectedCount uint64) { @@ -1073,7 +1081,7 @@ func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { checkCanceledCommand(1) out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/watch'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) time.Sleep(7 * time.Second) @@ -1083,15 +1091,15 @@ func testAPIWatchAndKill(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/delete/{where}/{name}") + log.Debug().Msg("Check /backup/delete/{where}/{name}") for i := 1; i <= apiBackupNumber; i++ { out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/local/z_backup_%d'", i)) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/remote/z_backup_%d'", i)) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -1101,7 +1109,7 @@ func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { r.Contains(out, "clickhouse_backup_last_delete_status 1") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XGET 'http://localhost:7171/backup/list'")) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) scanner := bufio.NewScanner(strings.NewReader(out)) for scanner.Scan() { @@ -1116,7 +1124,7 @@ func testAPIBackupDelete(r *require.Assertions, env
*TestEnvironment) { listItem := backupJSON{} r.NoError(json.Unmarshal(scanner.Bytes(), &listItem)) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/delete/%s/%s'", listItem.Location, listItem.Name)) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) } @@ -1125,15 +1133,15 @@ func testAPIBackupDelete(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupClean(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/clean/ /backup/clean_remote_broken/ and /backup/actions fot these two commands") + log.Debug().Msg("Check /backup/clean/ /backup/clean_remote_broken/ and /backup/actions for these two commands") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean'")) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", fmt.Sprintf("curl -sfL -XPOST 'http://localhost:7171/backup/clean/remote_broken'")) - log.Debugf(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "\"status\":\"error\"") @@ -1142,7 +1150,7 @@ func testAPIBackupClean(r *require.Assertions, env *TestEnvironment) { } func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /metrics clickhouse_backup_last_backup_size_remote") + log.Debug().Msg("Check /metrics clickhouse_backup_last_backup_size_remote") var lastRemoteSize int64 r.NoError(env.ch.SelectSingleRowNoCtx(&lastRemoteSize, "SELECT size FROM system.backup_list WHERE name='z_backup_5' AND location='remote'")) @@ -1156,11 +1164,11 @@ func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { r.Greater(uint64(lastRemoteSize), realTotalBytes) out, err := env.DockerExecOut("clickhouse-backup", "curl", "-sL", "http://localhost:7171/metrics") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, fmt.Sprintf("clickhouse_backup_last_backup_size_remote %d", lastRemoteSize)) - log.Debug("Check /metrics clickhouse_backup_number_backups_*") + log.Debug().Msg("Check /metrics clickhouse_backup_number_backups_*") r.Contains(out, fmt.Sprintf("clickhouse_backup_number_backups_local %d", apiBackupNumber)) // +1 watch backup env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "list", "remote") @@ -1170,13 +1178,13 @@ func testAPIMetrics(r *require.Assertions, env *TestEnvironment) { } func testAPIDeleteLocalDownloadRestore(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") + log.Debug().Msg("Check /backup/delete/local/{name} + /backup/download/{name} + /backup/restore/{name}?rm=1") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/delete/local/z_backup_$i\"; curl -sfL -XPOST \"http://localhost:7171/backup/download/z_backup_$i\"; sleep 2; curl -sfL -XPOST \"http://localhost:7171/backup/restore/z_backup_$i?rm=1\"; sleep 8; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "another operation is currently running") r.NotContains(out, "error") @@ -1193,27 +1201,27 @@ func testAPIDeleteLocalDownloadRestore(r *require.Assertions, env *TestEnvironme
} func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/list") + log.Debug().Msg("Check /backup/list") out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Debug("Check /backup/list/local") + log.Debug().Msg("Check /backup/list/local") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/local'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { r.True(assert.Regexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"remote\",\"required\":\"\",\"desc\":\"tar, regular\"}", i)), out)) } - log.Debug("Check /backup/list/remote") + log.Debug().Msg("Check /backup/list/remote") out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sfL 'http://localhost:7171/backup/list/remote'") - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) for i := 1; i <= apiBackupNumber; i++ { r.True(assert.NotRegexp(t, regexp.MustCompile(fmt.Sprintf("{\"name\":\"z_backup_%d\",\"created\":\"\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}\",\"size\":\\d+,\"location\":\"local\",\"required\":\"\",\"desc\":\"regular\"}", i)), out)) @@ -1222,13 +1230,13 @@ func testAPIBackupList(t *testing.T, r *require.Assertions, env *TestEnvironment } func testAPIBackupUpload(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/upload") + log.Debug().Msg("Check /backup/upload") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/upload/z_backup_$i\"; sleep 2; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "error") r.NotContains(out, "another operation is currently running") @@ -1243,12 +1251,12 @@ func testAPIBackupUpload(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/tables") + log.Debug().Msg("Check /backup/tables") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables\"", ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "long_schema") r.NotContains(out, "Connection refused") @@ -1258,12 +1266,12 @@ func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { r.NotContains(out, "INFORMATION_SCHEMA") r.NotContains(out, "information_schema") - log.Debug("Check /backup/tables/all") + log.Debug().Msg("Check /backup/tables/all") out, err = 
env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables/all\"", ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "long_schema") r.Contains(out, "system") @@ -1278,12 +1286,12 @@ func testAPIBackupTables(r *require.Assertions, env *TestEnvironment) { func testAPIBackupTablesRemote(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/tables?remote_backup=z_backup_1") + log.Debug().Msg("Check /backup/tables?remote_backup=z_backup_1") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", "curl -sfL \"http://localhost:7171/backup/tables?remote_backup=z_backup_1\"", ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "long_schema") r.NotContains(out, "system") @@ -1296,7 +1304,7 @@ func testAPIBackupTablesRemote(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupVersion(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/version") + log.Debug().Msg("Check /backup/version") cliVersion, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup --version 2>/dev/null --version | grep 'Version' | cut -d ':' -f 2 | xargs") r.NoError(err) apiVersion, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "curl -sL http://localhost:7171/backup/version | jq -r .version") @@ -1308,13 +1316,13 @@ func testAPIBackupVersion(r *require.Assertions, env *TestEnvironment) { } func testAPIBackupCreate(r *require.Assertions, env *TestEnvironment) { - log.Debug("Check /backup/create") + log.Debug().Msg("Check /backup/create") out, err := env.DockerExecOut( "clickhouse-backup", "bash", "-xe", "-c", fmt.Sprintf("sleep 3; for i in {1..%d}; do date; curl -sfL -XPOST \"http://localhost:7171/backup/create?table=long_schema.*&name=z_backup_$i\"; sleep 1.5; done", apiBackupNumber), ) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.NotContains(out, "Connection refused") r.NotContains(out, "another operation is currently running") @@ -1325,7 +1333,7 @@ func testAPIBackupCreate(r *require.Assertions, env *TestEnvironment) { } func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch *TestEnvironment, r *require.Assertions, fieldTypes []string) { - log.Debugf("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) + log.Debug().Msgf("Create %d `long_schema`.`t%%d` tables with with %d..%d fields...", maxTables, minFields, minFields+randFields) ch.queryWithNoError(r, "CREATE DATABASE IF NOT EXISTS long_schema") for i := 0; i < maxTables; i++ { sql := fmt.Sprintf("CREATE TABLE long_schema.t%d (id UInt64", i) @@ -1339,7 +1347,7 @@ func fillDatabaseForAPIServer(maxTables int, minFields int, randFields int, ch * sql = fmt.Sprintf("INSERT INTO long_schema.t%d(id) SELECT number FROM numbers(100)", i) ch.queryWithNoError(r, sql) } - log.Debug("...DONE") + log.Debug().Msg("...DONE") } func TestSkipNotExistsTable(t *testing.T) { @@ -1349,7 +1357,7 @@ func TestSkipNotExistsTable(t *testing.T) { env, r := NewTestEnvironment(t) env.connectWithWait(r, 0*time.Second, 1*time.Second, 1*time.Minute) - log.Debug("Check skip not exist errors") + log.Debug().Msg("Check skip not exist errors") env.queryWithNoError(r, "CREATE DATABASE freeze_not_exists") ifNotExistsCreateSQL := "CREATE TABLE IF NOT EXISTS freeze_not_exists.freeze_not_exists (id UInt64) ENGINE=MergeTree() ORDER BY id" ifNotExistsInsertSQL := "INSERT INTO freeze_not_exists.freeze_not_exists SELECT 
number FROM numbers(1000)" @@ -1378,19 +1386,19 @@ func TestSkipNotExistsTable(t *testing.T) { err = env.ch.Query(ifNotExistsInsertSQL) r.NoError(err) if i < 5 { - log.Debugf("pauseChannel <- %d", 0) + log.Debug().Msgf("pauseChannel <- %d", 0) pauseChannel <- 0 } else { - log.Debugf("pauseChannel <- %d", pause/i) + log.Debug().Msgf("pauseChannel <- %d", pause/i) pauseChannel <- pause / i } startTime := time.Now() out, err := env.DockerExecOut("clickhouse-backup", "bash", "-ce", "LOG_LEVEL=debug CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup create --table freeze_not_exists.freeze_not_exists "+testBackupName) - log.Debug(out) + log.Debug().Msg(out) if (err != nil && (strings.Contains(out, "can't freeze") || strings.Contains(out, "no tables for backup"))) || (err == nil && !strings.Contains(out, "can't freeze")) { parseTime := func(line string) time.Time { - parsedTime, err := time.Parse("2006/01/02 15:04:05.999999", line[:26]) + parsedTime, err := time.Parse("2006-01-02 15:04:05.999", line[:23]) if err != nil { r.Failf("Error parsing time", "%s, : %v", line, err) } @@ -1419,7 +1427,7 @@ func TestSkipNotExistsTable(t *testing.T) { if strings.Contains(out, "code: 60") && err == nil { freezeErrorHandled = true - log.Debug("CODE 60 catched") + log.Debug().Msg("CODE 60 catched") <-resumeChannel env.DockerExecNoError(r, "clickhouse-backup", "bash", "-ec", "CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/config-s3.yml clickhouse-backup delete local "+testBackupName) break @@ -1437,11 +1445,11 @@ func TestSkipNotExistsTable(t *testing.T) { wg.Done() }() for pause := range pauseChannel { - log.Debugf("%d <- pauseChannel", pause) + log.Debug().Msgf("%d <- pauseChannel", pause) if pause > 0 { pauseStart := time.Now() time.Sleep(time.Duration(pause) * time.Nanosecond) - log.Debugf("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) + log.Debug().Msgf("pause=%s pauseStart=%s", time.Duration(pause).String(), pauseStart.String()) err = env.ch.DropTable(clickhouse.Table{Database: "freeze_not_exists", Name: "freeze_not_exists"}, ifNotExistsCreateSQL, "", false, chVersion, "") r.NoError(err) } @@ -1454,10 +1462,10 @@ func TestSkipNotExistsTable(t *testing.T) { if isAtomic, err := env.ch.IsAtomic("freeze_not_exists"); err == nil && isAtomic { dropDbSQL += " SYNC" } - // env.queryWithNoError(r, dropDbSQL) + env.queryWithNoError(r, dropDbSQL) err = env.ch.Query(dropDbSQL) if err != nil { - env.ch.Log.Errorf("%s error: %v", dropDbSQL, err) + log.Error().Msgf("%s error: %v", dropDbSQL, err) } r.NoError(err) t.Log("TestSkipNotExistsTable DONE, ALL OK") @@ -1821,7 +1829,7 @@ func TestCheckSystemPartsColumns(t *testing.T) { if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "code: 524") || strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) - log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debug().Msgf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } env.queryWithNoError(r, "INSERT INTO "+t.Name()+".test_system_parts_columns SELECT today() - INTERVAL number DAY, number FROM numbers(10)") r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "create", "test_system_parts_columns")) @@ -2024,7 +2032,7 @@ func TestRestoreMutationInProgress(t *testing.T) { if err != nil { errStr := strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 341") || strings.Contains(errStr, "code: 517") 
|| strings.Contains(errStr, "timeout"), "UNKNOWN ERROR: %s", err.Error()) - log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debug().Msgf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } attrs := make([]struct { @@ -2042,7 +2050,7 @@ func TestRestoreMutationInProgress(t *testing.T) { r.NotEqual(nil, err) errStr = strings.ToLower(err.Error()) r.True(strings.Contains(errStr, "code: 517") || strings.Contains(errStr, "timeout")) - log.Debugf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) + log.Debug().Msgf("%s RETURN EXPECTED ERROR=%#v", mutationSQL, err) } env.DockerExecNoError(r, "clickhouse", "clickhouse", "client", "-q", "SELECT * FROM system.mutations WHERE is_done=0 FORMAT Vertical") @@ -2050,11 +2058,11 @@ func TestRestoreMutationInProgress(t *testing.T) { out, createErr := env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "create", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") r.NotEqual(createErr, nil) r.Contains(out, "have inconsistent data types") - log.Debug(out) + log.Debug().Msg(out) // backup without check consistency out, createErr = env.DockerExecOut("clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--skip-check-parts-columns", "--tables="+t.Name()+".test_restore_mutation_in_progress", "test_restore_mutation_in_progress") - log.Debug(out) + log.Debug().Msg(out) r.NoError(createErr) r.NotContains(out, "have inconsistent data types") @@ -2104,7 +2112,7 @@ func TestRestoreMutationInProgress(t *testing.T) { if expectedSelectError != "" { r.Error(selectErr) r.Contains(strings.ToLower(selectErr.Error()), expectedSelectError) - log.Debugf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) + log.Debug().Msgf("%s RETURN EXPECTED ERROR=%#v", selectSQL, selectErr) } else { r.NoError(selectErr) } @@ -2201,7 +2209,7 @@ func TestFIPS(t *testing.T) { env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete local "+fipsBackupName) env.DockerExecNoError(r, "clickhouse", "bash", "-ce", "clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml delete remote "+fipsBackupName) - log.Debug("Run `clickhouse-backup-fips server` in background") + log.Debug().Msg("Run `clickhouse-backup-fips server` in background") env.DockerExecBackgroundNoError(r, "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) @@ -2224,7 +2232,7 @@ func TestFIPS(t *testing.T) { testTLSCerts := func(certType, keyLength, curveName string, cipherList ...string) { generateCerts(certType, keyLength, curveName) - log.Debugf("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) + log.Debug().Msgf("Run `clickhouse-backup-fips server` in background for %s %s %s", certType, keyLength, curveName) env.DockerExecBackgroundNoError(r, "clickhouse", "bash", "-ce", "AWS_USE_FIPS_ENDPOINT=true clickhouse-backup-fips -c /etc/clickhouse-backup/config-s3-fips.yml server &>>/tmp/clickhouse-backup-server-fips.log") time.Sleep(1 * time.Second) @@ -2283,32 +2291,32 @@ func TestRestoreMapping(t *testing.T) { env.queryWithNoError(r, "CREATE VIEW database1.v1 AS SELECT * FROM database1.t1") env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2022-01-01 00:00:00', number FROM numbers(10)") - 
log.Debug("Create backup") + log.Debug().Msg("Create backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "create", testBackupName) - log.Debug("Restore schema") + log.Debug().Msg("Restore schema") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--schema", "--rm", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName) - log.Debug("Check result database1") + log.Debug().Msg("Check result database1") env.queryWithNoError(r, "INSERT INTO database1.t1 SELECT '2023-01-01 00:00:00', number FROM numbers(10)") checkRecordset(1, 20, "SELECT count() FROM database1.t1") checkRecordset(1, 20, "SELECT count() FROM database1.d1") checkRecordset(1, 20, "SELECT count() FROM database1.mv1") checkRecordset(1, 20, "SELECT count() FROM database1.v1") - log.Debug("Drop database1") + log.Debug().Msg("Drop database1") r.NoError(env.dropDatabase("database1")) - log.Debug("Restore data") + log.Debug().Msg("Restore data") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-database-mapping.yml", "restore", "--data", "--restore-database-mapping", "database1:database-2", "--restore-table-mapping", "t1:t3,t2:t4,d1:d2,mv1:mv2,v1:v2", "--tables", "database1.*", testBackupName) - log.Debug("Check result database-2") + log.Debug().Msg("Check result database-2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.t3") checkRecordset(1, 10, "SELECT count() FROM `database-2`.d2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.mv2") checkRecordset(1, 10, "SELECT count() FROM `database-2`.v2") - log.Debug("Check database1 not exists") + log.Debug().Msg("Check database1 not exists") checkRecordset(1, 0, "SELECT count() FROM system.databases WHERE name='database1' SETTINGS empty_result_for_aggregation_by_empty_set=0") fullCleanup(t, r, env, []string{testBackupName}, []string{"local"}, databaseList, true, true, "config-database-mapping.yml") @@ -2369,7 +2377,7 @@ func TestPostgreSQLMaterialized(t *testing.T) { if count > 0 { break } - log.Debugf("ch_pgsql_repl contains %d tables, wait 5 seconds", count) + log.Debug().Msgf("ch_pgsql_repl contains %d tables, wait 5 seconds", count) time.Sleep(5 * time.Second) } @@ -2410,31 +2418,31 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora incrementBackupName2 := fmt.Sprintf("%s_increment2_%d", t.Name(), rand.Int()) databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary} tablesPattern := fmt.Sprintf("*_%s.*", t.Name()) - log.Debug("Clean before start") + log.Debug().Msg("Clean before start") fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig) testData := generateTestData(t, r, env, remoteStorageType, defaultTestData) - log.Debug("Create backup") + log.Debug().Msg("Create backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", tablesPattern, fullBackupName) incrementData := generateIncrementTestData(t, r, env, remoteStorageType, defaultIncrementData, 1) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--tables", 
tablesPattern, incrementBackupName) - log.Debug("Upload full") + log.Debug().Msg("Upload full") uploadCmd := fmt.Sprintf("%s_COMPRESSION_FORMAT=zstd CLICKHOUSE_BACKUP_CONFIG=/etc/clickhouse-backup/%s clickhouse-backup upload --resume %s", remoteStorageType, backupConfig, fullBackupName) env.checkResumeAlreadyProcessed(uploadCmd, fullBackupName, "upload", r, remoteStorageType) // https://github.com/Altinity/clickhouse-backup/pull/900 if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.8") >= 0 { - log.Debug("create --diff-from-remote backup") + log.Debug().Msg("create --diff-from-remote backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "create", "--diff-from-remote", fullBackupName, "--tables", tablesPattern, incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "upload", incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "remote", incrementBackupName2) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName2) } - log.Debug("Upload increment") + log.Debug().Msg("Upload increment") uploadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s upload %s --diff-from-remote %s --resume", backupConfig, incrementBackupName, fullBackupName) env.checkResumeAlreadyProcessed(uploadCmd, incrementBackupName, "upload", r, remoteStorageType) @@ -2445,7 +2453,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) r.NoError(err) r.Equal(2, len(strings.Split(strings.Trim(out, " \t\r\n"), "\n")), "expect '2' backups exists in backup directory") - log.Debug("Delete backup") + log.Debug().Msg("Delete backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", incrementBackupName) out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "ls -lha "+backupDir+" | grep "+t.Name()) @@ -2454,21 +2462,21 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora dropDatabasesFromTestDataDataSet(t, r, env, databaseList) - log.Debug("Download") + log.Debug().Msg("Download") replaceStorageDiskNameForReBalance(r, env, remoteStorageType, false) downloadCmd := fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, fullBackupName) env.checkResumeAlreadyProcessed(downloadCmd, fullBackupName, "download", r, remoteStorageType) - log.Debug("Restore schema") + log.Debug().Msg("Restore schema") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", fullBackupName) - log.Debug("Restore data") + log.Debug().Msg("Restore data") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--data", fullBackupName) - log.Debug("Full restore with rm") + log.Debug().Msg("Full restore with rm") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, 
"restore", "--rm", fullBackupName) - log.Debug("Check data") + log.Debug().Msg("Check data") for i := range testData { if testData[i].CheckDatabaseOnly { r.NoError(env.checkDatabaseEngine(t, testData[i])) @@ -2482,17 +2490,17 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora // test increment dropDatabasesFromTestDataDataSet(t, r, env, databaseList) - log.Debug("Delete backup") + log.Debug().Msg("Delete backup") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "delete", "local", fullBackupName) - log.Debug("Download increment") + log.Debug().Msg("Download increment") downloadCmd = fmt.Sprintf("clickhouse-backup -c /etc/clickhouse-backup/%s download --resume %s", backupConfig, incrementBackupName) env.checkResumeAlreadyProcessed(downloadCmd, incrementBackupName, "download", r, remoteStorageType) - log.Debug("Restore") + log.Debug().Msg("Restore") env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/"+backupConfig, "restore", "--schema", "--data", incrementBackupName) - log.Debug("Check increment data") + log.Debug().Msg("Check increment data") for i := range testData { testDataItem := testData[i] if isTableSkip(env, testDataItem, true) || testDataItem.IsDictionary { @@ -2511,7 +2519,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora } // test end - log.Debug("Clean after finish") + log.Debug().Msg("Clean after finish") // during download increment, partially downloaded full will also clean fullCleanup(t, r, env, []string{incrementBackupName}, []string{"local"}, nil, true, false, backupConfig) fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote"}, databaseList, true, true, backupConfig) @@ -2603,7 +2611,7 @@ func replaceStorageDiskNameForReBalance(r *require.Assertions, env *TestEnvironm } func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, backupConfig string) { - log.Debug("testBackupSpecifiedPartitions started") + log.Debug().Msg("testBackupSpecifiedPartitions started") var err error var out string var result, expectedCount uint64 @@ -2662,7 +2670,7 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes } else { out, err = env.DockerExecOut("clickhouse-backup", "bash", "-ce", "clickhouse-backup -c /etc/clickhouse-backup/"+backupConfig+" restore --data --partitions=\"(0,'2022-01-02'),(0,'2022-01-03')\" "+fullBackupName) } - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) r.Contains(out, "DROP PARTITION") // we just replace data in exists table @@ -2763,7 +2771,7 @@ func testBackupSpecifiedPartitions(t *testing.T, r *require.Assertions, env *Tes if err = env.dropDatabase(dbName); err != nil { t.Fatal(err) } - log.Debug("testBackupSpecifiedPartitions finish") + log.Debug().Msg("testBackupSpecifiedPartitions finish") } func (env *TestEnvironment) checkResumeAlreadyProcessed(backupCmd, testBackupName, resumeKind string, r *require.Assertions, remoteStorageType string) { @@ -2774,7 +2782,7 @@ func (env *TestEnvironment) checkResumeAlreadyProcessed(backupCmd, testBackupNam backupCmd = fmt.Sprintf("%s; cat /var/lib/clickhouse/backup/%s/%s.state; %s", backupCmd, testBackupName, resumeKind, backupCmd) } out, err := env.DockerExecOut("clickhouse-backup", "bash", "-xce", backupCmd) - log.Debug(out) + log.Debug().Msg(out) r.NoError(err) if strings.Contains(backupCmd, "--resume") { 
r.Contains(out, "already processed") @@ -2806,7 +2814,7 @@ func fullCleanup(t *testing.T, r *require.Assertions, env *TestEnvironment, back } func generateTestData(t *testing.T, r *require.Assertions, env *TestEnvironment, remoteStorageType string, testData []TestDataStruct) []TestDataStruct { - log.Debugf("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) + log.Debug().Msgf("Generate test data %s with _%s suffix", remoteStorageType, t.Name()) testData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 0, 5, testData) for _, data := range testData { if isTableSkip(env, data, false) { @@ -2824,7 +2832,7 @@ func generateTestData(t *testing.T, r *require.Assertions, env *TestEnvironment, } func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, offset, rowsCount int, testData []TestDataStruct) []TestDataStruct { - log.Debugf("generateTestDataWithDifferentStoragePolicy remoteStorageType=%s", remoteStorageType) + log.Debug().Msgf("generateTestDataWithDifferentStoragePolicy remoteStorageType=%s", remoteStorageType) for databaseName, databaseEngine := range map[string]string{dbNameOrdinary: "Ordinary", dbNameAtomic: "Atomic"} { testDataWithStoragePolicy := TestDataStruct{ Database: databaseName, DatabaseEngine: databaseEngine, @@ -2890,7 +2898,7 @@ func generateTestDataWithDifferentStoragePolicy(remoteStorageType string, offset } func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestEnvironment, remoteStorageType string, incrementData []TestDataStruct, incrementNumber int) []TestDataStruct { - log.Debugf("Generate increment test data for %s", remoteStorageType) + log.Debug().Msgf("Generate increment test data for %s", remoteStorageType) incrementData = generateTestDataWithDifferentStoragePolicy(remoteStorageType, 5*incrementNumber, 5, incrementData) for _, data := range incrementData { if isTableSkip(ch, data, false) { @@ -2902,7 +2910,7 @@ func generateIncrementTestData(t *testing.T, r *require.Assertions, ch *TestEnvi } func dropDatabasesFromTestDataDataSet(t *testing.T, r *require.Assertions, ch *TestEnvironment, databaseList []string) { - log.Debug("Drop all databases") + log.Debug().Msg("Drop all databases") for _, db := range databaseList { db = db + "_" + t.Name() r.NoError(ch.dropDatabase(db)) @@ -2917,17 +2925,17 @@ func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", append(env.GetDefaultComposeCommand(), "logs", "clickhouse")...)) out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "'SELECT version()'") r.NoError(dockerErr) - env.ch.Log.Debug(out) + log.Debug().Msg(out) r.NoError(err) } if err != nil { r.NoError(utils.ExecCmd(context.Background(), 180*time.Second, "docker", "ps", "-a")) if out, dockerErr := env.DockerExecOut("clickhouse", "clickhouse", "client", "--echo", "-q", "SELECT version()"); dockerErr == nil { - log.Debug(out) + log.Debug().Msg(out) } else { - log.Warn(out) + log.Info().Msg(out) } - log.Warnf("clickhouse not ready %v, wait %v seconds", err, (pollInterval).Seconds()) + log.Warn().Msgf("clickhouse not ready %v, wait %v seconds", err, (pollInterval).Seconds()) time.Sleep(pollInterval) } else { if compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "20.8") > 0 { @@ -2936,7 +2944,7 @@ func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, if err == nil { break } else { - log.Warnf("mysql not ready %v, wait %d seconds", err, i) + 
log.Warn().Msgf("mysql not ready %v, wait %d seconds", err, i) time.Sleep(time.Second * time.Duration(i)) } } else { @@ -2949,13 +2957,13 @@ func (env *TestEnvironment) connectWithWait(r *require.Assertions, sleepBefore, func (env *TestEnvironment) connect(timeOut string) error { portOut, err := utils.ExecCmdOut(context.Background(), 10*time.Second, "docker", append(env.GetDefaultComposeCommand(), "port", "clickhouse", "9000")...) if err != nil { - log.Error(portOut) - log.Fatalf("can't get port for clickhouse: %v", err) + log.Error().Msg(portOut) + log.Fatal().Msgf("can't get port for clickhouse: %v", err) } hostAndPort := strings.Split(strings.Trim(portOut, " \r\n\t"), ":") if len(hostAndPort) < 1 { - log.Error(portOut) - log.Fatalf("invalid port for clickhouse: %v", err) + log.Error().Msg(portOut) + log.Fatal().Msgf("invalid port for clickhouse: %v", err) } port, err := strconv.Atoi(hostAndPort[1]) if err != nil { @@ -2967,7 +2975,6 @@ func (env *TestEnvironment) connect(timeOut string) error { Port: uint(port), Timeout: timeOut, }, - Log: log.WithField("logger", "integration-test"), } for i := 0; i < 3; i++ { err = env.ch.Connect() @@ -3051,7 +3058,7 @@ func (env *TestEnvironment) createTestSchema(t *testing.T, data TestDataStruct, substitution := "MergeTree() PARTITION BY toYYYYMMDD($1) ORDER BY $2 SETTINGS index_granularity=$3" createSQL = mergeTreeOldSyntax.ReplaceAllString(createSQL, substitution) } else { - log.Fatalf("Wrong %s, matches=%#v", createSQL, matches) + log.Fatal().Stack().Msgf("Wrong %s, matches=%#v", createSQL, matches) } } if !data.IsFunction { @@ -3077,7 +3084,7 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er return nil } insertSQL := fmt.Sprintf("INSERT INTO `%s`.`%s`", data.Database, data.Name) - log.Debug(insertSQL) + log.Debug().Msg(insertSQL) batch, err := env.ch.GetConn().PrepareBatch(context.Background(), insertSQL) if err != nil { @@ -3086,7 +3093,7 @@ func (env *TestEnvironment) createTestData(t *testing.T, data TestDataStruct) er for _, row := range data.Rows { insertData := make([]interface{}, len(data.Fields)) - log.Debugf("VALUES %v", row) + log.Debug().Msgf("VALUES %v", row) for idx, field := range data.Fields { insertData[idx] = row[field] } @@ -3116,7 +3123,7 @@ func (env *TestEnvironment) checkData(t *testing.T, r *require.Assertions, data assert.NotNil(t, data.Rows) data.Database += "_" + t.Name() data.Name += "_" + t.Name() - log.Debugf("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) + log.Debug().Msgf("Check '%d' rows in '%s.%s'\n", len(data.Rows), data.Database, data.Name) selectSQL := fmt.Sprintf("SELECT * FROM `%s`.`%s` ORDER BY `%s`", data.Database, data.Name, strings.Replace(data.OrderBy, "{test}", t.Name(), -1)) if data.IsFunction && compareVersion(os.Getenv("CLICKHOUSE_VERSION"), "21.12") == -1 { @@ -3125,7 +3132,7 @@ func (env *TestEnvironment) checkData(t *testing.T, r *require.Assertions, data if data.IsFunction { selectSQL = fmt.Sprintf("SELECT %s(number, number+1) AS test_result FROM numbers(%d)", data.Name, len(data.Rows)) } - log.Debug(selectSQL) + log.Debug().Msg(selectSQL) rows, err := env.ch.GetConn().Query(context.Background(), selectSQL) if err != nil { return err @@ -3194,7 +3201,7 @@ func (env *TestEnvironment) checkDatabaseEngine(t *testing.T, data TestDataStruc func (env *TestEnvironment) queryWithNoError(r *require.Assertions, query string, args ...interface{}) { err := env.ch.Query(query, args...) 
if err != nil { - env.ch.Log.Errorf("queryWithNoError error: %v", err) + log.Error().Err(err).Msgf("queryWithNoError(%s) error", query) } r.NoError(err) } @@ -3208,7 +3215,7 @@ func (env *TestEnvironment) DockerExecBackgroundNoError(r *require.Assertions, c func (env *TestEnvironment) DockerExecBackground(container string, cmd ...string) error { out, err := env.DockerExecBackgroundOut(container, cmd...) - log.Debug(out) + log.Debug().Msg(out) return err } @@ -3228,12 +3235,15 @@ func (env *TestEnvironment) GetExecDockerCommand(container string) []string { func (env *TestEnvironment) DockerExecNoError(r *require.Assertions, container string, cmd ...string) { out, err := env.DockerExecOut(container, cmd...) + if err == nil { + log.Debug().Msg(out) + } r.NoError(err, "%s\n\n%s\n[ERROR]\n%v", strings.Join(append(env.GetExecDockerCommand(container), cmd...), " "), out, err) } func (env *TestEnvironment) DockerExec(container string, cmd ...string) error { out, err := env.DockerExecOut(container, cmd...) - log.Debug(out) + log.Debug().Msg(out) return err } @@ -3246,9 +3256,9 @@ func (env *TestEnvironment) DockerCP(src, dst string) error { ctx, cancel := context.WithTimeout(context.Background(), 180*time.Second) dcmd := append(env.GetDefaultComposeCommand(), "cp", src, dst) - log.Debugf("docker %s", strings.Join(dcmd, " ")) + log.Debug().Msgf("docker %s", strings.Join(dcmd, " ")) out, err := exec.CommandContext(ctx, "docker", dcmd...).CombinedOutput() - log.Debug(string(out)) + log.Debug().Msg(string(out)) cancel() return err }
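
For reference, the `apex/log` to `rs/zerolog` call-site translation applied throughout the files above follows one mechanical pattern. The snippet below is only an illustrative sketch, not part of the changeset: it assumes the same global console logger that the integration-test `init()` configures, and `out`/`err` are placeholder values.

```go
package main

import (
	"fmt"
	"os"

	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
)

func main() {
	// Global logger configured the same way as in the integration-test init():
	// a synchronized console writer, no color, millisecond timestamps.
	consoleWriter := zerolog.ConsoleWriter{Out: os.Stderr, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}
	log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Logger()

	out := "some command output"         // placeholder value
	err := fmt.Errorf("example failure") // placeholder value

	// Call-site mapping used across the diff:
	//   log.Debug(out)               -> log.Debug().Msg(out)
	//   log.Debugf("x=%v", x)        -> log.Debug().Msgf("x=%v", x)
	//   log.Warnf("err=%v", err)     -> log.Warn().Msgf("err=%v", err)
	//   log.Errorf("msg: %v", err)   -> log.Error().Msgf("msg: %v", err)
	//   log.Fatalf("fatal: %v", err) -> log.Fatal().Msgf("fatal: %v", err)
	log.Debug().Msg(out)
	log.Warn().Msgf("RemoveDirectory err=%v", err)
}
```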