From b7d04a4f0916ea05821c058b126b431eb03b89fd Mon Sep 17 00:00:00 2001 From: Prateek Malhotra Date: Fri, 8 Sep 2017 19:17:30 -0400 Subject: [PATCH] Fixed race bugs. Added JSON output option. Added filters for listing backups. More tests. --- backends/gcs_backend.go | 1 + backup/backup.go | 34 ++++++- backup/list.go | 72 ++++++++++---- backup/restore.go | 18 ++-- cmd/list.go | 60 +++++++++++- cmd/receive.go | 4 +- cmd/root.go | 2 + cmd/version.go | 28 +++++- helpers/log.go | 3 + helpers/volumeinfo.go | 2 + integration_test.go | 211 +++++++++++++++++++++++++++++++++------- 11 files changed, 370 insertions(+), 65 deletions(-) diff --git a/backends/gcs_backend.go b/backends/gcs_backend.go index ca7b1c2..db7def2 100644 --- a/backends/gcs_backend.go +++ b/backends/gcs_backend.go @@ -165,6 +165,7 @@ func (g *GoogleCloudStorageBackend) Upload(ctx context.Context, vol *helpers.Vol if _, err := io.Copy(w, vol); err != nil { w.Close() helpers.AppLogger.Debugf("gs backend: Error while uploading volume %s - %v", vol.ObjectName, err) + return err } return w.Close() diff --git a/backup/backup.go b/backup/backup.go index c8fc770..f1a24e6 100644 --- a/backup/backup.go +++ b/backup/backup.go @@ -46,7 +46,8 @@ import ( ) var ( - ErrNoOp = errors.New("nothing new to sync") + ErrNoOp = errors.New("nothing new to sync") + manifestmutex sync.Mutex ) // ProcessSmartOptions will compute the snapshots to use @@ -315,7 +316,9 @@ func Backup(pctx context.Context, jobInfo *helpers.JobInfo) error { if !vol.IsManifest { helpers.AppLogger.Debugf("Volume %s has finished the entire pipeline.", vol.ObjectName) helpers.AppLogger.Debugf("Adding %s to the manifest volume list.", vol.ObjectName) + manifestmutex.Lock() jobInfo.Volumes = append(jobInfo.Volumes, vol) + manifestmutex.Unlock() // Write a manifest file and save it locally in order to resume later manifestVol, err := saveManifest(ctx, jobInfo, false) if err != nil { @@ -347,8 +350,9 @@ func Backup(pctx context.Context, jobInfo *helpers.JobInfo) error { // TODO: How to incorporate contexts in this go routine? maniwg.Wait() // Wait until the ZFS send command has completed and all volumes have been uploaded to all backends. 
helpers.AppLogger.Infof("All volumes dispatched in pipeline, finalizing manifest file.") - + manifestmutex.Lock() jobInfo.EndTime = time.Now() + manifestmutex.Unlock() manifestVol, err := saveManifest(ctx, jobInfo, true) if err != nil { return err @@ -364,7 +368,21 @@ func Backup(pctx context.Context, jobInfo *helpers.JobInfo) error { } totalWrittenBytes := jobInfo.TotalBytesWritten() - helpers.AppLogger.Noticef("Done.\n\tTotal ZFS Stream Bytes: %d (%s)\n\tTotal Bytes Written: %d (%s)\n\tElapsed Time: %v\n\tTotal Files Uploaded: %d", jobInfo.ZFSStreamBytes, humanize.IBytes(jobInfo.ZFSStreamBytes), totalWrittenBytes, humanize.IBytes(totalWrittenBytes), time.Since(jobInfo.StartTime), len(jobInfo.Volumes)+1) + if helpers.JSONOutput { + var doneOutput = struct { + TotalZFSBytes uint64 + TotalBackupBytes uint64 + ElapsedTime time.Duration + FilesUploaded int + }{jobInfo.ZFSStreamBytes, totalWrittenBytes, time.Since(jobInfo.StartTime), len(jobInfo.Volumes) + 1} + if j, jerr := json.Marshal(doneOutput); jerr != nil { + helpers.AppLogger.Errorf("could not ouput json due to error - %v", jerr) + } else { + fmt.Fprintf(helpers.Stdout, "%s", string(j)) + } + } else { + fmt.Fprintf(helpers.Stdout, "Done.\n\tTotal ZFS Stream Bytes: %d (%s)\n\tTotal Bytes Written: %d (%s)\n\tElapsed Time: %v\n\tTotal Files Uploaded: %d", jobInfo.ZFSStreamBytes, humanize.IBytes(jobInfo.ZFSStreamBytes), totalWrittenBytes, humanize.IBytes(totalWrittenBytes), time.Since(jobInfo.StartTime), len(jobInfo.Volumes)+1) + } helpers.AppLogger.Debugf("Cleaning up resources...") @@ -378,6 +396,8 @@ func Backup(pctx context.Context, jobInfo *helpers.JobInfo) error { } func saveManifest(ctx context.Context, j *helpers.JobInfo, final bool) (*helpers.VolumeInfo, error) { + manifestmutex.Lock() + defer manifestmutex.Unlock() sort.Sort(helpers.ByVolumeNumber(j.Volumes)) // Setup Manifest File @@ -435,7 +455,7 @@ func sendStream(ctx context.Context, j *helpers.JobInfo, c chan<- *helpers.Volum skipBytes, volNum := j.TotalBytesStreamedAndVols() lastTotalBytes = skipBytes for { - // Skipy byes if we are resuming + // Skip bytes if we are resuming if skipBytes > 0 { helpers.AppLogger.Debugf("Want to skip %d bytes.", skipBytes) written, serr := io.CopyN(ioutil.Discard, counter, int64(skipBytes)) @@ -525,7 +545,9 @@ func sendStream(ctx context.Context, j *helpers.JobInfo, c chan<- *helpers.Volum } }() + manifestmutex.Lock() j.ZFSCommandLine = strings.Join(cmd.Args, " ") + manifestmutex.Unlock() // Wait for the command to finish err = group.Wait() @@ -534,7 +556,9 @@ func sendStream(ctx context.Context, j *helpers.JobInfo, c chan<- *helpers.Volum return err } helpers.AppLogger.Infof("zfs send completed without error") + manifestmutex.Lock() j.ZFSStreamBytes = counter.Count() + manifestmutex.Unlock() return nil } @@ -584,8 +608,10 @@ func tryResume(ctx context.Context, j *helpers.JobInfo) error { return fmt.Errorf("option mismatch") } + manifestmutex.Lock() j.Volumes = originalManifest.Volumes j.StartTime = originalManifest.StartTime + manifestmutex.Unlock() helpers.AppLogger.Infof("Will be resuming previous backup attempt.") } return nil diff --git a/backup/list.go b/backup/list.go index 029fac4..6a0415d 100644 --- a/backup/list.go +++ b/backup/list.go @@ -28,6 +28,7 @@ import ( "path/filepath" "sort" "strings" + "time" "github.com/someone1/zfsbackup-go/helpers" ) @@ -36,7 +37,7 @@ import ( // and then read and output the manifest information describing the backup sets // found in the target destination. // TODO: Group by volume name? 
-func List(pctx context.Context, jobInfo *helpers.JobInfo) error { +func List(pctx context.Context, jobInfo *helpers.JobInfo, startswith string, before, after time.Time) error { ctx, cancel := context.WithCancel(pctx) defer cancel() @@ -68,28 +69,65 @@ func List(pctx context.Context, jobInfo *helpers.JobInfo) error { return derr } - var output []string - output = append(output, fmt.Sprintf("Found %d backup sets:\n", len(decodedManifests))) + // Filter manifests to only the results we care about + filteredResults := decodedManifests[:0] for _, manifest := range decodedManifests { - output = append(output, manifest.String()) - } - - if len(localOnlyFiles) > 0 { - output = append(output, fmt.Sprintf("There are %d manifests found locally that are not on the target destination.", len(localOnlyFiles))) - localOnlyOuput := []string{"The following manifests were found locally and can be removed using the clean command."} - for _, filename := range localOnlyFiles { - manifestPath := filepath.Join(localCachePath, filename) - decodedManifest, derr := readManifest(ctx, manifestPath, jobInfo) - if derr != nil { - helpers.AppLogger.Warningf("Could not read local only manifest %s due to error %v", manifestPath, derr) + if startswith != "" { + if startswith[len(startswith)-1:] == "*" { + if len(startswith) != 1 && !strings.HasPrefix(manifest.VolumeName, startswith[:len(startswith)-1]) { + continue + } + } else if strings.Compare(startswith, manifest.VolumeName) != 0 { continue } - localOnlyOuput = append(localOnlyOuput, decodedManifest.String()) } - helpers.AppLogger.Infof(strings.Join(localOnlyOuput, "\n")) + + if !before.IsZero() && !manifest.BaseSnapshot.CreationTime.Before(before) { + continue + } + + if !after.IsZero() && !manifest.BaseSnapshot.CreationTime.After(after) { + continue + } + + filteredResults = append(filteredResults, manifest) } - helpers.AppLogger.Noticef(strings.Join(output, "\n")) + decodedManifests = filteredResults + + if !helpers.JSONOutput { + var output []string + + output = append(output, fmt.Sprintf("Found %d backup sets:\n", len(decodedManifests))) + for _, manifest := range decodedManifests { + output = append(output, manifest.String()) + } + + if len(localOnlyFiles) > 0 { + output = append(output, fmt.Sprintf("There are %d manifests found locally that are not on the target destination.", len(localOnlyFiles))) + localOnlyOutput := []string{"The following manifests were found locally and can be removed using the clean command."} + for _, filename := range localOnlyFiles { + manifestPath := filepath.Join(localCachePath, filename) + decodedManifest, derr := readManifest(ctx, manifestPath, jobInfo) + if derr != nil { + helpers.AppLogger.Warningf("Could not read local only manifest %s due to error %v", manifestPath, derr) + continue + } + localOnlyOutput = append(localOnlyOutput, decodedManifest.String()) + } + helpers.AppLogger.Infof(strings.Join(localOnlyOutput, "\n")) + } + fmt.Fprintln(helpers.Stdout, strings.Join(output, "\n")) + } else { + organizedManifests := linkManifests(decodedManifests) + j, jerr := json.Marshal(organizedManifests) + if jerr != nil { + helpers.AppLogger.Errorf("could not marshal results to JSON - %v", jerr) + return jerr + } + + fmt.Fprintln(helpers.Stdout, string(j)) + } return nil } diff --git a/backup/restore.go b/backup/restore.go index 1806e61..92f0eab 100644 --- a/backup/restore.go +++ b/backup/restore.go @@ -333,7 +333,11 @@ func Receive(pctx context.Context, jobInfo *helpers.JobInfo) error { retryconf := backoff.WithContext(be, ctx) operation := 
func() error { - return processSequence(ctx, sequence, backend, usePipe) + oerr := processSequence(ctx, sequence, backend, usePipe) + if oerr != nil { + helpers.AppLogger.Warningf("error trying to download file %s - %v", sequence.volume.ObjectName, oerr) + } + return oerr } helpers.AppLogger.Debugf("Downloading volume %s.", sequence.volume.ObjectName) @@ -392,7 +396,6 @@ func processSequence(ctx context.Context, sequence downloadSequence, backend bac return err } - defer vol.Close() vol.ObjectName = sequence.volume.ObjectName if usePipe { sequence.c <- vol } @@ -408,7 +411,10 @@ func processSequence(ctx context.Context, sequence downloadSequence, backend bac } return err } - vol.Close() + if cerr := vol.Close(); cerr != nil { + helpers.AppLogger.Noticef("Could not close temporary file to download %s due to error - %v.", sequence.volume.ObjectName, cerr) + return cerr + } // Verify the SHA256 Hash, if it doesn't match, ditch it! if vol.SHA256Sum != sequence.volume.SHA256Sum { @@ -419,10 +425,11 @@ func processSequence(ctx context.Context, sequence downloadSequence, backend bac vol.DeleteVolume() return fmt.Errorf("SHA256 hash mismatch for %s, got %s but expected %s", sequence.volume.ObjectName, vol.SHA256Sum, sequence.volume.SHA256Sum) } + helpers.AppLogger.Debugf("Downloaded %s.", sequence.volume.ObjectName) + if !usePipe { sequence.c <- vol } - helpers.AppLogger.Debugf("Downloaded %s.", sequence.volume.ObjectName) return nil } @@ -461,7 +468,6 @@ func receiveStream(ctx context.Context, cmd *exec.Cmd, j *helpers.JobInfo, c <-c // Extract ZFS stream from files and send it to the zfs command group.Go(func() error { defer once.Do(func() { cout.Close() }) - buf := make([]byte, 1024*1024) for { select { case vol, ok := <-c: @@ -474,7 +480,7 @@ func receiveStream(ctx context.Context, cmd *exec.Cmd, j *helpers.JobInfo, c <-c helpers.AppLogger.Errorf("Error while trying to read from volume %s - %v", vol.ObjectName, eerr) return err } - _, eerr = io.CopyBuffer(cout, vol, buf) + _, eerr = io.Copy(cout, vol) if eerr != nil { helpers.AppLogger.Errorf("Error while trying to read from volume %s - %v", vol.ObjectName, eerr) return eerr diff --git a/cmd/list.go b/cmd/list.go index 60deaea..853156a 100644 --- a/cmd/list.go +++ b/cmd/list.go @@ -22,10 +22,20 @@ package cmd import ( "context" + "time" "github.com/spf13/cobra" "github.com/someone1/zfsbackup-go/backup" + "github.com/someone1/zfsbackup-go/helpers" ) + +var ( + startsWith string + beforeStr string + afterStr string + before time.Time + after time.Time ) // listCmd represents the list command @@ -35,13 +45,33 @@ var listCmd = &cobra.Command{ Long: `List all backup sets found at the provided target.`, PreRunE: validateListFlags, RunE: func(cmd *cobra.Command, args []string) error { + if startsWith != "" { + if startsWith[len(startsWith)-1:] == "*" { + helpers.AppLogger.Infof("Listing all backup jobs for volumes starting with %s", startsWith) + } else { + helpers.AppLogger.Infof("Listing all backup jobs for volume %s", startsWith) + } + } + + if !before.IsZero() { + helpers.AppLogger.Infof("Listing all backup jobs of snapshots taken before %v", before) + } + + if !after.IsZero() { + helpers.AppLogger.Infof("Listing all backup jobs of snapshots taken after %v", after) + } + jobInfo.Destinations = []string{args[0]} - return backup.List(context.Background(), &jobInfo) + return backup.List(context.Background(), &jobInfo, startsWith, before, after) }, } func init() { RootCmd.AddCommand(listCmd) + + listCmd.Flags().StringVar(&startsWith, "volumeName", "", 
"Filter results to only this volume name, can end with a '*' to match as only a prefix") + listCmd.Flags().StringVar(&beforeStr, "before", "", "Filter results to only this backups before this specified date & time (format: yyyy-MM-ddTHH:mm:ss, parsed in local TZ)") + listCmd.Flags().StringVar(&afterStr, "after", "", "Filter results to only this backups after this specified date & time (format: yyyy-MM-ddTHH:mm:ss, parsed in local TZ)") } func validateListFlags(cmd *cobra.Command, args []string) error { @@ -49,5 +79,33 @@ func validateListFlags(cmd *cobra.Command, args []string) error { cmd.Usage() return errInvalidInput } + + if beforeStr != "" { + parsed, perr := time.ParseInLocation(time.RFC3339[:19], beforeStr, time.Local) + if perr != nil { + helpers.AppLogger.Errorf("could not parse before time '%s' due to error: %v", beforeStr, perr) + return perr + } + before = parsed + } + + if afterStr != "" { + parsed, perr := time.ParseInLocation(time.RFC3339[:19], afterStr, time.Local) + if perr != nil { + helpers.AppLogger.Errorf("could not parse before time '%s' due to error: %v", beforeStr, perr) + return perr + } + after = parsed + } return nil } + +// ResetListJobInfo exists solely for integration testing +func ResetListJobInfo() { + resetRootFlags() + startsWith = "" + beforeStr = "" + afterStr = "" + before = time.Time{} + after = time.Time{} +} diff --git a/cmd/receive.go b/cmd/receive.go index b5c629b..a6c2066 100644 --- a/cmd/receive.go +++ b/cmd/receive.go @@ -40,6 +40,8 @@ var receiveCmd = &cobra.Command{ Long: `receive will restore a snapshot of a ZFS volume similar to how the "zfs recv" command works.`, PreRunE: validateReceiveFlags, RunE: func(cmd *cobra.Command, args []string) error { + helpers.AppLogger.Infof("Limiting the number of active files to %d", jobInfo.MaxFileBuffer) + if jobInfo.AutoRestore { return backup.AutoRestore(context.Background(), &jobInfo) } @@ -64,7 +66,7 @@ func init() { receiveCmd.Flags().StringVar(&jobInfo.Separator, "separator", "|", "the separator to use between object component names (used only for the initial manifest we are looking for).") } -// Exists solely for integration testing +// ResetReceiveJobInfo exists solely for integration testing func ResetReceiveJobInfo() { resetRootFlags() jobInfo.AutoRestore = false diff --git a/cmd/root.go b/cmd/root.go index 716d1dc..06dfe50 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -82,6 +82,7 @@ func init() { RootCmd.PersistentFlags().StringVar(&jobInfo.EncryptTo, "encryptTo", "", "the email of the user to encrypt the data to from the provided public keyring.") RootCmd.PersistentFlags().StringVar(&jobInfo.SignFrom, "signFrom", "", "the email of the user to sign on behalf of from the provided private keyring.") RootCmd.PersistentFlags().StringVar(&helpers.ZFSPath, "zfsPath", "zfs", "the path to the zfs executable.") + RootCmd.PersistentFlags().BoolVar(&helpers.JSONOutput, "jsonOutput", false, "dump results as a JSON string - on success only") passphrase = []byte(os.Getenv("PGP_PASSPHRASE")) } @@ -96,6 +97,7 @@ func resetRootFlags() { jobInfo.EncryptTo = "" jobInfo.SignFrom = "" helpers.ZFSPath = "zfs" + helpers.JSONOutput = false } func processFlags(cmd *cobra.Command, args []string) error { diff --git a/cmd/version.go b/cmd/version.go index 541a6c1..b87b131 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -21,6 +21,7 @@ package cmd import ( + "encoding/json" "fmt" "runtime" @@ -36,7 +37,32 @@ var versionCmd = &cobra.Command{ Long: `This will output the version of zfsbackup in use and information about the 
runtime and architecture.`, RunE: func(cmd *cobra.Command, args []string) error { - fmt.Fprintf(helpers.Stdout, "\tProgram Name:\t%s\n\tVersion:\tv%s\n\tOS Target:\t%s\n\tArch Target:\t%s\n\tCompiled With:\t%s\n\tGo Version:\t%s\n", helpers.ProgramName, helpers.Version(), runtime.GOOS, runtime.GOARCH, runtime.Compiler, runtime.Version()) + var output string + if helpers.JSONOutput { + j, err := json.Marshal(struct { + Name string + Version string + OS string + Arch string + Compiled string + GoVersion string + }{ + Name: helpers.ProgramName, + Version: helpers.Version(), + OS: runtime.GOOS, + Arch: runtime.GOARCH, + Compiled: runtime.Compiler, + GoVersion: runtime.Version(), + }) + if err != nil { + helpers.AppLogger.Errorf("could not dump version info to JSON - %v", err) + return err + } + output = string(j) + } else { + output = fmt.Sprintf("\tProgram Name:\t%s\n\tVersion:\tv%s\n\tOS Target:\t%s\n\tArch Target:\t%s\n\tCompiled With:\t%s\n\tGo Version:\t%s", helpers.ProgramName, helpers.Version(), runtime.GOOS, runtime.GOARCH, runtime.Compiler, runtime.Version()) + } + fmt.Fprintln(helpers.Stdout, output) return nil }, } diff --git a/helpers/log.go b/helpers/log.go index 7358b6c..e8c5b7b 100644 --- a/helpers/log.go +++ b/helpers/log.go @@ -35,3 +35,6 @@ var AppLogger = logging.MustGetLogger(LogModuleName) // Stdout is where to output standard messaging to var Stdout io.Writer = os.Stdout + +// JSONOutput will signal if we should dump the results to Stdout in JSON format +var JSONOutput bool = false diff --git a/helpers/volumeinfo.go b/helpers/volumeinfo.go index f3c2f59..d112c05 100644 --- a/helpers/volumeinfo.go +++ b/helpers/volumeinfo.go @@ -457,6 +457,8 @@ func prepareVolume(ctx context.Context, j *JobInfo, pipe bool, isManifest bool) if err != nil { return nil, nil, nil, err } + + // TODO: Signal properly if the process closes prematurely } nameParts := []string{j.VolumeName} diff --git a/integration_test.go b/integration_test.go index 70f09b2..6ecb66d 100644 --- a/integration_test.go +++ b/integration_test.go @@ -22,12 +22,15 @@ package main import ( "bytes" + "encoding/json" "fmt" "os" "os/exec" "strings" "testing" + "time" + "github.com/Azure/azure-sdk-for-go/storage" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws" @@ -39,9 +42,37 @@ import ( "github.com/someone1/zfsbackup-go/helpers" ) -const s3TestBucketName = "s3buckettest" +const s3TestBucketName = "s3integrationbuckettest" +const azureTestBucketName = "azureintegrationbuckettest" +const logLevel = "debug" -func TestIntegration(t *testing.T) { +func setupAzureBucket(t *testing.T) func() { + t.Helper() + if os.Getenv("AZURE_CUSTOM_ENDPOINT") == "" { + t.Skip("No custom Azure Endpoint provided to test against") + } + err := os.Setenv("AZURE_ACCOUNT_NAME", storage.StorageEmulatorAccountName) + if err != nil { + t.Fatalf("could not set environment variable due to error: %v", err) + } + err = os.Setenv("AZURE_ACCOUNT_KEY", storage.StorageEmulatorAccountKey) + if err != nil { + t.Fatalf("could not set environment variable due to error: %v", err) + } + + client, cerr := storage.NewClient(storage.StorageEmulatorAccountName, storage.StorageEmulatorAccountKey, os.Getenv("AZURE_CUSTOM_ENDPOINT"), storage.DefaultAPIVersion, false) + if cerr != nil { + t.Fatalf("Error while trying to create Azure client: %v", cerr) + } + blobCli := client.GetBlobService() + _, oerr := blobCli.GetContainerReference(azureTestBucketName).CreateIfNotExists(nil) + if oerr != nil { + t.Fatalf("Error while trying to create Azure 
Client: %v", oerr) + } + return func() { blobCli.GetContainerReference(azureTestBucketName).DeleteIfExists(nil) } +} + +func setupS3Bucket(t *testing.T) func() { if os.Getenv("AWS_S3_CUSTOM_ENDPOINT") == "" { t.Skip("No custom S3 Endpoint provided to test against") } @@ -67,65 +98,176 @@ func TestIntegration(t *testing.T) { } } - defer client.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(s3TestBucketName), - }) + return func() { + client.DeleteBucket(&s3.DeleteBucketInput{ + Bucket: aws.String(s3TestBucketName), + }) + } +} - t.Run("Version", func(t *testing.T) { - old := helpers.Stdout - buf := bytes.NewBuffer(nil) - helpers.Stdout = buf - defer func() { helpers.Stdout = old }() +func TestVersion(t *testing.T) { + old := helpers.Stdout + buf := bytes.NewBuffer(nil) + helpers.Stdout = buf + defer func() { helpers.Stdout = old }() - os.Args = []string{helpers.ProgramName, "version"} - main() + os.Args = []string{helpers.ProgramName, "version"} + main() - if !strings.Contains(buf.String(), fmt.Sprintf("Version:\tv%s", helpers.Version())) { - t.Fatalf("expected version in version command output, did not recieve one:\n%s", buf.String()) - } + if !strings.Contains(buf.String(), fmt.Sprintf("Version:\tv%s", helpers.Version())) { + t.Fatalf("expected version in version command output, did not recieve one:\n%s", buf.String()) + } - }) + buf.Reset() + os.Args = []string{helpers.ProgramName, "version", "--jsonOutput"} + main() + var jout = struct { + Version string + }{} + if err := json.Unmarshal(buf.Bytes(), &jout); err != nil { + t.Fatalf("expected output to be JSON, got error while trying to decode - %v", err) + } else if jout.Version != helpers.Version() { + t.Fatalf("expected version to be '%s', got '%s' instead", helpers.Version(), jout.Version) + } +} + +func TestIntegration(t *testing.T) { + removeAzureBucket := setupAzureBucket(t) + defer removeAzureBucket() + + removeS3Bucket := setupS3Bucket(t) + defer removeS3Bucket() - bucket := backends.AWSS3BackendPrefix + "://" + s3TestBucketName + s3bucket := fmt.Sprintf("%s://%s", backends.AWSS3BackendPrefix, s3TestBucketName) + azurebucket := fmt.Sprintf("%s://%s", backends.AzureBackendPrefix, azureTestBucketName) + bucket := fmt.Sprintf("%s,%s", s3bucket, azurebucket) + // Azurite doesn't seem to like '|' so making seperator '-' + // Backup Tests t.Run("Backup", func(t *testing.T) { - cmd.RootCmd.SetArgs([]string{"send", "--logLevel", "debug", "tank/data@a", bucket}) + cmd.RootCmd.SetArgs([]string{"send", "--logLevel", logLevel, "--separator", "+", "tank/data@a", bucket}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing backup: %v", err) } cmd.ResetSendJobInfo() - cmd.RootCmd.SetArgs([]string{"send", "--logLevel", "debug", "-i", "tank/data@a", "tank/data@b", bucket}) + cmd.RootCmd.SetArgs([]string{"send", "--logLevel", logLevel, "--separator", "+", "-i", "tank/data@a", "tank/data@b", bucket}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing backup: %v", err) } cmd.ResetSendJobInfo() - cmd.RootCmd.SetArgs([]string{"send", "--logLevel", "debug", "--compressor", "xz", "--compressionLevel", "2", "--increment", "tank/data", bucket}) + cmd.RootCmd.SetArgs([]string{"send", "--logLevel", logLevel, "--separator", "+", "--compressor", "xz", "--compressionLevel", "2", "--increment", "tank/data", bucket}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing backup: %v", err) } cmd.ResetSendJobInfo() - cmd.RootCmd.SetArgs([]string{"send", "--logLevel", "debug", "--increment", 
"tank/data", bucket}) + cmd.RootCmd.SetArgs([]string{"send", "--logLevel", logLevel, "--separator", "+", "--increment", "tank/data", bucket}) if err := cmd.RootCmd.Execute(); err != backup.ErrNoOp { t.Fatalf("expecting error %v, but got %v instead", backup.ErrNoOp, err) } + + cmd.ResetSendJobInfo() }) - // We pass blank "-i" flags since we are running through multiple executions for testing. - t.Run("Restore", func(t *testing.T) { - cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", "debug", "-F", "tank/data@a", bucket, "tank/data2"}) + var restoreTest = []struct { + backend string + bucket string + target string + }{ + {"AWSS3", s3bucket, "tank/data3"}, + {"Azure", azurebucket, "tank/data2"}, + } + for _, test := range restoreTest { + //t.Logf("Running Test List%s", test.backend) + t.Run(fmt.Sprintf("List%s", test.backend), listWrapper(test.bucket)) + //t.Logf("Running Test Restore%s", test.backend) + t.Run(fmt.Sprintf("Restore%s", test.backend), restoreWrapper(test.bucket, test.target)) + } +} + +func listWrapper(bucket string) func(*testing.T) { + return func(t *testing.T) { + old := helpers.Stdout + buf := bytes.NewBuffer(nil) + helpers.Stdout = buf + defer func() { helpers.Stdout = old }() + + var listTests = []struct { + volumeName string + after time.Time + before time.Time + keys int + entries int + }{ + // volumeName tests + {"", time.Time{}, time.Time{}, 1, 3}, + {"t*", time.Time{}, time.Time{}, 1, 3}, + {"v*", time.Time{}, time.Time{}, 0, 0}, + {"tank/data", time.Time{}, time.Time{}, 1, 3}, + {"tan", time.Time{}, time.Time{}, 0, 0}, + //before Tests + {"", time.Time{}, time.Now(), 1, 3}, + {"", time.Time{}, time.Now().Add(-24 * time.Hour), 0, 0}, + //after Tests + {"", time.Now().Add(-24 * time.Hour), time.Time{}, 1, 3}, + {"", time.Now(), time.Time{}, 0, 0}, + } + + for _, test := range listTests { + opts := []string{"list", "--logLevel", logLevel, "--jsonOutput"} + if test.volumeName != "" { + opts = append(opts, "--volumeName", test.volumeName) + } + if !test.after.IsZero() { + opts = append(opts, "--after", test.after.Format(time.RFC3339[:19])) + } + if !test.before.IsZero() { + opts = append(opts, "--before", test.before.Format(time.RFC3339[:19])) + } + + cmd.RootCmd.SetArgs(append(opts, bucket)) + if err := cmd.RootCmd.Execute(); err != nil { + t.Fatalf("error performing backup: %v", err) + } + cmd.ResetListJobInfo() + + jout := make(map[string][]*helpers.JobInfo) + if err := json.Unmarshal(buf.Bytes(), &jout); err != nil { + t.Fatalf("error parsing json output: %v", err) + } + + if len(jout) != test.keys || len(jout["tank/data"]) != test.entries { + t.Fatalf("expected %d keys and %d entries, got %d keys and %d entries", test.keys, test.entries, len(jout), len(jout["tank/data"])) + } + + if len(jout["tank/data"]) == 3 { + if jout["tank/data"][0].BaseSnapshot.Name != "a" || jout["tank/data"][1].BaseSnapshot.Name != "b" || jout["tank/data"][2].BaseSnapshot.Name != "c" { + t.Fatalf("expected snapshot order a -> b -> c, but got %s -> %s -> %s instead", jout["tank/data"][0].BaseSnapshot.Name, jout["tank/data"][1].BaseSnapshot.Name, jout["tank/data"][2].BaseSnapshot.Name) + } + } + + buf.Reset() + } + } +} + +func restoreWrapper(bucket, target string) func(*testing.T) { + return func(t *testing.T) { + cmd.ResetReceiveJobInfo() + + cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", logLevel, "--separator", "+", "-F", "tank/data@a", bucket, target}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing receive: %v", err) } cmd.ResetReceiveJobInfo() - 
cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", "debug", "-F", "-i", "tank/data@a", "tank/data@b", bucket, "tank/data2"}) + cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", logLevel, "--separator", "+", "-F", "-i", "tank/data@a", "tank/data@b", bucket, target}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing receive: %v", err) } @@ -133,30 +275,29 @@ func TestIntegration(t *testing.T) { cmd.ResetReceiveJobInfo() os.RemoveAll("~/.zfsbackup") - cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", "debug", "-F", "--auto", "tank/data", bucket, "tank/data2"}) + cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", logLevel, "--separator", "+", "-F", "--auto", "tank/data", bucket, target}) if err := cmd.RootCmd.Execute(); err != nil { t.Fatalf("error performing receive: %v", err) } - diffCmd := exec.Command("diff", "-rq", "/tank/data", "/tank/data2") + cmd.ResetReceiveJobInfo() + + diffCmd := exec.Command("diff", "-rq", "/tank/data", "/"+target) err := diffCmd.Run() if err != nil { - t.Fatalf("unexpected difference comparing the restored backup data2: %v", err) + t.Fatalf("unexpected difference comparing the restored backup %s: %v", target, err) } - cmd.ResetReceiveJobInfo() - // Re-enable test when we get ZoL >= v0.7.0-rc5 on travis ci VM - // cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", "debug", "-F", "--auto", "-o", "origin=tank/data@b", "tank/data", bucket, "tank/data3"}) + // cmd.RootCmd.SetArgs([]string{"receive", "--logLevel", logLevel, "--separator", "+", "-F", "--auto", "-o", "origin=tank/data@b", "tank/data", bucket, target+"origin"}) // if err = cmd.RootCmd.Execute(); err != nil { // t.Fatalf("error performing receive: %v", err) // } - // diffCmd = exec.Command("diff", "-rq", "/tank/data", "/tank/data3") + // diffCmd = exec.Command("diff", "-rq", "/tank/data", target+"origin") // err = diffCmd.Run() // if err != nil { - // t.Fatalf("unexpected difference comparing the restored backup data3: %v", err) + // t.Fatalf("unexpected difference comparing the restored backup %sorigin: %v", err, target) // } - }) - + } }