diff --git a/go.mod b/go.mod
index 1cec07a22..bbe16632a 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,7 @@ require (
 	github.com/hashicorp/go-version v1.6.0
 	github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0
 	github.com/kubernetes-incubator/external-storage v0.20.4-openstorage-rc7
-	github.com/libopenstorage/stork v1.4.1-0.20240506185157-275e4369d062
+	github.com/libopenstorage/stork v1.4.1-0.20240513102605-2340238c7664
 	github.com/portworx/pxc v0.33.0
 	github.com/portworx/sched-ops v1.20.4-rc1.0.20240424153814-f3083bdb4578
 	github.com/sirupsen/logrus v1.9.3
diff --git a/go.sum b/go.sum
index fc8ff9d46..20ff0a4db 100644
--- a/go.sum
+++ b/go.sum
@@ -3362,8 +3362,8 @@ github.com/libopenstorage/stork v1.4.1-0.20230502135851-9cacb19e1df5/go.mod h1:R
 github.com/libopenstorage/stork v1.4.1-0.20230519043154-cbc10dffaf19/go.mod h1:Xm4DHoViynFXMQKBXGj3IkA77LY2RBFkNtv6vbo3wNw=
 github.com/libopenstorage/stork v1.4.1-0.20230601053837-5dd68f026569/go.mod h1:+mKPMCPNhS/XOF2RPcNFijkr67CCCWp0o8OXVG6xxAk=
 github.com/libopenstorage/stork v1.4.1-0.20230610103146-72cf75320066/go.mod h1:Yst+fnOYjWk6SA5pXZBKm19wtiinjxQ/vgYTXI3k80Q=
-github.com/libopenstorage/stork v1.4.1-0.20240506185157-275e4369d062 h1:wTiDcrROzCHYsyYfromjatlOwT4UWZSeKqjOcg/SlKY=
-github.com/libopenstorage/stork v1.4.1-0.20240506185157-275e4369d062/go.mod h1:kp5qtpq+BgjL5WqiOpDvbPH1WGReO5AaqXDbb+XpvzM=
+github.com/libopenstorage/stork v1.4.1-0.20240513102605-2340238c7664 h1:bABYni9x1xTkaIzIvfeYj1MpubMp+kjV5K0qP9k1Xb4=
+github.com/libopenstorage/stork v1.4.1-0.20240513102605-2340238c7664/go.mod h1:kp5qtpq+BgjL5WqiOpDvbPH1WGReO5AaqXDbb+XpvzM=
 github.com/libopenstorage/systemutils v0.0.0-20160208220149-44ac83be3ce1/go.mod h1:xwNGC7xiz/BQ/wbMkvHujL8Gjgseg+x41xMek7sKRRQ=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
diff --git a/vendor/github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1/backuplocation.go b/vendor/github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1/backuplocation.go
index bfb390f07..98cd28ad4 100644
--- a/vendor/github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1/backuplocation.go
+++ b/vendor/github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1/backuplocation.go
@@ -113,6 +113,16 @@ type S3Config struct {
 	SSE string `json:"sse"`
 }
 
+// AzureEnvironment is the type of the Azure environment
+type AzureEnvironment string
+
+const (
+	// AzurePublic - environment type for the Azure public cloud
+	AzurePublic AzureEnvironment = "AzurePublicCloud"
+	// AzureChina - environment type for the Azure China cloud
+	AzureChina AzureEnvironment = "AzureChinaCloud"
+)
+
 // AzureConfig specifies the config required to connect to Azure Blob Storage
 type AzureConfig struct {
 	StorageAccountName string `json:"storageAccountName"`
@@ -121,6 +131,9 @@ type AzureConfig struct {
 	SubscriptionID string `json:"subscriptionID"`
 	ClientID       string `json:"clientID"`
 	ClientSecret   string `json:"clientSecret"`
+	// Environment is the Azure environment type for Azure Blob Storage.
+	// Supported options: "AzurePublicCloud" (default), "AzureChinaCloud"
+	Environment AzureEnvironment `json:"environment"`
 }
 
 // GoogleConfig specifies the config required to connect to Google Cloud Storage
@@ -271,6 +284,9 @@ func (bl *BackupLocation) getMergedAzureConfig(client kubernetes.Interface) erro
 		if val, ok := secretConfig.Data["storageAccountKey"]; ok && val != nil {
 			bl.Location.AzureConfig.StorageAccountKey = strings.TrimSuffix(string(val), "\n")
 		}
+		if val, ok := secretConfig.Data["environment"]; ok && val != nil {
+			bl.Location.AzureConfig.Environment = AzureEnvironment(strings.TrimSuffix(string(val), "\n"))
+		}
 	}
 
 	return nil
diff --git a/vendor/github.com/libopenstorage/stork/pkg/applicationmanager/controllers/applicationrestore.go b/vendor/github.com/libopenstorage/stork/pkg/applicationmanager/controllers/applicationrestore.go
index be82f6c61..817233ea8 100644
--- a/vendor/github.com/libopenstorage/stork/pkg/applicationmanager/controllers/applicationrestore.go
+++ b/vendor/github.com/libopenstorage/stork/pkg/applicationmanager/controllers/applicationrestore.go
@@ -611,6 +611,7 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 	pvcCount := 0
 	restoreDone := 0
 	backupVolumeInfoMappings := make(map[string][]*storkapi.ApplicationBackupVolumeInfo)
+	hasPXDdriver := false
 	for _, namespace := range backup.Spec.Namespaces {
 		if _, ok := restore.Spec.NamespaceMapping[namespace]; !ok {
 			continue
@@ -648,6 +649,9 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 				backupVolumeInfoMappings[volumeBackup.DriverName] = make([]*storkapi.ApplicationBackupVolumeInfo, 0)
 			}
 			backupVolumeInfoMappings[volumeBackup.DriverName] = append(backupVolumeInfoMappings[volumeBackup.DriverName], volumeBackup)
+			if volumeBackup.DriverName == volume.PortworxDriverName {
+				hasPXDdriver = true
+			}
 		}
 	}
 	if restore.Status.Volumes == nil {
@@ -659,10 +663,25 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 		logrus.Errorf("error in checking backuplocation type")
 		return err
 	}
+
+	// skipNfsForPxDOnlyDriver flags whether to skip creating the nfs-restore-pvc job when the Portworx (pxd) driver is the only driver in the backup.
+	// It is set below when the backupVolumeInfoMappings map contains exactly one entry and that entry is for the pxd driver.
+	// If other drivers are present alongside pxd, we can still create a ResourceExport CR, because the count of restored volumes will not match the total count of PVCs to be restored.
+	// If pxd is the only driver on an NFS backup location, skipping the ResourceExport CR creation for volumes is necessary:
+	// the PVC counts will already match once the restore is initiated via driver.StartRestore in the stork controller's context and the stage is marked as ApplicationRestoreStageApplications.
+	// Otherwise resources would be restored twice, once from nfs-restore-pvc and again from nfs-restore-resource,
+	// potentially marking the restore as partial even when restoring into a new namespace.
+	var skipNfsForPxDOnlyDriver bool
+	if len(backupVolumeInfoMappings) == 1 {
+		_, skipNfsForPxDOnlyDriver = backupVolumeInfoMappings[volume.PortworxDriverName]
+	}
 	if len(restore.Status.Volumes) != pvcCount {
-		// Here backupVolumeInfoMappings is framed based on driver name mapping, hence startRestore()
-		// gets called once per driver
-		if !nfs {
+		// Portworx driver restore is not supported via a job, because the volume can be a secured px volume;
+		// to access the token, driver.StartRestore must run in the same context as the stork controller.
+		// This check is equivalent to (!nfs || (nfs && hasPXDdriver)).
+		if !nfs || hasPXDdriver {
+			// Here backupVolumeInfoMappings is framed based on driver name mapping, hence startRestore()
+			// gets called once per driver
 			for driverName, vInfos := range backupVolumeInfoMappings {
 				backupVolInfos := vInfos
 				driver, err := volume.Get(driverName)
@@ -674,82 +693,95 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 				if err != nil {
 					return err
 				}
-				// For each driver, check if it needs any additional resources to be
-				// restored before starting the volume restore
-				objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
-				if err != nil {
-					log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
-					return err
-				}
-				// Skip pv/pvc if replacepolicy is set to retain to avoid creating
-				if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain {
-					backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos)
-					if err != nil {
-						log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err)
-						return err
+				var preRestoreObjects []runtime.Unstructured
+				// This branch is equivalent to (nfs && hasPXDdriver).
+				if nfs {
+					if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain {
+						// Skip pv/pvc if replacepolicy is set to retain, to avoid creating them with an empty object list
+						backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, nil, driver, vInfos)
+						if err != nil {
+							log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err)
+							return err
+						}
 					}
-				}
-				var storageClassesBytes []byte
-				if driverName == "csi" {
-					storageClassesBytes, err = a.downloadObject(backup, backup.Spec.BackupLocation, backup.Namespace, "storageclasses.json", false)
+				} else {
+					// For each driver, check if it needs any additional resources to be
+					// restored before starting the volume restore
+					objects, err := a.downloadResources(backup, restore.Spec.BackupLocation, restore.Namespace)
 					if err != nil {
-						log.ApplicationRestoreLog(restore).Errorf("Error in a.downloadObject %v", err)
+						log.ApplicationRestoreLog(restore).Errorf("Error downloading resources: %v", err)
 						return err
 					}
-				}
-				preRestoreObjects, err := driver.GetPreRestoreResources(backup, restore, objects, storageClassesBytes)
-				if err != nil {
-					log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err)
-					return err
-				}
-
-				// Pre-delete resources for CSI driver
-				if (driverName == "csi" || driverName == "kdmp") && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete {
-					objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources)
-					objectBasedOnIncludeResources := make([]runtime.Unstructured, 0)
-					var opts resourcecollector.Options
-					for _, o := range objects {
-						skip, err := a.resourceCollector.PrepareResourceForApply(
-							o,
-							objects,
-							objectMap,
-							restore.Spec.NamespaceMapping,
-							nil, // no need to set storage class mappings at this stage
-							nil,
-							restore.Spec.IncludeOptionalResourceTypes,
-							nil,
-							&opts,
-							restore.Spec.BackupLocation,
-							restore.Namespace,
-						)
 					if err != nil {
+					// Skip pv/pvc if replacepolicy is set to retain, to avoid re-creating existing volumes
+					if restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyRetain {
+						backupVolInfos, existingRestoreVolInfos, err = a.skipVolumesFromRestoreList(restore, objects, driver, vInfos)
 						if err != nil {
+							log.ApplicationRestoreLog(restore).Errorf("Error while checking pvcs: %v", err)
 							return err
 						}
-					if !skip {
-						objectBasedOnIncludeResources = append(
-							objectBasedOnIncludeResources,
-							o,
-						)
+					}
+					var storageClassesBytes []byte
+					if driverName == "csi" {
+						storageClassesBytes, err = a.downloadObject(backup, backup.Spec.BackupLocation, backup.Namespace, "storageclasses.json", false)
+						if err != nil {
+							log.ApplicationRestoreLog(restore).Errorf("Error in a.downloadObject %v", err)
+							return err
 						}
 					}
-					tempObjects, err := a.getNamespacedObjectsToDelete(
-						restore,
-						objectBasedOnIncludeResources,
-					)
+					preRestoreObjects, err = driver.GetPreRestoreResources(backup, restore, objects, storageClassesBytes)
 					if err != nil {
+						log.ApplicationRestoreLog(restore).Errorf("Error getting PreRestore Resources: %v", err)
 						return err
 					}
-					err = a.resourceCollector.DeleteResources(
-						a.dynamicInterface,
-						tempObjects, updateCr)
-					if err != nil {
-						return err
+
+					// Pre-delete resources for CSI driver
+					if (driverName == "csi" || driverName == "kdmp") && restore.Spec.ReplacePolicy == storkapi.ApplicationRestoreReplacePolicyDelete {
+						objectMap := storkapi.CreateObjectsMap(restore.Spec.IncludeResources)
+						objectBasedOnIncludeResources := make([]runtime.Unstructured, 0)
+						var opts resourcecollector.Options
+						for _, o := range objects {
+							skip, err := a.resourceCollector.PrepareResourceForApply(
+								o,
+								objects,
+								objectMap,
+								restore.Spec.NamespaceMapping,
+								nil, // no need to set storage class mappings at this stage
+								nil,
+								restore.Spec.IncludeOptionalResourceTypes,
+								nil,
+								&opts,
+								restore.Spec.BackupLocation,
+								restore.Namespace,
+							)
+							if err != nil {
+								return err
+							}
+							if !skip {
+								objectBasedOnIncludeResources = append(
+									objectBasedOnIncludeResources,
+									o,
+								)
+							}
+						}
+						tempObjects, err := a.getNamespacedObjectsToDelete(
+							restore,
+							objectBasedOnIncludeResources,
+						)
+						if err != nil {
+							return err
+						}
+						err = a.resourceCollector.DeleteResources(
+							a.dynamicInterface,
+							tempObjects, updateCr)
+						if err != nil {
+							return err
+						}
 					}
-				}
-				// pvc creation is not part of kdmp
-				if driverName != volume.KDMPDriverName {
-					if err := a.applyResources(restore, preRestoreObjects, updateCr); err != nil {
-						return err
+					// pvc creation is not part of kdmp
+					if driverName != volume.KDMPDriverName {
+						if err := a.applyResources(restore, preRestoreObjects, updateCr); err != nil {
+							return err
+						}
 					}
 				}
 				restore, err = a.updateRestoreCRInVolumeStage(
@@ -841,11 +873,11 @@ func (a *ApplicationRestoreController) restoreVolumes(restore *storkapi.Applicat
 					return err
 				}
 			}
-		}
 	}
 
-	// Check whether ResourceExport is present or not
-	if nfs {
+	// If NFS, we create a ResourceExport CR but ensure that PX volumes are ignored in the restore.
+	// If the PXD driver is the only one present, we do not create the PVC job; that is handled in the loop above.
+	if nfs && !skipNfsForPxDOnlyDriver {
 		err = a.client.Update(context.TODO(), restore)
 		if err != nil {
 			time.Sleep(retrySleep)
@@ -1406,21 +1438,26 @@ func (a *ApplicationRestoreController) skipVolumesFromRestoreList(
 			logrus.Infof("skipping namespace %s for restore", bkupVolInfo.Namespace)
 			continue
 		}
+		ns := val
+		var pvcName string
+		if objects != nil {
+			// get corresponding pvc object from objects list
+			pvcObject, err := volume.GetPVCFromObjects(objects, bkupVolInfo)
+			if err != nil {
+				return newVolInfos, existingInfos, err
+			}
+			pvcName = pvcObject.Name
-		// get corresponding pvc object from objects list
-		pvcObject, err := volume.GetPVCFromObjects(objects, bkupVolInfo)
-		if err != nil {
-			return newVolInfos, existingInfos, err
+		} else {
+			pvcName = bkupVolInfo.PersistentVolumeClaim
 		}
-
-		ns := val
-		pvc, err := core.Instance().GetPersistentVolumeClaim(pvcObject.Name, ns)
+		pvc, err := core.Instance().GetPersistentVolumeClaim(pvcName, ns)
 		if err != nil {
 			if k8s_errors.IsNotFound(err) {
 				newVolInfos = append(newVolInfos, bkupVolInfo)
 				continue
 			}
-			return newVolInfos, existingInfos, fmt.Errorf("erorr getting pvc %s/%s: %v", ns, pvcObject.Name, err)
+			return newVolInfos, existingInfos, fmt.Errorf("error getting pvc %s/%s: %v", ns, pvcName, err)
 		}
 		pvName := pvc.Spec.VolumeName
 		var zones []string
@@ -1916,7 +1953,7 @@ func (a *ApplicationRestoreController) restoreResources(
 	logrus.Debugf("resource export: %s, status: %s", resourceExport.Name, resourceExport.Status.Status)
 	switch resourceExport.Status.Status {
 	case kdmpapi.ResourceExportStatusFailed:
-		message = fmt.Sprintf("Error applying resources: %v", err)
+		message = fmt.Sprintf("Error applying resources: %v", resourceExport.Status.Reason)
 		restore.Status.Status = storkapi.ApplicationRestoreStatusFailed
 		restore.Status.Stage = storkapi.ApplicationRestoreStageFinal
 		restore.Status.Reason = message
@@ -2318,7 +2355,7 @@ func (a *ApplicationRestoreController) processVMResourcesForVMRestoreFromNFS(res
 	logrus.Debugf("resource export: %s, status: %s", resourceExport.Name, resourceExport.Status.Status)
 	switch resourceExport.Status.Status {
 	case kdmpapi.ResourceExportStatusFailed:
-		message = fmt.Sprintf("Error applying resources: %v", err)
+		message = fmt.Sprintf("Error applying resources: %v", resourceExport.Status.Reason)
 		restore.Status.Status = storkapi.ApplicationRestoreStatusFailed
 		restore.Status.Stage = storkapi.ApplicationRestoreStageFinal
 		restore.Status.Reason = message
diff --git a/vendor/github.com/libopenstorage/stork/pkg/objectstore/azure/azure.go b/vendor/github.com/libopenstorage/stork/pkg/objectstore/azure/azure.go
index c8bd99fa9..7d520d53e 100644
--- a/vendor/github.com/libopenstorage/stork/pkg/objectstore/azure/azure.go
+++ b/vendor/github.com/libopenstorage/stork/pkg/objectstore/azure/azure.go
@@ -4,9 +4,11 @@ import (
 	"context"
 	"fmt"
 	"net/url"
+	"os"
 
 	"github.com/Azure/azure-pipeline-go/pipeline"
 	"github.com/Azure/azure-storage-blob-go/azblob"
+	az_autorest "github.com/Azure/go-autorest/autorest/azure"
 	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
 	"github.com/libopenstorage/stork/pkg/objectstore/common"
 	"github.com/sirupsen/logrus"
@@ -14,6 +16,10 @@ import (
 	"gocloud.dev/blob/azureblob"
 )
 
+const (
+	azureEnvKey = "AZURE_ENVIRONMENT"
+)
+
 func getPipeline(backupLocation *stork_api.BackupLocation) (pipeline.Pipeline, error) {
 	accountName := azureblob.AccountName(backupLocation.Location.AzureConfig.StorageAccountName)
 	accountKey := azureblob.AccountKey(backupLocation.Location.AzureConfig.StorageAccountKey)
@@ -24,6 +30,31 @@ func getPipeline(backupLocation *stork_api.BackupLocation) (pipeline.Pipeline, e
 	return azureblob.NewPipeline(credential, azblob.PipelineOptions{}), nil
 }
 
+func getAzureURLSuffix(backupLocation *stork_api.BackupLocation) (string, error) {
+	// Give first preference to the environment variable setting.
+	azureEnv := os.Getenv(azureEnvKey)
+	if len(azureEnv) == 0 {
+		// If the env variable is not set, check the azure environment value from the backuplocation CR.
+		if len(backupLocation.Location.AzureConfig.Environment) != 0 {
+			azureEnv = string(backupLocation.Location.AzureConfig.Environment)
+			logrus.Infof("Received azure environment type %s from the backup location CR", azureEnv)
+		} else {
+			logrus.Infof("No azure environment type set in either the AZURE_ENVIRONMENT variable or the backup location CR; defaulting the storage domain to the public cloud")
+			return az_autorest.PublicCloud.StorageEndpointSuffix, nil
+		}
+	} else {
+		logrus.Infof("Received azure environment type %s via environment variable, which takes priority over the type passed down from the backup location CR", azureEnv)
+	}
+
+	azureClientEnv, err := az_autorest.EnvironmentFromName(azureEnv)
+	if err != nil {
+		logrus.Errorf("Failed to get azure client env for: %v, err: %v", azureEnv, err)
+		return "", err
+	}
+	// Endpoint suffix based on the current cloud type
+	return azureClientEnv.StorageEndpointSuffix, nil
+}
+
 // GetBucket gets a reference to the bucket for that backup location
 func GetBucket(backupLocation *stork_api.BackupLocation) (*blob.Bucket, error) {
 	accountName := azureblob.AccountName(backupLocation.Location.AzureConfig.StorageAccountName)
@@ -31,7 +62,17 @@ func GetBucket(backupLocation *stork_api.BackupLocation) (*blob.Bucket, error) {
 	if err != nil {
 		return nil, err
 	}
-	return azureblob.OpenBucket(context.Background(), pipeline, accountName, backupLocation.Location.Path, nil)
+	urlSuffix, err := getAzureURLSuffix(backupLocation)
+	if err != nil {
+		return nil, err
+	}
+
+	urlSuffix = fmt.Sprintf("blob.%s", urlSuffix)
+	logrus.Debugf("azure - GetBucket - urlSuffix %v", urlSuffix)
+	opts := azureblob.Options{
+		StorageDomain: azureblob.StorageDomain(urlSuffix),
+	}
+	return azureblob.OpenBucket(context.Background(), pipeline, accountName, backupLocation.Location.Path, &opts)
 }
 
 // CreateBucket creates a bucket for the bucket location
@@ -41,11 +82,16 @@ func CreateBucket(backupLocation *stork_api.BackupLocation) error {
 	if err != nil {
 		return err
 	}
-	url, err := url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", accountName))
+	urlSuffix, err := getAzureURLSuffix(backupLocation)
 	if err != nil {
 		return err
 	}
+	url, err := url.Parse(fmt.Sprintf("https://%s.blob.%s", accountName, urlSuffix))
+	if err != nil {
+		return err
+	}
+	logrus.Debugf("azure - CreateBucket - url %v", url)
 	_, err = azblob.NewServiceURL(*url, pipeline).
 		NewContainerURL(backupLocation.Location.Path).
 		Create(context.Background(), azblob.Metadata{}, azblob.PublicAccessNone)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f78e00cf9..a1cfac03f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -441,7 +441,7 @@ github.com/libopenstorage/openstorage-sdk-clients/sdk/golang
 github.com/libopenstorage/secrets
 github.com/libopenstorage/secrets/aws/credentials
 github.com/libopenstorage/secrets/k8s
-# github.com/libopenstorage/stork v1.4.1-0.20240506185157-275e4369d062
+# github.com/libopenstorage/stork v1.4.1-0.20240513102605-2340238c7664
 ## explicit; go 1.21
 github.com/libopenstorage/stork/drivers
 github.com/libopenstorage/stork/drivers/volume
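The `skipNfsForPxDOnlyDriver` logic in `restoreVolumes` boils down to one rule: skip the nfs-restore-pvc job only when the backup sits on an NFS backup location and the Portworx driver is the sole driver with volumes in the backup. A toy reduction of that rule, outside the controller — the function name and the `"pxd"` constant value are assumptions for illustration, not taken from the vendored code:

```go
package main

import "fmt"

// portworxDriverName mirrors volume.PortworxDriverName; "pxd" is assumed here.
const portworxDriverName = "pxd"

// skipNfsVolumeJob reports whether the NFS volume-restore job should be
// skipped: only when the location is NFS and pxd is the sole driver, since
// that restore already runs in the stork controller's own context.
func skipNfsVolumeJob(nfs bool, driverVolumeCounts map[string]int) bool {
	if !nfs || len(driverVolumeCounts) != 1 {
		return false
	}
	_, pxdOnly := driverVolumeCounts[portworxDriverName]
	return pxdOnly
}

func main() {
	fmt.Println(skipNfsVolumeJob(true, map[string]int{"pxd": 2}))           // true: handled in-process
	fmt.Println(skipNfsVolumeJob(true, map[string]int{"pxd": 1, "csi": 1})) // false: ResourceExport CR still needed
	fmt.Println(skipNfsVolumeJob(false, map[string]int{"pxd": 2}))          // false: non-NFS path
}
```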
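The environment resolution in `getAzureURLSuffix` follows a fixed precedence: the `AZURE_ENVIRONMENT` process variable wins, then the `environment` value merged into the BackupLocation CR, then the public-cloud default. A minimal standalone sketch of that precedence, using the same go-autorest helpers as the vendored code (`resolveStorageSuffix` and `crEnvironment` are hypothetical names for this illustration):

```go
package main

import (
	"fmt"
	"os"

	az_autorest "github.com/Azure/go-autorest/autorest/azure"
)

// resolveStorageSuffix returns the blob-storage endpoint suffix for the
// effective Azure environment. crEnvironment stands in for
// backupLocation.Location.AzureConfig.Environment.
func resolveStorageSuffix(crEnvironment string) (string, error) {
	env := os.Getenv("AZURE_ENVIRONMENT") // highest priority
	if env == "" {
		env = crEnvironment // fall back to the BackupLocation CR
	}
	if env == "" {
		// Neither source is set: default to the public cloud suffix,
		// i.e. "core.windows.net".
		return az_autorest.PublicCloud.StorageEndpointSuffix, nil
	}
	azureEnv, err := az_autorest.EnvironmentFromName(env)
	if err != nil {
		return "", fmt.Errorf("unknown azure environment %q: %v", env, err)
	}
	return azureEnv.StorageEndpointSuffix, nil
}

func main() {
	suffix, err := resolveStorageSuffix("AzureChinaCloud")
	if err != nil {
		panic(err)
	}
	// GetBucket prepends "blob." to build the storage domain,
	// e.g. "blob.core.chinacloudapi.cn" for Azure China.
	fmt.Println("blob." + suffix)
}
```

With `"AzureChinaCloud"` set, `GetBucket` opens the bucket against the `blob.core.chinacloudapi.cn` storage domain and `CreateBucket` targets `https://<account>.blob.core.chinacloudapi.cn`; with nothing set, both fall back to the public `blob.core.windows.net`.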
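From the caller's side, the new field can be set directly on the CR; in practice the value usually arrives via the `environment` key of the location's secret, which `getMergedAzureConfig` merges in. A hedged sketch under those assumptions — field accesses follow the vendored code above, but the `BackupLocationItem` type name and the placeholder values are assumptions here:

```go
package main

import (
	"fmt"

	stork_api "github.com/libopenstorage/stork/pkg/apis/stork/v1alpha1"
	"github.com/libopenstorage/stork/pkg/objectstore/azure"
)

func main() {
	// BackupLocation pointing at an Azure China storage account.
	// Replace the placeholders with a real account name and key.
	bl := &stork_api.BackupLocation{
		Location: stork_api.BackupLocationItem{
			Path: "my-backup-container",
			AzureConfig: &stork_api.AzureConfig{
				StorageAccountName: "mystorageaccount",      // placeholder
				StorageAccountKey:  "<storage-account-key>", // placeholder
				Environment:        stork_api.AzureChina,    // "AzureChinaCloud"
			},
		},
	}

	// GetBucket resolves the storage domain from the Environment field
	// (unless AZURE_ENVIRONMENT overrides it) before opening the bucket.
	bucket, err := azure.GetBucket(bl)
	if err != nil {
		panic(err)
	}
	defer bucket.Close()
	fmt.Println("opened bucket for", bl.Location.Path)
}
```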