From c8b9aa7b3c0a35252c8c18f14d5749c964ae751b Mon Sep 17 00:00:00 2001 From: MatthieuFin Date: Tue, 13 Feb 2024 15:23:30 +0100 Subject: [PATCH] feat(cinder-csi-plugin): add support for multi-cloud openstack clusters (#2035) * feat(cinder-csi-plugin-controller): add support for the --cloud-name arg, which permits managing multiple config Global sections * feat(cinder-csi-plugin-controller): add support for the `cloud` key in the secret referenced by a StorageClass, used to select a specific config Global section * feat(cinder-csi-plugin-node): add support for the --cloud-name arg, which permits selecting one of the config Global sections Signed-off-by: MatthieuFin --- cmd/cinder-csi-plugin/main.go | 21 +- pkg/csi/cinder/controllerserver.go | 266 ++++++++++++++---- pkg/csi/cinder/controllerserver_test.go | 15 +- pkg/csi/cinder/driver.go | 4 +- pkg/csi/cinder/nodeserver_test.go | 18 +- pkg/csi/cinder/openstack/fixtures/clouds.yaml | 22 ++ pkg/csi/cinder/openstack/openstack.go | 55 ++-- pkg/csi/cinder/openstack/openstack_test.go | 133 +++++++-- pkg/csi/cinder/utils.go | 4 +- tests/sanity/cinder/sanity_test.go | 6 +- 10 files changed, 419 insertions(+), 125 deletions(-) diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index 6ce8556689..537fdb940e 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -34,6 +34,7 @@ var ( endpoint string nodeID string cloudConfig []string + cloudNames []string cluster string httpEndpoint string provideControllerService bool @@ -65,6 +66,8 @@ func main() { klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err) } + cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "CSI driver cloud name in config files. This option can be given multiple times to manage multiple openstack clouds") + cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") @@ -82,14 +85,18 @@ func handle() { d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) openstack.InitOpenStackProvider(cloudConfig, httpEndpoint) - cloud, err := openstack.GetOpenStackProvider() - if err != nil { - klog.Warningf("Failed to GetOpenStackProvider: %v", err) - return + var err error + clouds := make(map[string]openstack.IOpenStack) + for _, cloudName := range cloudNames { + clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName) + if err != nil { + klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err) + return + } } if provideControllerService { - d.SetupControllerService(cloud) + d.SetupControllerService(clouds) } if provideNodeService { @@ -97,9 +104,9 @@ func handle() { mount := mount.GetMountProvider() //Initialize Metadata - metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder) - d.SetupNodeService(cloud, mount, metadata) + d.SetupNodeService(clouds[cloudNames[0]], mount, metadata) } d.Run() diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go index fcf7aacbd7..b7f7863d0f 100644 --- a/pkg/csi/cinder/controllerserver.go +++ b/pkg/csi/cinder/controllerserver.go @@ -17,7 +17,9 @@ limitations under the License.
package cinder import ( + "encoding/json" "fmt" + "sort" "strconv" "github.com/container-storage-interface/spec/lib/go/csi" @@ -25,6 +27,7 @@ import ( "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" "github.com/kubernetes-csi/csi-lib-utils/protosanitizer" + "golang.org/x/exp/maps" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -38,7 +41,7 @@ import ( type controllerServer struct { Driver *Driver - Cloud openstack.IOpenStack + Clouds map[string]openstack.IOpenStack } const ( @@ -48,6 +51,13 @@ const ( func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { klog.V(4).Infof("CreateVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[CreateVolume] specified cloud undefined") + } + // Volume Name volName := req.GetName() volCapabilities := req.GetVolumeCapabilities() @@ -80,7 +90,6 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol } } - cloud := cs.Cloud ignoreVolumeAZ := cloud.GetBlockStorageOpts().IgnoreVolumeAZ // Verify a volume with the provided name doesn't already exist for this tenant @@ -187,12 +196,19 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { klog.V(4).Infof("DeleteVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Errorf(codes.InvalidArgument, "[DeleteVolume] specified cloud \"%s\" undefined", volCloud) + } + // Volume Delete volID := req.GetVolumeId() if len(volID) == 0 { return nil, status.Error(codes.InvalidArgument, "DeleteVolume Volume ID must be provided") } - err := cs.Cloud.DeleteVolume(volID) + err := cloud.DeleteVolume(volID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Volume %s is already deleted.", volID) @@ -210,6 +226,13 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { klog.V(4).Infof("ControllerPublishVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerPublishVolume] specified cloud undefined") + } + // Volume Attach instanceID := req.GetNodeId() volumeID := req.GetVolumeId() @@ -225,7 +248,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Error(codes.InvalidArgument, "[ControllerPublishVolume] Volume capability must be provided") } - _, err := cs.Cloud.GetVolume(volumeID) + _, err := cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Volume %s not found", volumeID) @@ -233,7 +256,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return 
nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] get volume failed with error %v", err) } - _, err = cs.Cloud.GetInstanceByID(instanceID) + _, err = cloud.GetInstanceByID(instanceID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "[ControllerPublishVolume] Instance %s not found", instanceID) @@ -241,20 +264,20 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] GetInstanceByID failed with error %v", err) } - _, err = cs.Cloud.AttachVolume(instanceID, volumeID) + _, err = cloud.AttachVolume(instanceID, volumeID) if err != nil { klog.Errorf("Failed to AttachVolume: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] Attach Volume failed with error %v", err) } - err = cs.Cloud.WaitDiskAttached(instanceID, volumeID) + err = cloud.WaitDiskAttached(instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskAttached: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to attach volume: %v", err) } - devicePath, err := cs.Cloud.GetAttachmentDiskPath(instanceID, volumeID) + devicePath, err := cloud.GetAttachmentDiskPath(instanceID, volumeID) if err != nil { klog.Errorf("Failed to GetAttachmentDiskPath: %v", err) return nil, status.Errorf(codes.Internal, "[ControllerPublishVolume] failed to get device path of attached volume: %v", err) @@ -274,6 +297,13 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { klog.V(4).Infof("ControllerUnpublishVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerUnpublishVolume] specified cloud undefined") + } + // Volume Detach instanceID := req.GetNodeId() volumeID := req.GetVolumeId() @@ -281,7 +311,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "[ControllerUnpublishVolume] Volume ID must be provided") } - _, err := cs.Cloud.GetInstanceByID(instanceID) + _, err := cloud.GetInstanceByID(instanceID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because node %s does not exist", volumeID, instanceID) @@ -290,7 +320,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "[ControllerUnpublishVolume] GetInstanceByID failed with error %v", err) } - err = cs.Cloud.DetachVolume(instanceID, volumeID) + err = cloud.DetachVolume(instanceID, volumeID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("ControllerUnpublishVolume assuming volume %s is detached, because it does not exist", volumeID) @@ -300,7 +330,7 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return nil, status.Errorf(codes.Internal, "ControllerUnpublishVolume Detach Volume failed with error %v", err) } - err = cs.Cloud.WaitDiskDetached(instanceID, volumeID) + err = cloud.WaitDiskDetached(instanceID, volumeID) if err != nil { klog.Errorf("Failed to WaitDiskDetached: %v", err) if 
cpoerrors.IsNotFound(err) { @@ -315,6 +345,11 @@ func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req * return &csi.ControllerUnpublishVolumeResponse{}, nil } +type CloudsStartingToken struct { + CloudName string `json:"cloud"` + Token string `json:"token"` +} + func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { klog.V(4).Infof("ListVolumes: called with %+#v request", req) @@ -323,44 +358,116 @@ } maxEntries := int(req.MaxEntries) - vlist, nextPageToken, err := cs.Cloud.ListVolumes(maxEntries, req.StartingToken) - if err != nil { - klog.Errorf("Failed to ListVolumes: %v", err) - if cpoerrors.IsInvalidError(err) { - return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err) - } - return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err) + var err error + var cloudsToken = CloudsStartingToken{ + CloudName: "", + Token: "", } - ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist)) - for _, v := range vlist { - ventry := csi.ListVolumesResponse_Entry{ - Volume: &csi.Volume{ - VolumeId: v.ID, - CapacityBytes: int64(v.Size * 1024 * 1024 * 1024), - }, + cloudsNames := maps.Keys(cs.Clouds) + sort.Strings(cloudsNames) + + currentCloudName := cloudsNames[0] + if req.StartingToken != "" { + err = json.Unmarshal([]byte(req.StartingToken), &cloudsToken) + if err != nil { + return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: Token invalid") } + currentCloudName = cloudsToken.CloudName + } - status := &csi.ListVolumesResponse_VolumeStatus{} - status.PublishedNodeIds = make([]string, 0, len(v.Attachments)) - for _, attachment := range v.Attachments { - status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID) + startingToken := cloudsToken.Token + var cloudsVentries []*csi.ListVolumesResponse_Entry + var vlist []volumes.Volume + var nextPageToken string + + startIdx := 0 + for idx := 0; idx < len(cloudsNames); idx++ { + if cloudsNames[idx] == currentCloudName { + break + } + startIdx++ + } + for idx := startIdx; idx < len(cloudsNames); idx++ { + if maxEntries > 0 { + vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries-len(cloudsVentries), startingToken) + } else { + vlist, nextPageToken, err = cs.Clouds[cloudsNames[idx]].ListVolumes(maxEntries, startingToken) } - ventry.Status = status + if err != nil { + klog.Errorf("Failed to ListVolumes: %v", err) + if cpoerrors.IsInvalidError(err) { + return nil, status.Errorf(codes.Aborted, "[ListVolumes] Invalid request: %v", err) + } + return nil, status.Errorf(codes.Internal, "ListVolumes failed with error %v", err) + } + + ventries := make([]*csi.ListVolumesResponse_Entry, 0, len(vlist)) + for _, v := range vlist { + ventry := csi.ListVolumesResponse_Entry{ + Volume: &csi.Volume{ + VolumeId: v.ID, + CapacityBytes: int64(v.Size * 1024 * 1024 * 1024), + }, + } - ventries = append(ventries, &ventry) + status := &csi.ListVolumesResponse_VolumeStatus{} + status.PublishedNodeIds = make([]string, 0, len(v.Attachments)) + for _, attachment := range v.Attachments { + status.PublishedNodeIds = append(status.PublishedNodeIds, attachment.ServerID) + } + ventry.Status = status + + ventries = append(ventries, &ventry) + } + klog.V(4).Infof("ListVolumes: retrieved %d entries and %q next token from cloud %q", len(ventries), nextPageToken, cloudsNames[idx]) + +
cloudsVentries = append(cloudsVentries, ventries...) + + // Reached maxEntries; set the next token with the cloud identifier if needed + if maxEntries > 0 && len(cloudsVentries) == maxEntries { + if nextPageToken == "" { + if idx+1 == len(cloudsNames) { + // no more entries and no more clouds + // send no token, it's finished + klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), "") + return &csi.ListVolumesResponse{ + Entries: cloudsVentries, + NextToken: "", + }, nil + } + // still clouds to process; resume from the next cloud with an empty per-cloud token + cloudsToken.CloudName = cloudsNames[idx+1] + } else { + cloudsToken.CloudName = cloudsNames[idx] + } + cloudsToken.Token = nextPageToken + data, _ := json.Marshal(cloudsToken) + klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), string(data)) + return &csi.ListVolumesResponse{ + Entries: cloudsVentries, + NextToken: string(data), + }, nil + } } - klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(ventries), nextPageToken) + klog.V(4).Infof("ListVolumes: completed with %d entries and %q next token", len(cloudsVentries), "") return &csi.ListVolumesResponse{ - Entries: ventries, - NextToken: nextPageToken, + Entries: cloudsVentries, + NextToken: "", }, nil } func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { klog.V(4).Infof("CreateSnapshot: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[CreateSnapshot] specified cloud undefined") + } + name := req.Name volumeID := req.GetSourceVolumeId() snapshotType := req.Parameters[openstack.SnapshotType] @@ -396,7 +503,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.InvalidArgument, "Snapshot type must be 'backup', 'snapshot' or not defined") } var backupsAreEnabled bool - backupsAreEnabled, err = cs.Cloud.BackupsAreEnabled() + backupsAreEnabled, err = cloud.BackupsAreEnabled() klog.V(4).Infof("Backups enabled: %v", backupsAreEnabled) if err != nil { klog.Errorf("Failed to check if backups are enabled: %v", err) @@ -408,7 +515,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS return nil, status.Error(codes.FailedPrecondition, "Backups are not enabled in Cinder") } // Get a list of backups with the provided name - backups, err = cs.Cloud.ListBackups(filters) + backups, err = cloud.ListBackups(filters) if err != nil { klog.Errorf("Failed to query for existing Backup during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get backups") @@ -422,7 +529,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if len(backups) == 1 { // since backup.VolumeID is not part of ListBackups response // we need fetch single backup to get the full object.
- backup, err = cs.Cloud.GetBackupByID(backups[0].ID) + backup, err = cloud.GetBackupByID(backups[0].ID) if err != nil { klog.Errorf("Failed to get backup by ID %s: %v", backup.ID, err) return nil, status.Error(codes.Internal, "Failed to get backup by ID") @@ -451,7 +558,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS // Create the snapshot if the backup does not already exist and wait for it to be ready if !backupAlreadyExists { - snap, err = cs.createSnapshot(name, volumeID, req.Parameters) + snap, err = cs.createSnapshot(cloud, name, volumeID, req.Parameters) if err != nil { return nil, err } @@ -461,7 +568,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - snap.Status, err = cs.Cloud.WaitSnapshotReady(snap.ID) + snap.Status, err = cloud.WaitSnapshotReady(snap.ID) if err != nil { klog.Errorf("Failed to WaitSnapshotReady: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error: %v. Current snapshot status: %v", err, snap.Status) @@ -486,7 +593,7 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS if snapshotType == "backup" { if !backupAlreadyExists { - backup, err = cs.createBackup(name, volumeID, snap, req.Parameters) + backup, err = cs.createBackup(cloud, name, volumeID, snap, req.Parameters) if err != nil { return nil, err } @@ -497,20 +604,20 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS klog.Errorf("Error to convert time to timestamp: %v", err) } - backup.Status, err = cs.Cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) + backup.Status, err = cloud.WaitBackupReady(backup.ID, snapSize, backupMaxDurationSecondsPerGB) if err != nil { klog.Errorf("Failed to WaitBackupReady: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v. Current backups status: %s", err, backup.Status)) } // Necessary to get all the backup information, including size. 
- backup, err = cs.Cloud.GetBackupByID(backup.ID) + backup, err = cloud.GetBackupByID(backup.ID) if err != nil { klog.Errorf("Failed to GetBackupByID after backup creation: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("GetBackupByID failed with error %v", err)) } - err = cs.Cloud.DeleteSnapshot(backup.SnapshotID) + err = cloud.DeleteSnapshot(backup.SnapshotID) if err != nil && !cpoerrors.IsNotFound(err) { klog.Errorf("Failed to DeleteSnapshot: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteSnapshot failed with error %v", err)) @@ -529,13 +636,13 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS } -func (cs *controllerServer) createSnapshot(name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { +func (cs *controllerServer) createSnapshot(cloud openstack.IOpenStack, name string, volumeID string, parameters map[string]string) (snap *snapshots.Snapshot, err error) { filters := map[string]string{} filters["Name"] = name // List existing snapshots with the same name - snapshots, _, err := cs.Cloud.ListSnapshots(filters) + snapshots, _, err := cloud.ListSnapshots(filters) if err != nil { klog.Errorf("Failed to query for existing Snapshot during CreateSnapshot: %v", err) return nil, status.Error(codes.Internal, "Failed to get snapshots") @@ -574,7 +681,7 @@ func (cs *controllerServer) createSnapshot(name string, volumeID string, paramet } // TODO: Delegate the check to openstack itself and ignore the conflict - snap, err = cs.Cloud.CreateSnapshot(name, volumeID, properties) + snap, err = cloud.CreateSnapshot(name, volumeID, properties) if err != nil { klog.Errorf("Failed to Create snapshot: %v", err) return nil, status.Errorf(codes.Internal, "CreateSnapshot failed with error %v", err) @@ -585,7 +692,7 @@ func (cs *controllerServer) createSnapshot(name string, volumeID string, paramet return snap, nil } -func (cs *controllerServer) createBackup(name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { +func (cs *controllerServer) createBackup(cloud openstack.IOpenStack, name string, volumeID string, snap *snapshots.Snapshot, parameters map[string]string) (*backups.Backup, error) { // Add cluster ID to the snapshot metadata properties := map[string]string{cinderCSIClusterIDKey: cs.Driver.cluster} @@ -600,7 +707,7 @@ func (cs *controllerServer) createBackup(name string, volumeID string, snap *sna } } - backup, err := cs.Cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) + backup, err := cloud.CreateBackup(name, volumeID, snap.ID, parameters[openstack.SnapshotAvailabilityZone], properties) if err != nil { klog.Errorf("Failed to Create backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("CreateBackup failed with error %v", err)) @@ -613,6 +720,13 @@ func (cs *controllerServer) createBackup(name string, volumeID string, snap *sna func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { klog.V(4).Infof("DeleteSnapshot: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[DeleteSnapshot] specified cloud undefined") + } + id := req.GetSnapshotId() if id == "" { @@ -620,9 +734,9 @@ func (cs *controllerServer) 
DeleteSnapshot(ctx context.Context, req *csi.DeleteS } // If volumeSnapshot object was linked to a cinder backup, delete the backup. - back, err := cs.Cloud.GetBackupByID(id) + back, err := cloud.GetBackupByID(id) if err == nil && back != nil { - err = cs.Cloud.DeleteBackup(id) + err = cloud.DeleteBackup(id) if err != nil { klog.Errorf("Failed to Delete backup: %v", err) return nil, status.Error(codes.Internal, fmt.Sprintf("DeleteBackup failed with error %v", err)) @@ -630,7 +744,7 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } // Delegate the check to openstack itself - err = cs.Cloud.DeleteSnapshot(id) + err = cloud.DeleteSnapshot(id) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Snapshot %s is already deleted.", id) @@ -644,9 +758,16 @@ func (cs *controllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS } func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ListSnapshots] specified cloud undefined") + } + snapshotID := req.GetSnapshotId() if len(snapshotID) != 0 { - snap, err := cs.Cloud.GetSnapshotByID(snapshotID) + snap, err := cloud.GetSnapshotByID(snapshotID) if err != nil { if cpoerrors.IsNotFound(err) { klog.V(3).Infof("Snapshot %s not found", snapshotID) @@ -690,7 +811,7 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap // Only retrieve snapshots that are available filters["Status"] = "available" - slist, nextPageToken, err = cs.Cloud.ListSnapshots(filters) + slist, nextPageToken, err = cloud.ListSnapshots(filters) if err != nil { klog.Errorf("Failed to ListSnapshots: %v", err) return nil, status.Errorf(codes.Internal, "ListSnapshots failed with error %v", err) @@ -732,6 +853,13 @@ func (cs *controllerServer) ControllerGetCapabilities(ctx context.Context, req * func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ValidateVolumeCapabilities] specified cloud undefined") + } + reqVolCap := req.GetVolumeCapabilities() if len(reqVolCap) == 0 { @@ -743,7 +871,7 @@ func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req return nil, status.Error(codes.InvalidArgument, "ValidateVolumeCapabilities Volume ID must be provided") } - _, err := cs.Cloud.GetVolume(volumeID) + _, err := cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Errorf(codes.NotFound, "ValidateVolumeCapabilities Volume %s not found", volumeID) @@ -784,12 +912,19 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") } - volume, err := cs.Cloud.GetVolume(volumeID) - if err != nil { - if cpoerrors.IsNotFound(err) { - return nil, status.Errorf(codes.NotFound, "Volume %s not found", volumeID) + var volume *volumes.Volume + var err error + for _, cloud := range cs.Clouds { + volume, err = cloud.GetVolume(volumeID) + if err != nil { + if cpoerrors.IsNotFound(err) { + continue + } + return nil, status.Errorf(codes.Internal,
"ControllerGetVolume failed with error %v", err) } - return nil, status.Errorf(codes.Internal, "ControllerGetVolume failed with error %v", err) + } + if err != nil { + return nil, status.Errorf(codes.NotFound, "Volume %s not found", volumeID) } ventry := csi.ControllerGetVolumeResponse{ @@ -812,6 +947,13 @@ func (cs *controllerServer) ControllerGetVolume(ctx context.Context, req *csi.Co func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { klog.V(4).Infof("ControllerExpandVolume: called with args %+v", protosanitizer.StripSecrets(*req)) + // Volume cloud + volCloud := req.GetSecrets()["cloud"] + cloud, cloudExist := cs.Clouds[volCloud] + if !cloudExist { + return nil, status.Error(codes.InvalidArgument, "[ControllerExpandVolume] specified cloud undefined") + } + volumeID := req.GetVolumeId() if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") @@ -829,7 +971,7 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi return nil, status.Error(codes.OutOfRange, "After round-up, volume size exceeds the limit specified") } - volume, err := cs.Cloud.GetVolume(volumeID) + volume, err := cloud.GetVolume(volumeID) if err != nil { if cpoerrors.IsNotFound(err) { return nil, status.Error(codes.NotFound, "Volume not found") @@ -846,14 +988,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi }, nil } - err = cs.Cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) + err = cloud.ExpandVolume(volumeID, volume.Status, volSizeGB) if err != nil { return nil, status.Errorf(codes.Internal, "Could not resize volume %q to size %v: %v", volumeID, volSizeGB, err) } // we need wait for the volume to be available or InUse, it might be error_extending in some scenario targetStatus := []string{openstack.VolumeAvailableStatus, openstack.VolumeInUseStatus} - err = cs.Cloud.WaitVolumeTargetStatus(volumeID, targetStatus) + err = cloud.WaitVolumeTargetStatus(volumeID, targetStatus) if err != nil { klog.Errorf("Failed to WaitVolumeTargetStatus of volume %s: %v", volumeID, err) return nil, status.Errorf(codes.Internal, "[ControllerExpandVolume] Volume %s not in target state after resize operation: %v", volumeID, err) diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index f8d7bcd541..c3bdf6fd24 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package cinder import ( + "encoding/json" "testing" "github.com/container-storage-interface/spec/lib/go/csi" @@ -32,11 +33,13 @@ var osmock *openstack.OpenStackMock func init() { if fakeCs == nil { osmock = new(openstack.OpenStackMock) - openstack.OsInstance = osmock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": osmock, + } d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeCs = NewControllerServer(d, openstack.OsInstance) + fakeCs = NewControllerServer(d, openstack.OsInstances) } } @@ -411,8 +414,12 @@ func TestListVolumes(t *testing.T) { // Init assert assert := assert.New(t) - - fakeReq := &csi.ListVolumesRequest{MaxEntries: 2, StartingToken: FakeVolID} + token := CloudsStartingToken{ + CloudName: "", + Token: FakeVolID, + } + data, _ := json.Marshal(token) + fakeReq := &csi.ListVolumesRequest{MaxEntries: 2, StartingToken: string(data)} // Expected Result expectedRes := &csi.ListVolumesResponse{ diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index bd75fbce9d..f4bb7edecd 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -172,9 +172,9 @@ func (d *Driver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_Access return d.vcap } -func (d *Driver) SetupControllerService(cloud openstack.IOpenStack) { +func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack) { klog.Info("Providing controller service") - d.cs = NewControllerServer(d, cloud) + d.cs = NewControllerServer(d, clouds) } func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index dbfeb6144b..2e25191519 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -48,9 +48,11 @@ func init() { metadata.MetadataService = metamock omock = new(openstack.OpenStackMock) - openstack.OsInstance = omock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": omock, + } - fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) } } @@ -140,10 +142,12 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { mount.MInstance = mmock metadata.MetadataService = metamock - openstack.OsInstance = omock + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": omock, + } d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""]) // Init assert assert := assert.New(t) @@ -283,7 +287,9 @@ func TestNodeUnpublishVolume(t *testing.T) { func TestNodeUnpublishVolumeEphermeral(t *testing.T) { mount.MInstance = mmock metadata.MetadataService = metamock - openstack.OsInstance = omock + osmock := map[string]openstack.IOpenStack{ + "": new(openstack.OpenStackMock), + } fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID) mmock.On("UnmountPath", FakeTargetPath).Return(nil) @@ -293,7 +299,7 @@ func TestNodeUnpublishVolumeEphermeral(t *testing.T) { omock.On("DeleteVolume", FakeVolID).Return(nil) d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) - fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) + fakeNse := NewNodeServer(d, mount.MInstance, 
metadata.MetadataService, osmock[""]) // Init assert assert := assert.New(t) diff --git a/pkg/csi/cinder/openstack/fixtures/clouds.yaml b/pkg/csi/cinder/openstack/fixtures/clouds.yaml index f3e7a6367c..e42bf0927d 100644 --- a/pkg/csi/cinder/openstack/fixtures/clouds.yaml +++ b/pkg/csi/cinder/openstack/fixtures/clouds.yaml @@ -10,3 +10,25 @@ clouds: cacert: "fake-ca.crt" interface: "public" identity_api_version: 3 + openstack_cloud2: + auth: + auth_url: "https://169.254.169.254/identity/v3" + username: "user" + password: "pass" + project_id: "c869168a828847f39f7f06edd7305637" + domain_id: "2a73b8f597c04551a0fdc8e95544be8a" + region_name: "RegionTwo" + cacert: "fake-ca.crt" + interface: "public" + identity_api_version: 3 + openstack_cloud3: + auth: + auth_url: "https://961.452.961.452/identity/v3" + username: "user_cloud3" + password: "pass_cloud3" + project_id: "66c684738f74161ad8b41cb56224b311" + domain_id: "032da590a2714eda744bd321b5356c7e" + region_name: "AnotherRegion" + cacert: "fake-ca_cloud3.crt" + interface: "public" + identity_api_version: 3 diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go index ee603bae74..4179368584 100644 --- a/pkg/csi/cinder/openstack/openstack.go +++ b/pkg/csi/cinder/openstack/openstack.go @@ -90,13 +90,16 @@ type BlockStorageOpts struct { } type Config struct { - Global client.AuthOpts + Global map[string]*client.AuthOpts Metadata metadata.Opts BlockStorage BlockStorageOpts } func logcfg(cfg Config) { - client.LogCfg(cfg.Global) + for cloudName, global := range cfg.Global { + klog.V(0).Infof("Global: \"%s\"", cloudName) + client.LogCfg(*global) + } klog.Infof("Block storage opts: %v", cfg.BlockStorage) } @@ -121,16 +124,18 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) { } } - // Update the config with data from clouds.yaml if UseClouds is enabled - if cfg.Global.UseClouds { - if cfg.Global.CloudsFile != "" { - os.Setenv("OS_CLIENT_CONFIG_FILE", cfg.Global.CloudsFile) - } - err := client.ReadClouds(&cfg.Global) - if err != nil { - return cfg, err + for _, global := range cfg.Global { + // Update the config with data from clouds.yaml if UseClouds is enabled + if global.UseClouds { + if global.CloudsFile != "" { + os.Setenv("OS_CLIENT_CONFIG_FILE", global.CloudsFile) + } + err := client.ReadClouds(global) + if err != nil { + return cfg, err + } + klog.V(5).Infof("Credentials are loaded from %s:", global.CloudsFile) } - klog.V(5).Infof("Credentials are loaded from %s:", cfg.Global.CloudsFile) } return cfg, nil @@ -138,10 +143,11 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) { const defaultMaxVolAttachLimit int64 = 256 -var OsInstance IOpenStack +var OsInstances map[string]IOpenStack var configFiles = []string{"/etc/cloud.conf"} func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) { + OsInstances = make(map[string]IOpenStack) metrics.RegisterMetrics("cinder-csi") if httpEndpoint != "" { mux := http.NewServeMux() @@ -159,8 +165,8 @@ func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) { klog.V(2).Infof("InitOpenStackProvider configFiles: %s", configFiles) } -// CreateOpenStackProvider creates Openstack Instance -func CreateOpenStackProvider() (IOpenStack, error) { +// CreateOpenStackProvider creates Openstack Instance with custom Global config param +func CreateOpenStackProvider(cloudName string) (IOpenStack, error) { // Get config from file cfg, err := GetConfigFromFiles(configFiles) if err != nil { @@ -168,15 +174,19 @@ func 
CreateOpenStackProvider() (IOpenStack, error) { return nil, err } logcfg(cfg) + _, cloudNameDefined := cfg.Global[cloudName] + if !cloudNameDefined { + return nil, fmt.Errorf("GetConfigFromFiles cloud name \"%s\" not found in configuration files: %s", cloudName, configFiles) + } - provider, err := client.NewOpenStackClient(&cfg.Global, "cinder-csi-plugin", userAgentData...) + provider, err := client.NewOpenStackClient(cfg.Global[cloudName], "cinder-csi-plugin", userAgentData...) if err != nil { return nil, err } epOpts := gophercloud.EndpointOpts{ - Region: cfg.Global.Region, - Availability: cfg.Global.EndpointType, + Region: cfg.Global[cloudName].Region, + Availability: cfg.Global[cloudName].EndpointType, } // Init Nova ServiceClient @@ -197,7 +207,7 @@ func CreateOpenStackProvider() (IOpenStack, error) { } // Init OpenStack - OsInstance = &OpenStack{ + OsInstances[cloudName] = &OpenStack{ compute: computeclient, blockstorage: blockstorageclient, bsOpts: cfg.BlockStorage, @@ -205,16 +215,17 @@ func CreateOpenStackProvider() (IOpenStack, error) { metadataOpts: cfg.Metadata, } - return OsInstance, nil + return OsInstances[cloudName], nil } // GetOpenStackProvider returns Openstack Instance -func GetOpenStackProvider() (IOpenStack, error) { - if OsInstance != nil { +func GetOpenStackProvider(cloudName string) (IOpenStack, error) { + OsInstance, OsInstanceDefined := OsInstances[cloudName] + if OsInstanceDefined { return OsInstance, nil } var err error - OsInstance, err = CreateOpenStackProvider() + OsInstance, err = CreateOpenStackProvider(cloudName) if err != nil { return nil, err } diff --git a/pkg/csi/cinder/openstack/openstack_test.go b/pkg/csi/cinder/openstack/openstack_test.go index e6649b3436..faed3d0fb3 100644 --- a/pkg/csi/cinder/openstack/openstack_test.go +++ b/pkg/csi/cinder/openstack/openstack_test.go @@ -24,6 +24,7 @@ import ( "github.com/gophercloud/gophercloud" "github.com/spf13/pflag" "github.com/stretchr/testify/assert" + "k8s.io/cloud-provider-openstack/pkg/client" ) var fakeFileName = "cloud.conf" @@ -37,6 +38,24 @@ var fakeRegion = "RegionOne" var fakeCAfile = "fake-ca.crt" var fakeCloudName = "openstack" +var fakeUserName_cloud2 = "user" +var fakePassword_cloud2 = "pass" +var fakeAuthURL_cloud2 = "https://169.254.169.254/identity/v3" +var fakeTenantID_cloud2 = "c869168a828847f39f7f06edd7305637" +var fakeDomainID_cloud2 = "2a73b8f597c04551a0fdc8e95544be8a" +var fakeRegion_cloud2 = "RegionTwo" +var fakeCAfile_cloud2 = "fake-ca.crt" +var fakeCloudName_cloud2 = "openstack_cloud2" + +var fakeUserName_cloud3 = "user_cloud3" +var fakePassword_cloud3 = "pass_cloud3" +var fakeAuthURL_cloud3 = "https://961.452.961.452/identity/v3" +var fakeTenantID_cloud3 = "66c684738f74161ad8b41cb56224b311" +var fakeDomainID_cloud3 = "032da590a2714eda744bd321b5356c7e" +var fakeRegion_cloud3 = "AnotherRegion" +var fakeCAfile_cloud3 = "fake-ca_cloud3.crt" +var fakeCloudName_cloud3 = "openstack_cloud3" + // Test GetConfigFromFiles func TestGetConfigFromFiles(t *testing.T) { // init file @@ -49,6 +68,22 @@ tenant-id=` + fakeTenantID + ` domain-id=` + fakeDomainID + ` ca-file=` + fakeCAfile + ` region=` + fakeRegion + ` +[Global "cloud2"] +username=` + fakeUserName_cloud2 + ` +password=` + fakePassword_cloud2 + ` +auth-url=` + fakeAuthURL_cloud2 + ` +tenant-id=` + fakeTenantID_cloud2 + ` +domain-id=` + fakeDomainID_cloud2 + ` +ca-file=` + fakeCAfile_cloud2 + ` +region=` + fakeRegion_cloud2 + ` +[Global "cloud3"] +username=` + fakeUserName_cloud3 + ` +password=` + fakePassword_cloud3 + ` +auth-url=` + 
fakeAuthURL_cloud3 + ` +tenant-id=` + fakeTenantID_cloud3 + ` +domain-id=` + fakeDomainID_cloud3 + ` +ca-file=` + fakeCAfile_cloud3 + ` +region=` + fakeRegion_cloud3 + ` [BlockStorage] rescan-on-resize=true` @@ -67,13 +102,36 @@ rescan-on-resize=true` // Init assert assert := assert.New(t) expectedOpts := Config{} - expectedOpts.Global.Username = fakeUserName - expectedOpts.Global.Password = fakePassword - expectedOpts.Global.DomainID = fakeDomainID - expectedOpts.Global.AuthURL = fakeAuthURL - expectedOpts.Global.CAFile = fakeCAfile - expectedOpts.Global.TenantID = fakeTenantID - expectedOpts.Global.Region = fakeRegion + expectedOpts.Global = make(map[string]*client.AuthOpts, 3) + + expectedOpts.Global[""] = &client.AuthOpts{ + Username: fakeUserName, + Password: fakePassword, + DomainID: fakeDomainID, + AuthURL: fakeAuthURL, + CAFile: fakeCAfile, + TenantID: fakeTenantID, + Region: fakeRegion, + } + expectedOpts.Global["cloud2"] = &client.AuthOpts{ + Username: fakeUserName_cloud2, + Password: fakePassword_cloud2, + DomainID: fakeDomainID_cloud2, + AuthURL: fakeAuthURL_cloud2, + CAFile: fakeCAfile_cloud2, + TenantID: fakeTenantID_cloud2, + Region: fakeRegion_cloud2, + } + expectedOpts.Global["cloud3"] = &client.AuthOpts{ + Username: fakeUserName_cloud3, + Password: fakePassword_cloud3, + DomainID: fakeDomainID_cloud3, + AuthURL: fakeAuthURL_cloud3, + CAFile: fakeCAfile_cloud3, + TenantID: fakeTenantID_cloud3, + Region: fakeRegion_cloud3, + } + expectedOpts.BlockStorage.RescanOnResize = true // Invoke GetConfigFromFiles @@ -130,6 +188,14 @@ func TestGetConfigFromFileWithUseClouds(t *testing.T) { use-clouds = true clouds-file = ` + wd + `/fixtures/clouds.yaml cloud = ` + fakeCloudName + ` +[Global "cloud2"] +use-clouds = true +clouds-file = ` + wd + `/fixtures/clouds.yaml +cloud = ` + fakeCloudName_cloud2 + ` +[Global "cloud3"] +use-clouds = true +clouds-file = ` + wd + `/fixtures/clouds.yaml +cloud = ` + fakeCloudName_cloud3 + ` [BlockStorage] rescan-on-resize=true` @@ -148,17 +214,48 @@ rescan-on-resize=true` // Init assert assert := assert.New(t) expectedOpts := Config{} - expectedOpts.Global.Username = fakeUserName - expectedOpts.Global.Password = fakePassword - expectedOpts.Global.DomainID = fakeDomainID - expectedOpts.Global.AuthURL = fakeAuthURL - expectedOpts.Global.CAFile = fakeCAfile - expectedOpts.Global.TenantID = fakeTenantID - expectedOpts.Global.Region = fakeRegion - expectedOpts.Global.EndpointType = gophercloud.AvailabilityPublic - expectedOpts.Global.UseClouds = true - expectedOpts.Global.CloudsFile = wd + "/fixtures/clouds.yaml" - expectedOpts.Global.Cloud = fakeCloudName + expectedOpts.Global = make(map[string]*client.AuthOpts, 3) + + expectedOpts.Global[""] = &client.AuthOpts{ + Username: fakeUserName, + Password: fakePassword, + DomainID: fakeDomainID, + AuthURL: fakeAuthURL, + CAFile: fakeCAfile, + TenantID: fakeTenantID, + Region: fakeRegion, + EndpointType: gophercloud.AvailabilityPublic, + UseClouds: true, + CloudsFile: wd + "/fixtures/clouds.yaml", + Cloud: fakeCloudName, + } + expectedOpts.Global["cloud2"] = &client.AuthOpts{ + Username: fakeUserName_cloud2, + Password: fakePassword_cloud2, + DomainID: fakeDomainID_cloud2, + AuthURL: fakeAuthURL_cloud2, + CAFile: fakeCAfile_cloud2, + TenantID: fakeTenantID_cloud2, + Region: fakeRegion_cloud2, + EndpointType: gophercloud.AvailabilityPublic, + UseClouds: true, + CloudsFile: wd + "/fixtures/clouds.yaml", + Cloud: fakeCloudName_cloud2, + } + expectedOpts.Global["cloud3"] = &client.AuthOpts{ + Username: 
fakeUserName_cloud3, + Password: fakePassword_cloud3, + DomainID: fakeDomainID_cloud3, + AuthURL: fakeAuthURL_cloud3, + CAFile: fakeCAfile_cloud3, + TenantID: fakeTenantID_cloud3, + Region: fakeRegion_cloud3, + EndpointType: gophercloud.AvailabilityPublic, + UseClouds: true, + CloudsFile: wd + "/fixtures/clouds.yaml", + Cloud: fakeCloudName_cloud3, + } + expectedOpts.BlockStorage.RescanOnResize = true // Invoke GetConfigFromFiles diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go index 5758065cde..dd74f91325 100644 --- a/pkg/csi/cinder/utils.go +++ b/pkg/csi/cinder/utils.go @@ -44,10 +44,10 @@ func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *c } //revive:disable:unexported-return -func NewControllerServer(d *Driver, cloud openstack.IOpenStack) *controllerServer { +func NewControllerServer(d *Driver, clouds map[string]openstack.IOpenStack) *controllerServer { return &controllerServer{ Driver: d, - Cloud: cloud, + Clouds: clouds, } } diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go index 4e3a2abd65..aba0d90de0 100644 --- a/tests/sanity/cinder/sanity_test.go +++ b/tests/sanity/cinder/sanity_test.go @@ -22,12 +22,14 @@ func TestDriver(t *testing.T) { d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) fakecloudprovider := getfakecloud() - openstack.OsInstance = fakecloudprovider + openstack.OsInstances = map[string]openstack.IOpenStack{ + "": fakecloudprovider, + } fakemnt := GetFakeMountProvider() fakemet := &fakemetadata{} - d.SetupControllerService(fakecloudprovider) + d.SetupControllerService(openstack.OsInstances) d.SetupNodeService(fakecloudprovider, fakemnt, fakemet) // TODO: Stop call
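
Illustrative multi-cloud configuration (not part of the patch): with the multi-section Global parsing added to GetConfigFromFiles above, a cloud.conf may declare the default [Global] section plus additional named sections, and the quoted section name is the value that --cloud-name and the `cloud` secret key refer to. The credentials, URLs and the name "cloud2" below are placeholders, mirroring the test fixtures rather than any real deployment.

    [Global]
    username=user
    password=pass
    auth-url=https://keystone.region-one.example:5000/v3
    tenant-id=<project-id>
    domain-id=<domain-id>
    region=RegionOne

    [Global "cloud2"]
    username=user2
    password=pass2
    auth-url=https://keystone.region-two.example:5000/v3
    tenant-id=<project-id-2>
    domain-id=<domain-id-2>
    region=RegionTwo

    [BlockStorage]
    rescan-on-resize=true

The controller plugin would then be started with the flag repeated once per section it should load, for example --cloud-name="" --cloud-name="cloud2", so that handle() builds one IOpenStack client per configured cloud; the node plugin keeps using a single cloud, the first --cloud-name given.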
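Selecting a cloud per StorageClass (a sketch, not defined by this patch): the controller reads the target cloud from the `cloud` key of the CSI secret attached to each request (req.GetSecrets()["cloud"]), so the usual way to route a StorageClass to a given Global section is to point the standard csi.storage.k8s.io secret parameters at a Secret carrying that key. The object names and namespace below are made up, and the parameter names are the generic external-provisioner/attacher/resizer ones, assumed here rather than introduced by this change.

    apiVersion: v1
    kind: Secret
    metadata:
      name: csi-cinder-cloud2-secret
      namespace: kube-system
    stringData:
      cloud: cloud2            # must match a [Global "cloud2"] section / --cloud-name value
    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: csi-cinder-cloud2
    provisioner: cinder.csi.openstack.org
    parameters:
      csi.storage.k8s.io/provisioner-secret-name: csi-cinder-cloud2-secret
      csi.storage.k8s.io/provisioner-secret-namespace: kube-system
      csi.storage.k8s.io/controller-publish-secret-name: csi-cinder-cloud2-secret
      csi.storage.k8s.io/controller-publish-secret-namespace: kube-system
      csi.storage.k8s.io/controller-expand-secret-name: csi-cinder-cloud2-secret
      csi.storage.k8s.io/controller-expand-secret-namespace: kube-system

Requests that carry no secret (or no `cloud` key) resolve to the empty cloud name, i.e. the plain [Global] section, which is why --cloud-name defaults to ""; snapshot and backup RPCs read the same key, so a VolumeSnapshotClass would reference the Secret through csi.storage.k8s.io/snapshotter-secret-name in the same way.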
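For reference, the paging token used by the reworked ListVolumes is a JSON-encoded CloudsStartingToken: clouds are walked in sorted name order, and the NextToken handed back to the CO records which cloud to resume from plus that cloud's own Cinder marker, roughly as below (both values are placeholders). An empty "cloud" field refers to the default [Global] section, which is what the updated TestListVolumes in controllerserver_test.go builds its starting token from.

    {"cloud":"cloud2","token":"<last-volume-id-returned-by-cloud2>"}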