Skip to content

Commit

Permalink
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[cinder-csi-plugin] ephemeral volume removal (#2602)
Browse files Browse the repository at this point in the history
Remove openstack credentials from node plugin

Signed-off-by: Serge Logvinov <[email protected]>
sergelogvinov committed Oct 9, 2024
1 parent 333a126 commit 9e96127
Showing 17 changed files with 108 additions and 338 deletions.
2 changes: 1 addition & 1 deletion charts/cinder-csi-plugin/Chart.yaml
Original file line number Diff line number Diff line change
@@ -2,7 +2,7 @@ apiVersion: v1
appVersion: v1.31.0
description: Cinder CSI Chart for OpenStack
name: openstack-cinder-csi
version: 2.31.0
version: 2.31.2
home: https://github.com/kubernetes/cloud-provider-openstack
icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
maintainers:
Original file line number Diff line number Diff line change
@@ -173,6 +173,7 @@ spec:
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
- "--provide-node-service=false"
{{- if .Values.csi.plugin.httpEndpoint.enabled }}
- "--http-endpoint=:{{ .Values.csi.plugin.httpEndpoint.port }}"
{{- end }}
Original file line number Diff line number Diff line change
@@ -91,6 +91,7 @@ spec:
- /bin/cinder-csi-plugin
- "-v={{ .Values.logVerbosityLevel }}"
- "--endpoint=$(CSI_ENDPOINT)"
- "--provide-controller-service=false"
- "--cloud-config=$(CLOUD_CONFIG)"
{{- if .Values.csi.plugin.extraArgs }}
{{- with .Values.csi.plugin.extraArgs }}
47 changes: 30 additions & 17 deletions cmd/cinder-csi-plugin/main.go
Original file line number Diff line number Diff line change
@@ -17,6 +17,7 @@ limitations under the License.
package main

import (
"fmt"
"os"

"github.com/spf13/cobra"
@@ -50,6 +51,24 @@ func main() {
Run: func(cmd *cobra.Command, args []string) {
handle()
},
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
f := cmd.Flags()

if !provideControllerService {
return nil
}

configs, err := f.GetStringSlice("cloud-config")
if err != nil {
return err
}

if len(configs) == 0 {
return fmt.Errorf("unable to mark flag cloud-config to be required")
}

return nil
},
Version: version.Version,
}

@@ -63,10 +82,7 @@ func main() {
klog.Fatalf("Unable to mark flag endpoint to be required: %v", err)
}

cmd.PersistentFlags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil {
klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
}
cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")

cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -77,6 +93,7 @@ func main() {
cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)")
cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)")
cmd.PersistentFlags().BoolVar(&noClient, "node-service-no-os-client", false, "If set to true then the CSI driver node service will not use the OpenStack client (default: false)")
cmd.PersistentFlags().MarkDeprecated("node-service-no-os-client", "This flag is deprecated and will be removed in the future. Node service do not use OpenStack credentials anymore.") //nolint:errcheck

openstack.AddExtraFlags(pflag.CommandLine)

@@ -94,7 +111,7 @@ func handle() {
var err error
clouds := make(map[string]openstack.IOpenStack)
for _, cloudName := range cloudNames {
clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName, false)
clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
if err != nil {
klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
return
@@ -105,23 +122,19 @@ func handle() {
}

if provideNodeService {
var err error
clouds := make(map[string]openstack.IOpenStack)
for _, cloudName := range cloudNames {
clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName, noClient)
if err != nil {
klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
return
}
}

//Initialize mount
mount := mount.GetMountProvider()

cfg, err := openstack.GetConfigFromFiles(cloudConfig)
if err != nil && !os.IsNotExist(err) {
klog.Warningf("Failed to GetConfigFromFiles: %v", err)
return
}

//Initialize Metadata
metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder)
metadata := metadata.GetMetadataProvider(cfg.Metadata.SearchOrder)

d.SetupNodeService(clouds[cloudNames[0]], mount, metadata, additionalTopologies)
d.SetupNodeService(mount, metadata, cfg.BlockStorage, additionalTopologies)
}

d.Run()
9 changes: 1 addition & 8 deletions docs/cinder-csi-plugin/using-cinder-csi-plugin.md
Original file line number Diff line number Diff line change
@@ -111,13 +111,6 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f

The default is to provide the node service.
</dd>

<dt>--node-service-no-os-client &lt;disabled&gt;</dt>
<dd>
If set to true then the CSI driver does not provide the OpenStack client in the node service.

The default is to provide the OpenStack client in the node service.
</dd>
</dl>

## Driver Config
@@ -277,7 +270,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi
| VolumeSnapshotClass `parameters` | `type` | Empty String | `snapshot` creates a VolumeSnapshot object linked to a Cinder volume snapshot. `backup` creates a VolumeSnapshot object linked to a cinder volume backup. Defaults to `snapshot` if not defined |
| VolumeSnapshotClass `parameters` | `backup-max-duration-seconds-per-gb` | `20` | Defines the amount of time to wait for a backup to complete in seconds per GB of volume size |
| VolumeSnapshotClass `parameters` | `availability` | Same as volume | String. Backup Availability Zone |
| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes|
| Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder |

## Local Development
1 change: 1 addition & 0 deletions manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
Original file line number Diff line number Diff line change
@@ -57,6 +57,7 @@ spec:
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- "--provide-controller-service=false"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--v=1"
env:
9 changes: 8 additions & 1 deletion pkg/csi/cinder/controllerserver.go
Original file line number Diff line number Diff line change
@@ -1061,8 +1061,11 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {

var volsrc *csi.VolumeContentSource
volCnx := map[string]string{}

if vol.SnapshotID != "" {
volCnx[ResizeRequired] = "true"

volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Snapshot{
Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1073,6 +1076,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
}

if vol.SourceVolID != "" {
volCnx[ResizeRequired] = "true"

volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Volume{
Volume: &csi.VolumeContentSource_VolumeSource{
@@ -1083,6 +1088,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
}

if vol.BackupID != nil && *vol.BackupID != "" {
volCnx[ResizeRequired] = "true"

volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Snapshot{
Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1113,9 +1120,9 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
CapacityBytes: int64(vol.Size * 1024 * 1024 * 1024),
AccessibleTopology: accessibleTopology,
ContentSource: volsrc,
VolumeContext: volCnx,
},
}

return resp

}
10 changes: 8 additions & 2 deletions pkg/csi/cinder/driver.go
Original file line number Diff line number Diff line change
@@ -32,6 +32,12 @@ import (
const (
driverName = "cinder.csi.openstack.org"
topologyKey = "topology." + driverName + "/zone"

// maxVolumesPerNode is the maximum number of volumes that can be attached to a node
maxVolumesPerNode = 256

// ResizeRequired parameter, if set to true, will trigger a resize on mount operation
ResizeRequired = driverName + "/resizeRequired"
)

var (
@@ -177,9 +183,9 @@ func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack)
d.cs = NewControllerServer(d, clouds)
}

func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata, topologies map[string]string) {
func (d *Driver) SetupNodeService(mount mount.IMount, metadata metadata.IMetadata, opts openstack.BlockStorageOpts, topologies map[string]string) {
klog.Info("Providing node service")
d.ns = NewNodeServer(d, mount, metadata, cloud, topologies)
d.ns = NewNodeServer(d, mount, metadata, opts, topologies)
}

func (d *Driver) Run() {
103 changes: 15 additions & 88 deletions pkg/csi/cinder/nodeserver.go
Original file line number Diff line number Diff line change
@@ -24,7 +24,6 @@ import (
"strings"

"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -33,7 +32,6 @@ import (

"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
"k8s.io/cloud-provider-openstack/pkg/util/blockdevice"
cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
"k8s.io/cloud-provider-openstack/pkg/util/metadata"
"k8s.io/cloud-provider-openstack/pkg/util/mount"
mountutil "k8s.io/mount-utils"
@@ -43,7 +41,7 @@ type nodeServer struct {
Driver *Driver
Mount mount.IMount
Metadata metadata.IMetadata
Cloud openstack.IOpenStack
Opts openstack.BlockStorageOpts
Topologies map[string]string
}

@@ -163,73 +161,10 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Error(codes.InvalidArgument, "[NodeUnpublishVolume] volumeID must be provided")
}

ephemeralVolume := false

vol, err := ns.Cloud.GetVolume(volumeID)
if err != nil {

if !cpoerrors.IsNotFound(err) {
return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
}

// if not found by id, try to search by name
volName := fmt.Sprintf("ephemeral-%s", volumeID)

vols, err := ns.Cloud.GetVolumesByName(volName)

//if volume not found then GetVolumesByName returns empty list
if err != nil {
return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
}
if len(vols) > 0 {
vol = &vols[0]
ephemeralVolume = true
} else {
return nil, status.Errorf(codes.NotFound, "Volume not found %s", volName)
}
}

err = ns.Mount.UnmountPath(targetPath)
if err != nil {
if err := ns.Mount.UnmountPath(targetPath); err != nil {
return nil, status.Errorf(codes.Internal, "Unmount of targetpath %s failed with error %v", targetPath, err)
}

if ephemeralVolume {
return nodeUnpublishEphemeral(req, ns, vol)
}

return &csi.NodeUnpublishVolumeResponse{}, nil

}

func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer, vol *volumes.Volume) (*csi.NodeUnpublishVolumeResponse, error) {
volumeID := vol.ID
var instanceID string

if len(vol.Attachments) > 0 {
instanceID = vol.Attachments[0].ServerID
} else {
return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request")
}

err := ns.Cloud.DetachVolume(instanceID, volumeID)
if err != nil {
klog.V(3).Infof("Failed to DetachVolume: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}

err = ns.Cloud.WaitDiskDetached(instanceID, volumeID)
if err != nil {
klog.V(3).Infof("Failed to WaitDiskDetached: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}

err = ns.Cloud.DeleteVolume(volumeID)
if err != nil {
klog.V(3).Infof("Failed to DeleteVolume: %v", err)
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.NodeUnpublishVolumeResponse{}, nil
}

@@ -238,6 +173,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol

stagingTarget := req.GetStagingTargetPath()
volumeCapability := req.GetVolumeCapability()
volumeContext := req.GetVolumeContext()
volumeID := req.GetVolumeId()

if len(volumeID) == 0 {
@@ -251,14 +187,6 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
}

vol, err := ns.Cloud.GetVolume(volumeID)
if err != nil {
if cpoerrors.IsNotFound(err) {
return nil, status.Error(codes.NotFound, "Volume not found")
}
return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
}

m := ns.Mount
// Do not trust the path provided by cinder, get the real path on node
devicePath, err := getDevicePath(volumeID, m)
@@ -296,9 +224,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
}
}

// Try expanding the volume if it's created from a snapshot or another volume (see #1539)
if vol.SourceVolID != "" || vol.SnapshotID != "" {

if required, ok := volumeContext[ResizeRequired]; ok && strings.EqualFold(required, "true") {
r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)

needResize, err := r.NeedResize(devicePath, stagingTarget)
@@ -376,12 +302,10 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
}
topology := &csi.Topology{Segments: topologyMap}

maxVolume := ns.Cloud.GetMaxVolLimit()

return &csi.NodeGetInfoResponse{
NodeId: nodeID,
AccessibleTopology: topology,
MaxVolumesPerNode: maxVolume,
MaxVolumesPerNode: ns.Opts.NodeVolumeAttachLimit,
}, nil
}

@@ -448,13 +372,16 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
if len(volumePath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume path not provided")
}
volCapability := req.GetVolumeCapability()
if volCapability != nil {
if volCapability.GetBlock() != nil {
return &csi.NodeExpandVolumeResponse{}, nil
}
}

_, err := ns.Cloud.GetVolume(volumeID)
_, err := blockdevice.IsBlockDevice(volumePath)
if err != nil {
if cpoerrors.IsNotFound(err) {
return nil, status.Errorf(codes.NotFound, "Volume with ID %s not found", volumeID)
}
return nil, status.Errorf(codes.Internal, "NodeExpandVolume failed with error %v", err)
return nil, status.Errorf(codes.NotFound, "Failed to determine device path for volumePath %s: %v", volumePath, err)
}

output, err := ns.Mount.GetMountFs(volumePath)
@@ -467,13 +394,14 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
return nil, status.Error(codes.Internal, "Unable to find Device path for volume")
}

if ns.Cloud.GetBlockStorageOpts().RescanOnResize {
if ns.Opts.RescanOnResize {
// comparing current volume size with the expected one
newSize := req.GetCapacityRange().GetRequiredBytes()
if err := blockdevice.RescanBlockDeviceGeometry(devicePath, volumePath, newSize); err != nil {
return nil, status.Errorf(codes.Internal, "Could not verify %q volume size: %v", volumeID, err)
}
}

r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
if _, err := r.Resize(devicePath, volumePath); err != nil {
return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err)
@@ -499,7 +427,6 @@ func getDevicePath(volumeID string, m mount.IMount) (string, error) {
}

return devicePath, nil

}

func collectMountOptions(fsType string, mntFlags []string) []string {
59 changes: 17 additions & 42 deletions pkg/csi/cinder/nodeserver_test.go
Original file line number Diff line number Diff line change
@@ -54,7 +54,12 @@ func init() {
"": omock,
}

fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{})
opts := openstack.BlockStorageOpts{
RescanOnResize: false,
NodeVolumeAttachLimit: maxVolumesPerNode,
}

fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, opts, map[string]string{})
}
}

@@ -127,7 +132,7 @@ func TestNodePublishVolume(t *testing.T) {
assert.Equal(expectedRes, actualRes)
}

func TestNodePublishVolumeEphermeral(t *testing.T) {
func TestNodePublishVolumeEphemeral(t *testing.T) {

properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster}
fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)
@@ -263,45 +268,6 @@ func TestNodeUnpublishVolume(t *testing.T) {
assert.Equal(expectedRes, actualRes)
}

func TestNodeUnpublishVolumeEphermeral(t *testing.T) {
mount.MInstance = mmock
metadata.MetadataService = metamock
osmock := map[string]openstack.IOpenStack{
"": new(openstack.OpenStackMock),
}
fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)

mmock.On("UnmountPath", FakeTargetPath).Return(nil)
omock.On("GetVolumesByName", fvolName).Return(FakeVolList, nil)
omock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil)
omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil)
omock.On("DeleteVolume", FakeVolID).Return(nil)

d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""], map[string]string{})

// Init assert
assert := assert.New(t)

// Expected Result
expectedRes := &csi.NodeUnpublishVolumeResponse{}

// Fake request
fakeReq := &csi.NodeUnpublishVolumeRequest{
VolumeId: FakeVolID,
TargetPath: FakeTargetPath,
}

// Invoke NodeUnpublishVolume
actualRes, err := fakeNse.NodeUnpublishVolume(FakeCtx, fakeReq)
if err != nil {
t.Errorf("failed to NodeUnpublishVolume: %v", err)
}

// Assert
assert.Equal(expectedRes, actualRes)
}

// Test NodeUnstageVolume
func TestNodeUnstageVolume(t *testing.T) {

@@ -335,10 +301,19 @@ func TestNodeExpandVolume(t *testing.T) {
// Init assert
assert := assert.New(t)

// setup for test
tempDir := os.TempDir()
defer os.Remove(tempDir)
volumePath := filepath.Join(tempDir, FakeTargetPath)
err := os.MkdirAll(volumePath, 0750)
if err != nil {
t.Fatalf("Failed to set up volumepath: %v", err)
}

// Fake request
fakeReq := &csi.NodeExpandVolumeRequest{
VolumeId: FakeVolName,
VolumePath: FakeDevicePath,
VolumePath: volumePath,
}

// Expected Result
144 changes: 0 additions & 144 deletions pkg/csi/cinder/openstack/noop_openstack.go

This file was deleted.

33 changes: 4 additions & 29 deletions pkg/csi/cinder/openstack/openstack.go
Original file line number Diff line number Diff line change
@@ -144,12 +144,10 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) {
const defaultMaxVolAttachLimit int64 = 256

var OsInstances map[string]IOpenStack
var NoopInstances map[string]IOpenStack
var configFiles = []string{"/etc/cloud.conf"}

func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) {
OsInstances = make(map[string]IOpenStack)
NoopInstances = make(map[string]IOpenStack)
metrics.RegisterMetrics("cinder-csi")
if httpEndpoint != "" {
mux := http.NewServeMux()
@@ -168,7 +166,7 @@ func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) {
}

// CreateOpenStackProvider creates Openstack Instance with custom Global config param
func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error) {
func CreateOpenStackProvider(cloudName string) (IOpenStack, error) {
// Get config from file
cfg, err := GetConfigFromFiles(configFiles)
if err != nil {
@@ -177,7 +175,7 @@ func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error
}
logcfg(cfg)
global := cfg.Global[cloudName]
if global == nil && !noClient {
if global == nil {
return nil, fmt.Errorf("GetConfigFromFiles cloud name \"%s\" not found in configuration files: %s", cloudName, configFiles)
}

@@ -186,16 +184,6 @@ func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error
cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", metadata.ConfigDriveID, metadata.MetadataID)
}

if noClient {
// Init OpenStack
NoopInstances[cloudName] = &NoopOpenStack{
bsOpts: cfg.BlockStorage,
metadataOpts: cfg.Metadata,
}

return NoopInstances[cloudName], nil
}

provider, err := client.NewOpenStackClient(global, "cinder-csi-plugin", userAgentData...)
if err != nil {
return nil, err
@@ -231,25 +219,12 @@ func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error
}

// GetOpenStackProvider returns Openstack Instance
func GetOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error) {
if noClient {
NoopInstance, NoopInstanceDefined := NoopInstances[cloudName]
if NoopInstanceDefined {
return NoopInstance, nil
}
NoopInstance, err := CreateOpenStackProvider(cloudName, noClient)
if err != nil {
return nil, err
}

return NoopInstance, nil
}

func GetOpenStackProvider(cloudName string) (IOpenStack, error) {
OsInstance, OsInstanceDefined := OsInstances[cloudName]
if OsInstanceDefined {
return OsInstance, nil
}
OsInstance, err := CreateOpenStackProvider(cloudName, noClient)
OsInstance, err := CreateOpenStackProvider(cloudName)
if err != nil {
return nil, err
}
4 changes: 2 additions & 2 deletions pkg/csi/cinder/openstack/openstack_volumes.go
Original file line number Diff line number Diff line change
@@ -237,7 +237,7 @@ func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error
})

if wait.Interrupted(err) {
err = fmt.Errorf("Volume %q failed to be attached within the alloted time", volumeID)
err = fmt.Errorf("Volume %q failed to be attached within the allotted time", volumeID)
}

return err
@@ -325,7 +325,7 @@ func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error
})

if wait.Interrupted(err) {
err = fmt.Errorf("Volume %q failed to detach within the alloted time", volumeID)
err = fmt.Errorf("Volume %q failed to detach within the allotted time", volumeID)
}

return err
8 changes: 6 additions & 2 deletions pkg/csi/cinder/utils.go
Original file line number Diff line number Diff line change
@@ -57,13 +57,17 @@ func NewIdentityServer(d *Driver) *identityServer {
}
}

func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack, topologies map[string]string) *nodeServer {
func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, opts openstack.BlockStorageOpts, topologies map[string]string) *nodeServer {
if opts.NodeVolumeAttachLimit < 0 || opts.NodeVolumeAttachLimit > maxVolumesPerNode {
opts.NodeVolumeAttachLimit = maxVolumesPerNode
}

return &nodeServer{
Driver: d,
Mount: mount,
Metadata: metadata,
Cloud: cloud,
Topologies: topologies,
Opts: opts,
}
}

4 changes: 4 additions & 0 deletions pkg/util/blockdevice/blockdevice_unsupported.go
Original file line number Diff line number Diff line change
@@ -34,3 +34,7 @@ func GetBlockDeviceSize(path string) (int64, error) {
func RescanBlockDeviceGeometry(devicePath string, deviceMountPath string, newSize int64) error {
return errors.New("RescanBlockDeviceGeometry is not implemented for this OS")
}

func RescanDevice(devicePath string) error {
return errors.New("RescanDevice is not implemented for this OS")
}
5 changes: 4 additions & 1 deletion pkg/util/metadata/metadata.go
Original file line number Diff line number Diff line change
@@ -105,8 +105,11 @@ type IMetadata interface {

// GetMetadataProvider retrieves instance of IMetadata
func GetMetadataProvider(order string) IMetadata {

if MetadataService == nil {
if len(order) == 0 {
order = fmt.Sprintf("%s,%s", ConfigDriveID, MetadataID)
}

MetadataService = &metadataService{searchOrder: order}
}
return MetadataService
6 changes: 5 additions & 1 deletion tests/sanity/cinder/sanity_test.go
Original file line number Diff line number Diff line change
@@ -28,9 +28,13 @@ func TestDriver(t *testing.T) {

fakemnt := GetFakeMountProvider()
fakemet := &fakemetadata{}
fakeOpts := openstack.BlockStorageOpts{
RescanOnResize: false,
NodeVolumeAttachLimit: 200,
}

d.SetupControllerService(openstack.OsInstances)
d.SetupNodeService(fakecloudprovider, fakemnt, fakemet, map[string]string{})
d.SetupNodeService(fakemnt, fakemet, fakeOpts, map[string]string{})

// TODO: Stop call

0 comments on commit 9e96127

Please sign in to comment.