diff --git a/charts/cinder-csi-plugin/Chart.yaml b/charts/cinder-csi-plugin/Chart.yaml
index 980f10fb0e..376efab11a 100644
--- a/charts/cinder-csi-plugin/Chart.yaml
+++ b/charts/cinder-csi-plugin/Chart.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
appVersion: v1.31.0
description: Cinder CSI Chart for OpenStack
name: openstack-cinder-csi
-version: 2.31.0
+version: 2.31.2
home: https://github.com/kubernetes/cloud-provider-openstack
icon: https://github.com/kubernetes/kubernetes/blob/master/logo/logo.png
maintainers:
diff --git a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
index 542c179f5a..03cc4f1b2e 100644
--- a/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
+++ b/charts/cinder-csi-plugin/templates/controllerplugin-deployment.yaml
@@ -173,6 +173,7 @@ spec:
- "--endpoint=$(CSI_ENDPOINT)"
- "--cloud-config=$(CLOUD_CONFIG)"
- "--cluster=$(CLUSTER_NAME)"
+ - "--provide-node-service=false"
{{- if .Values.csi.plugin.httpEndpoint.enabled }}
- "--http-endpoint=:{{ .Values.csi.plugin.httpEndpoint.port }}"
{{- end }}
diff --git a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
index e55e8d69ea..4fef26eba9 100644
--- a/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
+++ b/charts/cinder-csi-plugin/templates/nodeplugin-daemonset.yaml
@@ -91,7 +91,8 @@ spec:
- /bin/cinder-csi-plugin
- "-v={{ .Values.logVerbosityLevel }}"
- "--endpoint=$(CSI_ENDPOINT)"
- - "--cloud-config=$(CLOUD_CONFIG)"
+ - "--provide-controller-service=false"
+ # - "--cloud-config=$(CLOUD_CONFIG)"
{{- if .Values.csi.plugin.extraArgs }}
{{- with .Values.csi.plugin.extraArgs }}
{{- tpl . $ | trim | nindent 12 }}
@@ -100,8 +101,8 @@ spec:
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- - name: CLOUD_CONFIG
- value: /etc/kubernetes/{{ .Values.secret.filename }}
+ # - name: CLOUD_CONFIG
+ # value: /etc/kubernetes/{{ .Values.secret.filename }}
{{- if .Values.csi.plugin.extraEnv }}
{{- toYaml .Values.csi.plugin.extraEnv | nindent 12 }}
{{- end }}
diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go
index e177b46413..abc790e894 100644
--- a/cmd/cinder-csi-plugin/main.go
+++ b/cmd/cinder-csi-plugin/main.go
@@ -17,6 +17,7 @@ limitations under the License.
package main
import (
+ "fmt"
"os"
"github.com/spf13/cobra"
@@ -31,16 +32,20 @@ import (
)
var (
- endpoint string
- nodeID string
- cloudConfig []string
- cloudNames []string
- additionalTopologies map[string]string
- cluster string
- httpEndpoint string
+ endpoint string
+ nodeID string
+ cloudConfig []string
+ cloudNames []string
+ additionalTopologies map[string]string
+ cluster string
+ httpEndpoint string
+
+ searchOrder string
+ rescanOnResize bool
+ nodeVolumeAttachLimit int64
+
provideControllerService bool
provideNodeService bool
- noClient bool
)
func main() {
@@ -50,6 +55,24 @@ func main() {
Run: func(cmd *cobra.Command, args []string) {
handle()
},
+ PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
+ f := cmd.Flags()
+
+ if !provideControllerService {
+ return nil
+ }
+
+ configs, err := f.GetStringSlice("cloud-config")
+ if err != nil {
+ return err
+ }
+
+ if len(configs) == 0 {
+ return fmt.Errorf("unable to mark flag cloud-config to be required")
+ }
+
+ return nil
+ },
Version: version.Version,
}
@@ -63,10 +86,7 @@ func main() {
klog.Fatalf("Unable to mark flag endpoint to be required: %v", err)
}
- cmd.PersistentFlags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
- if err := cmd.MarkPersistentFlagRequired("cloud-config"); err != nil {
- klog.Fatalf("Unable to mark flag cloud-config to be required: %v", err)
- }
+ cmd.Flags().StringSliceVar(&cloudConfig, "cloud-config", nil, "CSI driver cloud config. This option can be given multiple times")
cmd.PersistentFlags().StringSliceVar(&cloudNames, "cloud-name", []string{""}, "Cloud name to instruct CSI driver to read additional OpenStack cloud credentials from the configuration subsections. This option can be specified multiple times to manage multiple OpenStack clouds.")
cmd.PersistentFlags().StringToStringVar(&additionalTopologies, "additional-topology", map[string]string{}, "Additional CSI driver topology keys, for example topology.kubernetes.io/region=REGION1. This option can be specified multiple times to add multiple additional topology keys.")
@@ -76,7 +96,12 @@ func main() {
cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)")
cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)")
- cmd.PersistentFlags().BoolVar(&noClient, "node-service-no-os-client", false, "If set to true then the CSI driver node service will not use the OpenStack client (default: false)")
+
+ cmd.PersistentFlags().StringVar(&searchOrder, "search-order", "configDrive,metadataService", "The search order for metadata service")
+
+ // Node specific flags
+ cmd.PersistentFlags().BoolVar(&rescanOnResize, "rescan-on-resize", false, "If set to true then the CSI driver will rescan the device on volume resize")
+ cmd.PersistentFlags().Int64Var(&nodeVolumeAttachLimit, "node-volume-attach-limit", 256, "The maximum number of volumes that can be attached to a node")
openstack.AddExtraFlags(pflag.CommandLine)
@@ -94,7 +119,7 @@ func handle() {
var err error
clouds := make(map[string]openstack.IOpenStack)
for _, cloudName := range cloudNames {
- clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName, false)
+ clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName)
if err != nil {
klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
return
@@ -105,23 +130,36 @@ func handle() {
}
if provideNodeService {
- var err error
- clouds := make(map[string]openstack.IOpenStack)
- for _, cloudName := range cloudNames {
- clouds[cloudName], err = openstack.GetOpenStackProvider(cloudName, noClient)
- if err != nil {
- klog.Warningf("Failed to GetOpenStackProvider %s: %v", cloudName, err)
+ // Initialize mount
+ mount := mount.GetMountProvider()
+
+ // Backward compatibility: read [BlockStorage] parameters from the cloud config
+ cfg, err := openstack.GetConfigFromFiles(cloudConfig)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ klog.Warningf("Failed to GetConfigFromFiles: %v", err)
return
}
}
- //Initialize mount
- mount := mount.GetMountProvider()
+ if cfg.Metadata.SearchOrder != "" {
+ searchOrder = cfg.Metadata.SearchOrder
+ }
+
+ // Start from the CLI flag values; explicitly set [BlockStorage] values
+ // from the cloud config take precedence.
+ opts := openstack.BlockStorageOpts{
+ RescanOnResize: rescanOnResize,
+ NodeVolumeAttachLimit: nodeVolumeAttachLimit,
+ }
+
+ if cfg.BlockStorage.RescanOnResize {
+ opts.RescanOnResize = true
+ }
+
+ if cfg.BlockStorage.NodeVolumeAttachLimit > 0 {
+ opts.NodeVolumeAttachLimit = cfg.BlockStorage.NodeVolumeAttachLimit
+ }
//Initialize Metadata
- metadata := metadata.GetMetadataProvider(clouds[cloudNames[0]].GetMetadataOpts().SearchOrder)
+ metadata := metadata.GetMetadataProvider(searchOrder)
- d.SetupNodeService(clouds[cloudNames[0]], mount, metadata, additionalTopologies)
+ d.SetupNodeService(mount, metadata, opts, additionalTopologies)
}
d.Run()
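Because `cloud-config` is now mandatory only when the controller service runs, the unconditional `MarkPersistentFlagRequired` call gives way to a check in `PersistentPreRunE`. A minimal, self-contained sketch of that conditional-requirement pattern, with illustrative command and flag names that are not part of the driver:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "example",
		// Validate after flag parsing: --config becomes mandatory only when
		// --feature enables the code path that consumes it.
		PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
			enabled, err := cmd.Flags().GetBool("feature")
			if err != nil {
				return err
			}
			configs, err := cmd.Flags().GetStringSlice("config")
			if err != nil {
				return err
			}
			if enabled && len(configs) == 0 {
				return fmt.Errorf("--config is required when --feature is enabled")
			}
			return nil
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}

	cmd.Flags().Bool("feature", true, "enable the feature that needs --config")
	cmd.Flags().StringSlice("config", nil, "config file; can be given multiple times")

	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```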
diff --git a/docs/cinder-csi-plugin/features.md b/docs/cinder-csi-plugin/features.md
index b033ff791f..578d9f1a49 100644
--- a/docs/cinder-csi-plugin/features.md
+++ b/docs/cinder-csi-plugin/features.md
@@ -24,7 +24,7 @@
Dynamic Provisioning uses persistence volume claim (PVC) to request the Kubernetes to create the Cinder volume on behalf of user and consumes the volume from inside container.
-For usage, refer [sample app](./examples.md#dynamic-volume-provisioning)
+For usage, refer [sample app](./examples.md#dynamic-volume-provisioning)
## Topology
@@ -35,7 +35,7 @@ This feature enables driver to consider the topology constraints while creating
`topology.cinder.csi.openstack.org/zone` : Availability by Zone
* `allowedTopologies` can be specified in storage class to restrict the topology of provisioned volumes to specific zones and should be used as replacement of `availability` parameter.
* To disable: set `--feature-gates=Topology=false` in external-provisioner (container `csi-provisioner` of `csi-cinder-controllerplugin`).
- * If using Helm, it can be disabled by setting `Values.csi.provisioner.topology: "false"`
+ * If using Helm, it can be disabled by setting `Values.csi.provisioner.topology: "false"`
For usage, refer [sample app](./examples.md#use-topology)
@@ -51,7 +51,7 @@ For usage, refer [sample app](./examples.md#using-block-volume)
## Volume Expansion
-Driver supports both `Offline` and `Online` resize of cinder volumes. Cinder online resize support is available since cinder 3.42 microversion.
+Driver supports both `Offline` and `Online` resize of cinder volumes. Cinder online resize support is available since cinder 3.42 microversion.
The same should be supported by underlying OpenStack Cloud to avail the feature.
* As of kubernetes v1.16, Volume Expansion is a beta feature and enabled by default.
@@ -60,7 +60,7 @@ The same should be supported by underlying OpenStack Cloud to avail the feature.
### Rescan on in-use volume resize
-Some hypervizors (like VMware) don't automatically send a new volume size to a Linux kernel, when a volume is in-use. Sending a "1" to `/sys/class/block/XXX/device/rescan` is telling the SCSI block device to refresh it's information about where it's ending boundary is (among other things) to give the kernel information about it's updated size. When a `rescan-on-resize` flag is set in a CSI node driver cloud-config `[BlockStorage]` section, a CSI node driver will rescan block device and verify its size before expanding the filesystem. CSI driver will raise an error, when expected volume size cannot be detected.
+Some hypervisors (like VMware) don't automatically propagate a new volume size to the Linux kernel while a volume is in use. Writing "1" to `/sys/class/block/XXX/device/rescan` tells the SCSI block device to refresh its information about where its ending boundary is (among other things), giving the kernel its updated size. When the `rescan-on-resize` flag is set in the `[BlockStorage]` section of a CSI node driver cloud config, or through the CLI parameter `--rescan-on-resize`, the CSI node driver rescans the block device and verifies its size before expanding the filesystem. The CSI driver raises an error when the expected volume size cannot be detected.
Not all hypervizors have a `/sys/class/block/XXX/device/rescan` location, therefore if you enable this option and your hypervizor doesn't support this, you'll get a warning log on resize event. It is recommended to disable this option in this case.
@@ -81,7 +81,7 @@ Two different Kubernetes features allow volumes to follow the Pod's lifecycle: C
This feature allows CSI volumes to be directly embedded in the Pod specification instead of a PersistentVolume. Volumes specified in this way are ephemeral and do not persist across Pod restarts.
-* As of Kubernetes v1.16 this feature is beta so enabled by default.
+* As of Kubernetes v1.16 this feature is beta so enabled by default.
* To enable this feature for CSI Driver, `volumeLifecycleModes` needs to be specified in [CSIDriver](../../manifests/cinder-csi-plugin/csi-cinder-driver.yaml) object. The driver can run in `Persistent` mode, `Ephemeral` or in both modes.
* `podInfoOnMount` must be `true` to use this feature.
* For usage, refer [sample app](./examples.md#deploy-app-using-inline-volumes)
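The rescan-on-resize paragraph above describes nudging the kernel via sysfs when a hypervisor does not propagate a resize. A minimal sketch of that trigger in Go, assuming the usual SCSI sysfs layout; the device name is illustrative and, as noted, not every hypervisor exposes the `rescan` attribute:

```go
package main

import (
	"fmt"
	"os"
)

// rescanBlockDevice asks the kernel to re-read the geometry of a SCSI block
// device (e.g. "sdb"). Writing "1" to the rescan attribute refreshes the
// kernel's view of the device's ending boundary, including its size.
func rescanBlockDevice(device string) error {
	path := fmt.Sprintf("/sys/class/block/%s/device/rescan", device)
	if err := os.WriteFile(path, []byte("1"), 0200); err != nil {
		return fmt.Errorf("rescan of %s failed: %w", device, err)
	}
	return nil
}

func main() {
	if err := rescanBlockDevice("sdb"); err != nil {
		fmt.Println(err)
	}
}
```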
diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
index 3fc6867e3b..33bde5c6f7 100644
--- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
+++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md
@@ -111,13 +111,6 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f
The default is to provide the node service.
-
-
--node-service-no-os-client <disabled>
-
- If set to true then the CSI driver does not provide the OpenStack client in the node service.
-
- The default is to provide the OpenStack client in the node service.
-
## Driver Config
@@ -138,7 +131,7 @@ The following sections are supported in configuration file.
For Cinder CSI Plugin to authenticate with OpenStack Keystone, required parameters needs to be passed in `[Global]` section of the file. For all supported parameters, please refer [Global](../openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#global) section.
### Block Storage
-These configuration options pertain to block storage and should appear in the `[BlockStorage]` section of the `$CLOUD_CONFIG` file.
+These configuration options pertain to block storage and should appear in the `[BlockStorage]` section of the `$CLOUD_CONFIG` file. Some of these parameters can also be passed as command-line arguments.
* `node-volume-attach-limit`
Optional. To configure maximum volumes that can be attached to the node. Its default value is `256`.
@@ -277,7 +270,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi
| VolumeSnapshotClass `parameters` | `type` | Empty String | `snapshot` creates a VolumeSnapshot object linked to a Cinder volume snapshot. `backup` creates a VolumeSnapshot object linked to a cinder volume backup. Defaults to `snapshot` if not defined |
| VolumeSnapshotClass `parameters` | `backup-max-duration-seconds-per-gb` | `20` | Defines the amount of time to wait for a backup to complete in seconds per GB of volume size |
| VolumeSnapshotClass `parameters` | `availability` | Same as volume | String. Backup Availability Zone |
-| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes|
+| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes|
| Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. Corresponding volume type should exist in cinder |
## Local Development
diff --git a/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml b/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
index e6edfadd4f..c43484bc87 100644
--- a/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
+++ b/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml
@@ -57,13 +57,14 @@ spec:
args:
- /bin/cinder-csi-plugin
- "--endpoint=$(CSI_ENDPOINT)"
- - "--cloud-config=$(CLOUD_CONFIG)"
+ - "--provide-controller-service=false"
+ # - "--cloud-config=$(CLOUD_CONFIG)"
- "--v=1"
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- - name: CLOUD_CONFIG
- value: /etc/config/cloud.conf
+ # - name: CLOUD_CONFIG
+ # value: /etc/config/cloud.conf
imagePullPolicy: "IfNotPresent"
ports:
- containerPort: 9808
@@ -87,9 +88,9 @@ spec:
- name: pods-probe-dir
mountPath: /dev
mountPropagation: "HostToContainer"
- - name: secret-cinderplugin
- mountPath: /etc/config
- readOnly: true
+ # - name: secret-cinderplugin
+ # mountPath: /etc/config
+ # readOnly: true
# - name: cacert
# mountPath: /etc/cacert
# readOnly: true
@@ -110,9 +111,9 @@ spec:
hostPath:
path: /dev
type: Directory
- - name: secret-cinderplugin
- secret:
- secretName: cloud-config
+ # - name: secret-cinderplugin
+ # secret:
+ # secretName: cloud-config
# - name: cacert
# hostPath:
# path: /etc/cacert
diff --git a/pkg/csi/cinder/controllerserver.go b/pkg/csi/cinder/controllerserver.go
index a2d42eb21a..0a18c04bcb 100644
--- a/pkg/csi/cinder/controllerserver.go
+++ b/pkg/csi/cinder/controllerserver.go
@@ -1061,8 +1061,11 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibleTopologyReq *csi.TopologyRequirement) *csi.CreateVolumeResponse {
var volsrc *csi.VolumeContentSource
+ resizeRequired := false
if vol.SnapshotID != "" {
+ resizeRequired = true
+
volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Snapshot{
Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1073,6 +1076,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
}
if vol.SourceVolID != "" {
+ resizeRequired = true
+
volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Volume{
Volume: &csi.VolumeContentSource_VolumeSource{
@@ -1083,6 +1088,8 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
}
if vol.BackupID != nil && *vol.BackupID != "" {
+ resizeRequired = true
+
volsrc = &csi.VolumeContentSource{
Type: &csi.VolumeContentSource_Snapshot{
Snapshot: &csi.VolumeContentSource_SnapshotSource{
@@ -1107,12 +1114,18 @@ func getCreateVolumeResponse(vol *volumes.Volume, ignoreVolumeAZ bool, accessibl
}
}
+ volCnx := map[string]string{}
+ if resizeRequired {
+ volCnx[ResizeRequired] = "true"
+ }
+
resp := &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: vol.ID,
CapacityBytes: int64(vol.Size * 1024 * 1024 * 1024),
AccessibleTopology: accessibleTopology,
ContentSource: volsrc,
+ VolumeContext: volCnx,
},
}
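The `VolumeContext` returned from `CreateVolume` is stored by the CO and echoed back verbatim in later node calls; that round trip is what lets the node service decide on a resize without querying OpenStack. A condensed sketch of both ends of the contract, with simplified helpers that are not part of the driver:

```go
package sketch

import (
	"strings"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// Controller side: stamp the context when the volume has a content source
// (snapshot, source volume, or backup), mirroring getCreateVolumeResponse.
func buildVolume(volID string, hasContentSource bool) *csi.Volume {
	volCtx := map[string]string{}
	if hasContentSource {
		volCtx["resizeRequired"] = "true"
	}
	return &csi.Volume{VolumeId: volID, VolumeContext: volCtx}
}

// Node side: the CO passes the same map back in NodeStageVolumeRequest,
// so the check reduces to a map lookup.
func needsResize(req *csi.NodeStageVolumeRequest) bool {
	return strings.EqualFold(req.GetVolumeContext()["resizeRequired"], "true")
}
```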
diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go
index 773b98a8cc..28590021e6 100644
--- a/pkg/csi/cinder/driver.go
+++ b/pkg/csi/cinder/driver.go
@@ -32,6 +32,12 @@ import (
const (
driverName = "cinder.csi.openstack.org"
topologyKey = "topology." + driverName + "/zone"
+
+ // maxVolumesPerNode is the maximum number of volumes that can be attached to a node
+ maxVolumesPerNode = 256
+
+ // ResizeRequired is a volume context key; when set to "true" it triggers a
+ // filesystem resize during the node stage operation
+ ResizeRequired = "resizeRequired"
)
var (
@@ -177,9 +183,9 @@ func (d *Driver) SetupControllerService(clouds map[string]openstack.IOpenStack)
d.cs = NewControllerServer(d, clouds)
}
-func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata, topologies map[string]string) {
+func (d *Driver) SetupNodeService(mount mount.IMount, metadata metadata.IMetadata, opts openstack.BlockStorageOpts, topologies map[string]string) {
klog.Info("Providing node service")
- d.ns = NewNodeServer(d, mount, metadata, cloud, topologies)
+ d.ns = NewNodeServer(d, mount, metadata, opts, topologies)
}
func (d *Driver) Run() {
diff --git a/pkg/csi/cinder/nodeserver.go b/pkg/csi/cinder/nodeserver.go
index bfa966c741..96850ed590 100644
--- a/pkg/csi/cinder/nodeserver.go
+++ b/pkg/csi/cinder/nodeserver.go
@@ -24,7 +24,6 @@ import (
"strings"
"github.com/container-storage-interface/spec/lib/go/csi"
- "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -33,7 +32,6 @@ import (
"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
"k8s.io/cloud-provider-openstack/pkg/util/blockdevice"
- cpoerrors "k8s.io/cloud-provider-openstack/pkg/util/errors"
"k8s.io/cloud-provider-openstack/pkg/util/metadata"
"k8s.io/cloud-provider-openstack/pkg/util/mount"
mountutil "k8s.io/mount-utils"
@@ -43,7 +41,7 @@ type nodeServer struct {
Driver *Driver
Mount mount.IMount
Metadata metadata.IMetadata
- Cloud openstack.IOpenStack
+ Opts openstack.BlockStorageOpts
Topologies map[string]string
}
@@ -163,73 +161,10 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Error(codes.InvalidArgument, "[NodeUnpublishVolume] volumeID must be provided")
}
- ephemeralVolume := false
-
- vol, err := ns.Cloud.GetVolume(volumeID)
- if err != nil {
-
- if !cpoerrors.IsNotFound(err) {
- return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
- }
-
- // if not found by id, try to search by name
- volName := fmt.Sprintf("ephemeral-%s", volumeID)
-
- vols, err := ns.Cloud.GetVolumesByName(volName)
-
- //if volume not found then GetVolumesByName returns empty list
- if err != nil {
- return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
- }
- if len(vols) > 0 {
- vol = &vols[0]
- ephemeralVolume = true
- } else {
- return nil, status.Errorf(codes.NotFound, "Volume not found %s", volName)
- }
- }
-
- err = ns.Mount.UnmountPath(targetPath)
- if err != nil {
+ if err := ns.Mount.UnmountPath(targetPath); err != nil {
return nil, status.Errorf(codes.Internal, "Unmount of targetpath %s failed with error %v", targetPath, err)
}
- if ephemeralVolume {
- return nodeUnpublishEphemeral(req, ns, vol)
- }
-
- return &csi.NodeUnpublishVolumeResponse{}, nil
-
-}
-
-func nodeUnpublishEphemeral(req *csi.NodeUnpublishVolumeRequest, ns *nodeServer, vol *volumes.Volume) (*csi.NodeUnpublishVolumeResponse, error) {
- volumeID := vol.ID
- var instanceID string
-
- if len(vol.Attachments) > 0 {
- instanceID = vol.Attachments[0].ServerID
- } else {
- return nil, status.Error(codes.FailedPrecondition, "Volume attachment not found in request")
- }
-
- err := ns.Cloud.DetachVolume(instanceID, volumeID)
- if err != nil {
- klog.V(3).Infof("Failed to DetachVolume: %v", err)
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- err = ns.Cloud.WaitDiskDetached(instanceID, volumeID)
- if err != nil {
- klog.V(3).Infof("Failed to WaitDiskDetached: %v", err)
- return nil, status.Error(codes.Internal, err.Error())
- }
-
- err = ns.Cloud.DeleteVolume(volumeID)
- if err != nil {
- klog.V(3).Infof("Failed to DeleteVolume: %v", err)
- return nil, status.Error(codes.Internal, err.Error())
- }
-
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@@ -238,6 +173,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
stagingTarget := req.GetStagingTargetPath()
volumeCapability := req.GetVolumeCapability()
+ volumeContext := req.GetVolumeContext()
volumeID := req.GetVolumeId()
if len(volumeID) == 0 {
@@ -251,14 +187,6 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
return nil, status.Error(codes.InvalidArgument, "NodeStageVolume Volume Capability must be provided")
}
- vol, err := ns.Cloud.GetVolume(volumeID)
- if err != nil {
- if cpoerrors.IsNotFound(err) {
- return nil, status.Error(codes.NotFound, "Volume not found")
- }
- return nil, status.Errorf(codes.Internal, "GetVolume failed with error %v", err)
- }
-
m := ns.Mount
// Do not trust the path provided by cinder, get the real path on node
devicePath, err := getDevicePath(volumeID, m)
@@ -296,9 +224,7 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
}
}
- // Try expanding the volume if it's created from a snapshot or another volume (see #1539)
- if vol.SourceVolID != "" || vol.SnapshotID != "" {
-
+ if required, ok := volumeContext[ResizeRequired]; ok && strings.EqualFold(required, "true") {
r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
needResize, err := r.NeedResize(devicePath, stagingTarget)
@@ -376,12 +302,10 @@ func (ns *nodeServer) NodeGetInfo(ctx context.Context, req *csi.NodeGetInfoReque
}
topology := &csi.Topology{Segments: topologyMap}
- maxVolume := ns.Cloud.GetMaxVolLimit()
-
return &csi.NodeGetInfoResponse{
NodeId: nodeID,
AccessibleTopology: topology,
- MaxVolumesPerNode: maxVolume,
+ MaxVolumesPerNode: ns.Opts.NodeVolumeAttachLimit,
}, nil
}
@@ -448,13 +372,16 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
if len(volumePath) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume path not provided")
}
+ // A raw block volume has no filesystem to resize; report success immediately.
+ volCapability := req.GetVolumeCapability()
+ if volCapability != nil && volCapability.GetBlock() != nil {
+ return &csi.NodeExpandVolumeResponse{}, nil
+ }
- _, err := ns.Cloud.GetVolume(volumeID)
+ _, err := blockdevice.IsBlockDevice(volumePath)
if err != nil {
- if cpoerrors.IsNotFound(err) {
- return nil, status.Errorf(codes.NotFound, "Volume with ID %s not found", volumeID)
- }
- return nil, status.Errorf(codes.Internal, "NodeExpandVolume failed with error %v", err)
+ return nil, status.Errorf(codes.NotFound, "Failed to determine device path for volumePath %s: %v", volumePath, err)
}
output, err := ns.Mount.GetMountFs(volumePath)
@@ -467,13 +394,14 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
return nil, status.Error(codes.Internal, "Unable to find Device path for volume")
}
- if ns.Cloud.GetBlockStorageOpts().RescanOnResize {
+ if ns.Opts.RescanOnResize {
// comparing current volume size with the expected one
newSize := req.GetCapacityRange().GetRequiredBytes()
if err := blockdevice.RescanBlockDeviceGeometry(devicePath, volumePath, newSize); err != nil {
return nil, status.Errorf(codes.Internal, "Could not verify %q volume size: %v", volumeID, err)
}
}
+
r := mountutil.NewResizeFs(ns.Mount.Mounter().Exec)
if _, err := r.Resize(devicePath, volumePath); err != nil {
return nil, status.Errorf(codes.Internal, "Could not resize volume %q: %v", volumeID, err)
@@ -499,7 +427,6 @@ func getDevicePath(volumeID string, m mount.IMount) (string, error) {
}
return devicePath, nil
-
}
func collectMountOptions(fsType string, mntFlags []string) []string {
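Both the stage-time `resizeRequired` path and `NodeExpandVolume` now funnel through the resizer in `k8s.io/mount-utils`. A condensed sketch of the resize-if-needed pattern, assuming a standard exec implementation from `k8s.io/utils/exec`; the helper and paths are illustrative:

```go
package main

import (
	"fmt"

	mountutil "k8s.io/mount-utils"
	utilexec "k8s.io/utils/exec"
)

// expandIfNeeded grows the filesystem on devicePath mounted at mountPath,
// but only when the filesystem is smaller than the underlying block device.
func expandIfNeeded(devicePath, mountPath string) error {
	r := mountutil.NewResizeFs(utilexec.New())

	need, err := r.NeedResize(devicePath, mountPath)
	if err != nil {
		return fmt.Errorf("could not determine if %s needs resize: %w", devicePath, err)
	}
	if !need {
		return nil
	}

	if _, err := r.Resize(devicePath, mountPath); err != nil {
		return fmt.Errorf("resize of %s failed: %w", devicePath, err)
	}
	return nil
}

func main() {
	if err := expandIfNeeded("/dev/sdb", "/var/lib/kubelet/plugins/staging"); err != nil {
		fmt.Println(err)
	}
}
```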
diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go
index f0cbc1b904..2284d15807 100644
--- a/pkg/csi/cinder/nodeserver_test.go
+++ b/pkg/csi/cinder/nodeserver_test.go
@@ -54,7 +54,12 @@ func init() {
"": omock,
}
- fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstances[""], map[string]string{})
+ opts := openstack.BlockStorageOpts{
+ RescanOnResize: false,
+ NodeVolumeAttachLimit: maxVolumesPerNode,
+ }
+
+ fakeNs = NewNodeServer(d, mount.MInstance, metadata.MetadataService, opts, map[string]string{})
}
}
@@ -127,7 +132,7 @@ func TestNodePublishVolume(t *testing.T) {
assert.Equal(expectedRes, actualRes)
}
-func TestNodePublishVolumeEphermeral(t *testing.T) {
+func TestNodePublishVolumeEphemeral(t *testing.T) {
properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster}
fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)
@@ -263,45 +268,6 @@ func TestNodeUnpublishVolume(t *testing.T) {
assert.Equal(expectedRes, actualRes)
}
-func TestNodeUnpublishVolumeEphermeral(t *testing.T) {
- mount.MInstance = mmock
- metadata.MetadataService = metamock
- osmock := map[string]openstack.IOpenStack{
- "": new(openstack.OpenStackMock),
- }
- fvolName := fmt.Sprintf("ephemeral-%s", FakeVolID)
-
- mmock.On("UnmountPath", FakeTargetPath).Return(nil)
- omock.On("GetVolumesByName", fvolName).Return(FakeVolList, nil)
- omock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil)
- omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil)
- omock.On("DeleteVolume", FakeVolID).Return(nil)
-
- d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster})
- fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, osmock[""], map[string]string{})
-
- // Init assert
- assert := assert.New(t)
-
- // Expected Result
- expectedRes := &csi.NodeUnpublishVolumeResponse{}
-
- // Fake request
- fakeReq := &csi.NodeUnpublishVolumeRequest{
- VolumeId: FakeVolID,
- TargetPath: FakeTargetPath,
- }
-
- // Invoke NodeUnpublishVolume
- actualRes, err := fakeNse.NodeUnpublishVolume(FakeCtx, fakeReq)
- if err != nil {
- t.Errorf("failed to NodeUnpublishVolume: %v", err)
- }
-
- // Assert
- assert.Equal(expectedRes, actualRes)
-}
-
// Test NodeUnstageVolume
func TestNodeUnstageVolume(t *testing.T) {
@@ -335,10 +301,19 @@ func TestNodeExpandVolume(t *testing.T) {
// Init assert
assert := assert.New(t)
+ // setup for test: create the volume path and clean it up afterwards
+ tempDir := os.TempDir()
+ volumePath := filepath.Join(tempDir, FakeTargetPath)
+ if err := os.MkdirAll(volumePath, 0750); err != nil {
+ t.Fatalf("Failed to set up volumepath: %v", err)
+ }
+ defer os.RemoveAll(volumePath)
+
// Fake request
fakeReq := &csi.NodeExpandVolumeRequest{
VolumeId: FakeVolName,
- VolumePath: FakeDevicePath,
+ VolumePath: volumePath,
}
// Expected Result
diff --git a/pkg/csi/cinder/openstack/noop_openstack.go b/pkg/csi/cinder/openstack/noop_openstack.go
deleted file mode 100644
index c2f563c0b2..0000000000
--- a/pkg/csi/cinder/openstack/noop_openstack.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
-Copyright 2024 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package openstack
-
-import (
- "fmt"
-
- "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/backups"
- "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/snapshots"
- "github.com/gophercloud/gophercloud/v2/openstack/blockstorage/v3/volumes"
- "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers"
- "k8s.io/cloud-provider-openstack/pkg/util/metadata"
-)
-
-type NoopOpenStack struct {
- bsOpts BlockStorageOpts
- metadataOpts metadata.Opts
-}
-
-func (os *NoopOpenStack) CreateVolume(name string, size int, vtype, availability string, snapshotID string, sourceVolID string, sourceBackupID string, tags map[string]string) (*volumes.Volume, error) {
- return nil, fmt.Errorf("CreateVolume is not implemented for ephemeral storage in this configuration")
-}
-
-func (os *NoopOpenStack) ListVolumes(limit int, startingToken string) ([]volumes.Volume, string, error) {
- return nil, "", nil
-}
-
-func (os *NoopOpenStack) GetVolumesByName(n string) ([]volumes.Volume, error) {
- return nil, nil
-}
-
-func (os *NoopOpenStack) DeleteVolume(volumeID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) GetVolume(volumeID string) (*volumes.Volume, error) {
- return &volumes.Volume{ID: volumeID}, nil
-}
-
-func (os *NoopOpenStack) AttachVolume(instanceID, volumeID string) (string, error) {
- return volumeID, nil
-}
-
-func (os *NoopOpenStack) WaitDiskAttached(instanceID string, volumeID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) WaitVolumeTargetStatus(volumeID string, tStatus []string) error {
- return nil
-}
-
-func (os *NoopOpenStack) DetachVolume(instanceID, volumeID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) WaitDiskDetached(instanceID string, volumeID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) GetAttachmentDiskPath(instanceID, volumeID string) (string, error) {
- return "", nil
-}
-
-func (os *NoopOpenStack) ExpandVolume(volumeID string, status string, newSize int) error {
- return nil
-}
-
-func (os *NoopOpenStack) GetMaxVolLimit() int64 {
- if os.bsOpts.NodeVolumeAttachLimit > 0 && os.bsOpts.NodeVolumeAttachLimit <= 256 {
- return os.bsOpts.NodeVolumeAttachLimit
- }
-
- return defaultMaxVolAttachLimit
-}
-
-func (os *NoopOpenStack) GetBlockStorageOpts() BlockStorageOpts {
- return os.bsOpts
-}
-
-func (os *NoopOpenStack) GetMetadataOpts() metadata.Opts {
- return os.metadataOpts
-}
-
-func (os *NoopOpenStack) CreateBackup(name, volID, snapshotID, availabilityZone string, tags map[string]string) (*backups.Backup, error) {
- return &backups.Backup{}, nil
-}
-
-func (os *NoopOpenStack) BackupsAreEnabled() (bool, error) {
- return false, nil
-}
-
-func (os *NoopOpenStack) DeleteBackup(backupID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) CreateSnapshot(name, volID string, tags map[string]string) (*snapshots.Snapshot, error) {
- return &snapshots.Snapshot{}, nil
-}
-
-func (os *NoopOpenStack) DeleteSnapshot(snapID string) error {
- return nil
-}
-
-func (os *NoopOpenStack) GetSnapshotByID(snapshotID string) (*snapshots.Snapshot, error) {
- return &snapshots.Snapshot{ID: snapshotID}, nil
-}
-
-func (os *NoopOpenStack) ListSnapshots(filters map[string]string) ([]snapshots.Snapshot, string, error) {
- return nil, "", nil
-}
-
-func (os *NoopOpenStack) WaitSnapshotReady(snapshotID string) (string, error) {
- return "", nil
-}
-
-func (os *NoopOpenStack) GetBackupByID(backupID string) (*backups.Backup, error) {
- return &backups.Backup{ID: backupID}, nil
-}
-
-func (os *NoopOpenStack) ListBackups(filters map[string]string) ([]backups.Backup, error) {
- return nil, nil
-}
-
-func (os *NoopOpenStack) WaitBackupReady(backupID string, snapshotSize int, backupMaxDurationSecondsPerGB int) (string, error) {
- return "", nil
-}
-
-func (os *NoopOpenStack) GetInstanceByID(instanceID string) (*servers.Server, error) {
- return &servers.Server{ID: instanceID}, nil
-}
diff --git a/pkg/csi/cinder/openstack/openstack.go b/pkg/csi/cinder/openstack/openstack.go
index 6bcfe43762..29f8196218 100644
--- a/pkg/csi/cinder/openstack/openstack.go
+++ b/pkg/csi/cinder/openstack/openstack.go
@@ -144,12 +144,10 @@ func GetConfigFromFiles(configFilePaths []string) (Config, error) {
const defaultMaxVolAttachLimit int64 = 256
var OsInstances map[string]IOpenStack
-var NoopInstances map[string]IOpenStack
var configFiles = []string{"/etc/cloud.conf"}
func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) {
OsInstances = make(map[string]IOpenStack)
- NoopInstances = make(map[string]IOpenStack)
metrics.RegisterMetrics("cinder-csi")
if httpEndpoint != "" {
mux := http.NewServeMux()
@@ -168,7 +166,7 @@ func InitOpenStackProvider(cfgFiles []string, httpEndpoint string) {
}
// CreateOpenStackProvider creates Openstack Instance with custom Global config param
-func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error) {
+func CreateOpenStackProvider(cloudName string) (IOpenStack, error) {
// Get config from file
cfg, err := GetConfigFromFiles(configFiles)
if err != nil {
@@ -186,16 +184,6 @@ func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error
cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", metadata.ConfigDriveID, metadata.MetadataID)
}
- if noClient {
- // Init OpenStack
- NoopInstances[cloudName] = &NoopOpenStack{
- bsOpts: cfg.BlockStorage,
- metadataOpts: cfg.Metadata,
- }
-
- return NoopInstances[cloudName], nil
- }
-
provider, err := client.NewOpenStackClient(cfg.Global[cloudName], "cinder-csi-plugin", userAgentData...)
if err != nil {
return nil, err
@@ -231,25 +219,12 @@ func CreateOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error
}
// GetOpenStackProvider returns Openstack Instance
-func GetOpenStackProvider(cloudName string, noClient bool) (IOpenStack, error) {
- if noClient {
- NoopInstance, NoopInstanceDefined := NoopInstances[cloudName]
- if NoopInstanceDefined {
- return NoopInstance, nil
- }
- NoopInstance, err := CreateOpenStackProvider(cloudName, noClient)
- if err != nil {
- return nil, err
- }
-
- return NoopInstance, nil
- }
-
+func GetOpenStackProvider(cloudName string) (IOpenStack, error) {
OsInstance, OsInstanceDefined := OsInstances[cloudName]
if OsInstanceDefined {
return OsInstance, nil
}
- OsInstance, err := CreateOpenStackProvider(cloudName, noClient)
+ OsInstance, err := CreateOpenStackProvider(cloudName)
if err != nil {
return nil, err
}
diff --git a/pkg/csi/cinder/openstack/openstack_volumes.go b/pkg/csi/cinder/openstack/openstack_volumes.go
index 116f35c467..5ac62fc73f 100644
--- a/pkg/csi/cinder/openstack/openstack_volumes.go
+++ b/pkg/csi/cinder/openstack/openstack_volumes.go
@@ -237,7 +237,7 @@ func (os *OpenStack) WaitDiskAttached(instanceID string, volumeID string) error
})
if wait.Interrupted(err) {
- err = fmt.Errorf("Volume %q failed to be attached within the alloted time", volumeID)
+ err = fmt.Errorf("Volume %q failed to be attached within the allotted time", volumeID)
}
return err
@@ -325,7 +325,7 @@ func (os *OpenStack) WaitDiskDetached(instanceID string, volumeID string) error
})
if wait.Interrupted(err) {
- err = fmt.Errorf("Volume %q failed to detach within the alloted time", volumeID)
+ err = fmt.Errorf("Volume %q failed to detach within the allotted time", volumeID)
}
return err
diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go
index 13c1ee993a..64551e425b 100644
--- a/pkg/csi/cinder/utils.go
+++ b/pkg/csi/cinder/utils.go
@@ -57,13 +57,17 @@ func NewIdentityServer(d *Driver) *identityServer {
}
}
-func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, cloud openstack.IOpenStack, topologies map[string]string) *nodeServer {
+func NewNodeServer(d *Driver, mount mount.IMount, metadata metadata.IMetadata, opts openstack.BlockStorageOpts, topologies map[string]string) *nodeServer {
+ if opts.NodeVolumeAttachLimit < 0 || opts.NodeVolumeAttachLimit > maxVolumesPerNode {
+ opts.NodeVolumeAttachLimit = maxVolumesPerNode
+ }
+
return &nodeServer{
Driver: d,
Mount: mount,
Metadata: metadata,
- Cloud: cloud,
Topologies: topologies,
+ Opts: opts,
}
}
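The clamp in `NewNodeServer` keeps the limit within `[0, maxVolumesPerNode]`, and `0` passes through; in CSI, a `MaxVolumesPerNode` of 0 in `NodeGetInfo` means no limit is reported and the CO decides. A hypothetical spot-check of that behavior, written as if it lived in the `cinder` package next to the existing tests:

```go
package cinder

import (
	"testing"

	"k8s.io/cloud-provider-openstack/pkg/csi/cinder/openstack"
)

func TestNodeVolumeAttachLimitClamp(t *testing.T) {
	cases := map[int64]int64{
		-1:   maxVolumesPerNode, // negative values fall back to the default
		0:    0,                 // zero passes through ("no limit reported")
		128:  128,               // in-range values are kept
		1024: maxVolumesPerNode, // values above the cap fall back as well
	}

	for in, want := range cases {
		opts := openstack.BlockStorageOpts{NodeVolumeAttachLimit: in}
		ns := NewNodeServer(nil, nil, nil, opts, nil)
		if got := ns.Opts.NodeVolumeAttachLimit; got != want {
			t.Errorf("limit %d: got %d, want %d", in, got, want)
		}
	}
}
```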
diff --git a/pkg/util/blockdevice/blockdevice_unsupported.go b/pkg/util/blockdevice/blockdevice_unsupported.go
index a75c69be7b..b9adad44d3 100644
--- a/pkg/util/blockdevice/blockdevice_unsupported.go
+++ b/pkg/util/blockdevice/blockdevice_unsupported.go
@@ -34,3 +34,7 @@ func GetBlockDeviceSize(path string) (int64, error) {
func RescanBlockDeviceGeometry(devicePath string, deviceMountPath string, newSize int64) error {
return errors.New("RescanBlockDeviceGeometry is not implemented for this OS")
}
+
+func RescanDevice(devicePath string) error {
+ return errors.New("RescanDevice is not implemented for this OS")
+}
diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go
index 3eb723ddfa..3857c5d96e 100644
--- a/tests/sanity/cinder/sanity_test.go
+++ b/tests/sanity/cinder/sanity_test.go
@@ -28,9 +28,13 @@ func TestDriver(t *testing.T) {
fakemnt := GetFakeMountProvider()
fakemet := &fakemetadata{}
+ fakeOpts := openstack.BlockStorageOpts{
+ RescanOnResize: false,
+ NodeVolumeAttachLimit: 200,
+ }
d.SetupControllerService(openstack.OsInstances)
- d.SetupNodeService(fakecloudprovider, fakemnt, fakemet, map[string]string{})
+ d.SetupNodeService(fakemnt, fakemet, fakeOpts, map[string]string{})
// TODO: Stop call