diff --git a/cmd/cinder-csi-plugin/main.go b/cmd/cinder-csi-plugin/main.go index e59502a5cc..1673bda4e1 100644 --- a/cmd/cinder-csi-plugin/main.go +++ b/cmd/cinder-csi-plugin/main.go @@ -31,11 +31,13 @@ import ( ) var ( - endpoint string - nodeID string - cloudConfig []string - cluster string - httpEndpoint string + endpoint string + nodeID string + cloudConfig []string + cluster string + httpEndpoint string + provideControllerService bool + provideNodeService bool ) func main() { @@ -65,6 +67,10 @@ func main() { cmd.PersistentFlags().StringVar(&cluster, "cluster", "", "The identifier of the cluster that the plugin is running in.") cmd.PersistentFlags().StringVar(&httpEndpoint, "http-endpoint", "", "The TCP network address where the HTTP server for providing metrics for diagnostics, will listen (example: `:8080`). The default is empty string, which means the server is disabled.") + + cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)") + cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)") + openstack.AddExtraFlags(pflag.CommandLine) code := cli.Run(cmd) @@ -73,19 +79,28 @@ func main() { func handle() { // Initialize cloud - d := cinder.NewDriver(endpoint, cluster) + d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) + openstack.InitOpenStackProvider(cloudConfig, httpEndpoint) cloud, err := openstack.GetOpenStackProvider() if err != nil { klog.Warningf("Failed to GetOpenStackProvider: %v", err) return } - //Initialize mount - mount := mount.GetMountProvider() - //Initialize Metadata - metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + if provideControllerService { + d.SetupControllerService(cloud) + } + + if provideNodeService { + //Initialize mount + mount := 
mount.GetMountProvider() + + //Initialize Metadata + metadata := metadata.GetMetadataProvider(cloud.GetMetadataOpts().SearchOrder) + + d.SetupNodeService(cloud, mount, metadata) + } - d.SetupDriver(cloud, mount, metadata) d.Run() } diff --git a/cmd/manila-csi-plugin/main.go b/cmd/manila-csi-plugin/main.go index 60d4c76b25..b68217dce0 100644 --- a/cmd/manila-csi-plugin/main.go +++ b/cmd/manila-csi-plugin/main.go @@ -45,9 +45,11 @@ var ( clusterID string // Runtime options - endpoint string - runtimeConfigFile string - userAgentData []string + endpoint string + runtimeConfigFile string + userAgentData []string + provideControllerService bool + provideNodeService bool ) func validateShareProtocolSelector(v string) error { @@ -75,23 +77,39 @@ func main() { manilaClientBuilder := &manilaclient.ClientBuilder{UserAgent: "manila-csi-plugin", ExtraUserAgentData: userAgentData} csiClientBuilder := &csiclient.ClientBuilder{} - d, err := manila.NewDriver( - &manila.DriverOpts{ - DriverName: driverName, - NodeID: nodeID, - NodeAZ: nodeAZ, - WithTopology: withTopology, - ShareProto: protoSelector, - ServerCSIEndpoint: endpoint, - FwdCSIEndpoint: fwdEndpoint, - ManilaClientBuilder: manilaClientBuilder, - CSIClientBuilder: csiClientBuilder, - ClusterID: clusterID, - }, - ) + opts := &manila.DriverOpts{ + DriverName: driverName, + WithTopology: withTopology, + ShareProto: protoSelector, + ServerCSIEndpoint: endpoint, + FwdCSIEndpoint: fwdEndpoint, + ManilaClientBuilder: manilaClientBuilder, + CSIClientBuilder: csiClientBuilder, + ClusterID: clusterID, + } + + if provideNodeService { + opts.NodeID = nodeID + opts.NodeAZ = nodeAZ + } + d, err := manila.NewDriver(opts) if err != nil { - klog.Fatalf("driver initialization failed: %v", err) + klog.Fatalf("Driver initialization failed: %v", err) + } + + if provideControllerService { + err = d.SetupControllerService() + if err != nil { + klog.Fatalf("Driver controller service initialization failed: %v", err) + } + } + + if 
provideNodeService { + err = d.SetupNodeService() + if err != nil { + klog.Fatalf("Driver node service initialization failed: %v", err) + } } runtimeconfig.RuntimeConfigFilename = runtimeConfigFile @@ -105,10 +123,7 @@ func main() { cmd.PersistentFlags().StringVar(&driverName, "drivername", "manila.csi.openstack.org", "name of the driver") - cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "this node's ID") - if err := cmd.MarkPersistentFlagRequired("nodeid"); err != nil { - klog.Fatalf("Unable to mark flag nodeid to be required: %v", err) - } + cmd.PersistentFlags().StringVar(&nodeID, "nodeid", "", "this node's ID. This value is required if the node service is provided by this CSI driver instance.") cmd.PersistentFlags().StringVar(&nodeAZ, "nodeaz", "", "this node's availability zone") @@ -132,6 +147,9 @@ func main() { cmd.PersistentFlags().StringVar(&clusterID, "cluster-id", "", "The identifier of the cluster that the plugin is running in.") + cmd.PersistentFlags().BoolVar(&provideControllerService, "provide-controller-service", true, "If set to true then the CSI driver does provide the controller service (default: true)") + cmd.PersistentFlags().BoolVar(&provideNodeService, "provide-node-service", true, "If set to true then the CSI driver does provide the node service (default: true)") + code := cli.Run(cmd) os.Exit(code) } diff --git a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md index 4bcb1eaeed..c3a3656ffd 100644 --- a/docs/cinder-csi-plugin/using-cinder-csi-plugin.md +++ b/docs/cinder-csi-plugin/using-cinder-csi-plugin.md @@ -98,6 +98,19 @@ In addition to the standard set of klog flags, `cinder-csi-plugin` accepts the f The default is empty string, which means the server is disabled. +
--provide-controller-service <enabled>
+
+ If set to true then the CSI driver provides the controller service. + + The default is to provide the controller service. +
+ +
--provide-node-service <enabled>
+
+ If set to true then the CSI driver provides the node service. + + The default is to provide the node service. +
## Driver Config @@ -114,7 +127,7 @@ Implementation of `cinder-csi-plugin` relies on following OpenStack services. For Driver configuration, parameters must be passed via configuration file specified in `$CLOUD_CONFIG` environment variable. The following sections are supported in configuration file. -### Global +### Global For Cinder CSI Plugin to authenticate with OpenStack Keystone, required parameters needs to be passed in `[Global]` section of the file. For all supported parameters, please refer [Global](../openstack-cloud-controller-manager/using-openstack-cloud-controller-manager.md#global) section. ### Block Storage @@ -196,7 +209,7 @@ cinder.csi.openstack.org true true false < mountPath: /etc/cacert readOnly: true - volumes: + volumes: .... - name: cacert hostPath: @@ -254,7 +267,7 @@ helm install --namespace kube-system --name cinder-csi ./charts/cinder-csi-plugi | StorageClass `parameters` | `availability` | `nova` | String. Volume Availability Zone | | StorageClass `parameters` | `type` | Empty String | String. Name/ID of Volume type. Corresponding volume type should exist in cinder | | VolumeSnapshotClass `parameters` | `force-create` | `false` | Enable to support creating snapshot for a volume in in-use status | -| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| +| Inline Volume `volumeAttributes` | `capacity` | `1Gi` | volume size for creating inline volumes| | Inline Volume `VolumeAttributes` | `type` | Empty String | Name/ID of Volume type. 
Corresponding volume type should exist in cinder | ## Local Development @@ -266,14 +279,14 @@ To build the plugin, run ``` $ export ARCH=amd64 # Defaults to amd64 $ make build-cmd-cinder-csi-plugin -``` +``` To build cinder-csi-plugin image ``` $ export ARCH=amd64 # Defaults to amd64 $ make build-local-image-cinder-csi-plugin -``` +``` ### Testing @@ -284,7 +297,7 @@ To run all unit tests: $ make test ``` #### Sanity Tests -Sanity tests ensures the CSI spec conformance of the driver. For more info, refer [Sanity check](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) +Sanity tests ensures the CSI spec conformance of the driver. For more info, refer [Sanity check](https://github.com/kubernetes-csi/csi-test/tree/master/pkg/sanity) Run sanity tests for cinder CSI driver using: @@ -298,5 +311,5 @@ Optionally, to test the driver csc tool could be used. please refer, [usage guid Starting from Kubernetes 1.21, OpenStack Cinder CSI migration is supported as beta feature and is `ON` by default. Cinder CSI driver must be installed on clusters on OpenStack for Cinder volumes to work. If you have persistence volumes that are created with in-tree `kubernetes.io/cinder` plugin, you could migrate to use `cinder.csi.openstack.org` Container Storage Interface (CSI) Driver. -* The CSI Migration feature for Cinder, when enabled, shims all plugin operations from the existing in-tree plugin to the `cinder.csi.openstack.org` CSI Driver. +* The CSI Migration feature for Cinder, when enabled, shims all plugin operations from the existing in-tree plugin to the `cinder.csi.openstack.org` CSI Driver. 
* For more info, please refer [Migrate to CCM with CSI Migration](../openstack-cloud-controller-manager/migrate-to-ccm-with-csimigration.md#migrate-from-in-tree-cloud-provider-to-openstack-cloud-controller-manager-and-enable-csimigration) guide diff --git a/docs/manila-csi-plugin/using-manila-csi-plugin.md b/docs/manila-csi-plugin/using-manila-csi-plugin.md index bfca15c56a..f9e02636eb 100644 --- a/docs/manila-csi-plugin/using-manila-csi-plugin.md +++ b/docs/manila-csi-plugin/using-manila-csi-plugin.md @@ -40,6 +40,8 @@ Option | Default value | Description `--share-protocol-selector` | _none_ | Specifies which Manila share protocol to use for this instance of the driver. See [supported protocols](#share-protocol-support-matrix) for valid values. `--fwdendpoint` | _none_ | [CSI Node Plugin](https://github.com/container-storage-interface/spec/blob/master/spec.md#rpc-interface) endpoint to which all Node Service RPCs are forwarded. Must be able to handle the file-system specified in `share-protocol-selector`. Check out the [Deployment](#deployment) section to see why this is necessary. `--cluster-id` | _none_ | The identifier of the cluster that the plugin is running in. If set then the plugin will add "manila.csi.openstack.org/cluster: \" to metadata of created shares. +`--provide-controller-service` | `true` | If set to true then the CSI driver does provide the controller service. +`--provide-node-service` | `true` | If set to true then the CSI driver does provide the node service. ### Controller Service volume parameters @@ -56,7 +58,7 @@ Parameter | Required | Description `cephfs-kernelMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS kernel client. See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information. `cephfs-fuseMountOptions` | _no_ | Relevant for CephFS Manila shares. Specifies mount options for CephFS FUSE client. 
See [CSI CephFS docs](https://github.com/ceph/ceph-csi/blob/csi-v1.0/docs/deploy-cephfs.md#configuration) for further information. `cephfs-clientID` | _no_ | Relevant for CephFS Manila shares. Specifies the cephx client ID when creating an access rule for the provisioned share. The same cephx client ID may be shared with multiple Manila shares. If no value is provided, client ID for the provisioned Manila share will be set to some unique value (PersistentVolume name). -`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Defaults to `0.0.0.0/0`, i.e. anyone. +`nfs-shareClient` | _no_ | Relevant for NFS Manila shares. Specifies what address has access to the NFS share. Defaults to `0.0.0.0/0`, i.e. anyone. ### Node Service volume context @@ -199,7 +201,7 @@ CSI Manila Helm chart is located in `charts/manila-csi-plugin`. First, modify `values.yaml` to suite your environment, and then simply install the Helm chart with `$ helm install ./charts/manila-csi-plugin`. -Note that the release name generated by `helm install` may not be suitable due to their length. The chart generates object names with the release name included in them, which may cause the names to exceed 63 characters and result in chart installation failure. You may use `--name` flag to set the release name manually. See [helm installation docs](https://helm.sh/docs/helm/#helm-install) for more info. Alternatively, you may also use `nameOverride` or `fullnameOverride` variables in `values.yaml` to override the respective names. +Note that the release name generated by `helm install` may not be suitable due to their length. The chart generates object names with the release name included in them, which may cause the names to exceed 63 characters and result in chart installation failure. You may use `--name` flag to set the release name manually. See [helm installation docs](https://helm.sh/docs/helm/#helm-install) for more info. 
Alternatively, you may also use `nameOverride` or `fullnameOverride` variables in `values.yaml` to override the respective names. **Manual deployment** diff --git a/pkg/csi/cinder/controllerserver_test.go b/pkg/csi/cinder/controllerserver_test.go index eb32140a4c..ea01bcbc2d 100644 --- a/pkg/csi/cinder/controllerserver_test.go +++ b/pkg/csi/cinder/controllerserver_test.go @@ -34,7 +34,7 @@ func init() { osmock = new(openstack.OpenStackMock) openstack.OsInstance = osmock - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeCs = NewControllerServer(d, openstack.OsInstance) } @@ -42,7 +42,6 @@ func init() { // Test CreateVolume func TestCreateVolume(t *testing.T) { - // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) @@ -89,7 +88,6 @@ func TestCreateVolume(t *testing.T) { // Test CreateVolume with additional param func TestCreateVolumeWithParam(t *testing.T) { - // mock OpenStack properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) @@ -141,7 +139,6 @@ func TestCreateVolumeWithParam(t *testing.T) { } func TestCreateVolumeWithExtraMetadata(t *testing.T) { - // mock OpenStack properties := map[string]string{ "cinder.csi.openstack.org/cluster": FakeCluster, @@ -188,7 +185,6 @@ func TestCreateVolumeWithExtraMetadata(t *testing.T) { } func TestCreateVolumeFromSnapshot(t *testing.T) { - properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, 
mock.AnythingOfType("int"), FakeVolType, "", FakeSnapshotID, "", &properties).Return(&FakeVolFromSnapshot, nil) @@ -236,7 +232,6 @@ func TestCreateVolumeFromSnapshot(t *testing.T) { } func TestCreateVolumeFromSourceVolume(t *testing.T) { - properties := map[string]string{"cinder.csi.openstack.org/cluster": FakeCluster} // CreateVolume(name string, size int, vtype, availability string, snapshotID string, tags *map[string]string) (string, string, int, error) osmock.On("CreateVolume", FakeVolName, mock.AnythingOfType("int"), FakeVolType, "", "", FakeVolID, &properties).Return(&FakeVolFromSourceVolume, nil) @@ -285,7 +280,6 @@ func TestCreateVolumeFromSourceVolume(t *testing.T) { // Test CreateVolumeDuplicate func TestCreateVolumeDuplicate(t *testing.T) { - // Init assert assert := assert.New(t) @@ -318,7 +312,6 @@ func TestCreateVolumeDuplicate(t *testing.T) { // Test DeleteVolume func TestDeleteVolume(t *testing.T) { - // DeleteVolume(volumeID string) error osmock.On("DeleteVolume", FakeVolID).Return(nil) @@ -345,7 +338,6 @@ func TestDeleteVolume(t *testing.T) { // Test ControllerPublishVolume func TestControllerPublishVolume(t *testing.T) { - // AttachVolume(instanceID, volumeID string) (string, error) osmock.On("AttachVolume", FakeNodeID, FakeVolID).Return(FakeVolID, nil) // WaitDiskAttached(instanceID string, volumeID string) error @@ -387,7 +379,6 @@ func TestControllerPublishVolume(t *testing.T) { // Test ControllerUnpublishVolume func TestControllerUnpublishVolume(t *testing.T) { - // DetachVolume(instanceID, volumeID string) error osmock.On("DetachVolume", FakeNodeID, FakeVolID).Return(nil) // WaitDiskDetached(instanceID string, volumeID string) error @@ -416,7 +407,6 @@ func TestControllerUnpublishVolume(t *testing.T) { } func TestListVolumes(t *testing.T) { - osmock.On("ListVolumes", 2, FakeVolID).Return(FakeVolListMultiple, "", nil) // Init assert @@ -461,7 +451,6 @@ func TestListVolumes(t *testing.T) { // Test CreateSnapshot func TestCreateSnapshot(t 
*testing.T) { - osmock.On("CreateSnapshot", FakeSnapshotName, FakeVolID, &map[string]string{cinderCSIClusterIDKey: "cluster"}).Return(&FakeSnapshotRes, nil) osmock.On("ListSnapshots", map[string]string{"Name": FakeSnapshotName}).Return(FakeSnapshotListEmpty, "", nil) osmock.On("WaitSnapshotReady", FakeSnapshotID).Return(nil) @@ -490,7 +479,6 @@ func TestCreateSnapshot(t *testing.T) { // Test CreateSnapshot with extra metadata func TestCreateSnapshotWithExtraMetadata(t *testing.T) { - properties := map[string]string{ "cinder.csi.openstack.org/cluster": FakeCluster, "csi.storage.k8s.io/volumesnapshot/name": FakeSnapshotName, @@ -532,7 +520,6 @@ func TestCreateSnapshotWithExtraMetadata(t *testing.T) { // Test DeleteSnapshot func TestDeleteSnapshot(t *testing.T) { - // DeleteSnapshot(volumeID string) error osmock.On("DeleteSnapshot", FakeSnapshotID).Return(nil) @@ -558,7 +545,6 @@ func TestDeleteSnapshot(t *testing.T) { } func TestListSnapshots(t *testing.T) { - osmock.On("ListSnapshots", map[string]string{"Limit": "1", "Marker": FakeVolID, "Status": "available"}).Return(FakeSnapshotsRes, "", nil) assert := assert.New(t) @@ -574,7 +560,6 @@ func TestListSnapshots(t *testing.T) { } func TestControllerExpandVolume(t *testing.T) { - tState := []string{"available", "in-use"} // ExpandVolume(volumeID string, status string, size int) osmock.On("ExpandVolume", FakeVolID, openstack.VolumeAvailableStatus, 5).Return(nil) @@ -611,7 +596,6 @@ func TestControllerExpandVolume(t *testing.T) { } func TestValidateVolumeCapabilities(t *testing.T) { - // GetVolume(volumeID string) osmock.On("GetVolume", FakeVolID).Return(FakeVol1) diff --git a/pkg/csi/cinder/driver.go b/pkg/csi/cinder/driver.go index a51cb9d06a..bd75fbce9d 100644 --- a/pkg/csi/cinder/driver.go +++ b/pkg/csi/cinder/driver.go @@ -70,13 +70,17 @@ type Driver struct { nscap []*csi.NodeServiceCapability } -func NewDriver(endpoint, cluster string) *Driver { +type DriverOpts struct { + ClusterID string + Endpoint string +} 
+func NewDriver(o *DriverOpts) *Driver { d := &Driver{} d.name = driverName d.fqVersion = fmt.Sprintf("%s@%s", Version, version.Version) - d.endpoint = endpoint - d.cluster = cluster + d.endpoint = o.Endpoint + d.cluster = o.ClusterID klog.Info("Driver: ", d.name) klog.Info("Driver version: ", d.fqVersion) @@ -108,6 +112,8 @@ func NewDriver(endpoint, cluster string) *Driver { csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, }) + d.ids = NewIdentityServer(d) + return d } @@ -166,15 +172,20 @@ func (d *Driver) GetVolumeCapabilityAccessModes() []*csi.VolumeCapability_Access return d.vcap } -func (d *Driver) SetupDriver(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { - - d.ids = NewIdentityServer(d) +func (d *Driver) SetupControllerService(cloud openstack.IOpenStack) { + klog.Info("Providing controller service") d.cs = NewControllerServer(d, cloud) - d.ns = NewNodeServer(d, mount, metadata, cloud) +} +func (d *Driver) SetupNodeService(cloud openstack.IOpenStack, mount mount.IMount, metadata metadata.IMetadata) { + klog.Info("Providing node service") + d.ns = NewNodeServer(d, mount, metadata, cloud) } func (d *Driver) Run() { + if nil == d.cs && nil == d.ns { + klog.Fatal("No CSI services initialized") + } - RunControllerandNodePublishServer(d.endpoint, d.ids, d.cs, d.ns) + RunServicesInitialized(d.endpoint, d.ids, d.cs, d.ns) } diff --git a/pkg/csi/cinder/driver_test.go b/pkg/csi/cinder/driver_test.go index 511556c3f7..3f37ab1416 100644 --- a/pkg/csi/cinder/driver_test.go +++ b/pkg/csi/cinder/driver_test.go @@ -29,10 +29,11 @@ var ( func NewFakeDriver() *Driver { - driver := NewDriver(FakeEndpoint, FakeCluster) + driver := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) return driver } + func TestValidateControllerServiceRequest(t *testing.T) { d := NewFakeDriver() diff --git a/pkg/csi/cinder/identityserver_test.go b/pkg/csi/cinder/identityserver_test.go index ce34ebca22..37a266addb 100644 --- 
a/pkg/csi/cinder/identityserver_test.go +++ b/pkg/csi/cinder/identityserver_test.go @@ -26,7 +26,7 @@ import ( ) func TestGetPluginInfo(t *testing.T) { - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) ids := NewIdentityServer(d) diff --git a/pkg/csi/cinder/nodeserver_test.go b/pkg/csi/cinder/nodeserver_test.go index d6762dc788..8e67d6f70c 100644 --- a/pkg/csi/cinder/nodeserver_test.go +++ b/pkg/csi/cinder/nodeserver_test.go @@ -38,7 +38,7 @@ var omock *openstack.OpenStackMock func init() { if fakeNs == nil { - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) // mock MountMock mmock = new(mount.MountMock) @@ -142,7 +142,7 @@ func TestNodePublishVolumeEphermeral(t *testing.T) { metadata.MetadataService = metamock openstack.OsInstance = omock - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) // Init assert @@ -281,7 +281,6 @@ func TestNodeUnpublishVolume(t *testing.T) { } func TestNodeUnpublishVolumeEphermeral(t *testing.T) { - mount.MInstance = mmock metadata.MetadataService = metamock openstack.OsInstance = omock @@ -293,7 +292,7 @@ func TestNodeUnpublishVolumeEphermeral(t *testing.T) { omock.On("WaitDiskDetached", FakeNodeID, FakeVolID).Return(nil) omock.On("DeleteVolume", FakeVolID).Return(nil) - d := NewDriver(FakeEndpoint, FakeCluster) + d := NewDriver(&DriverOpts{Endpoint: FakeEndpoint, ClusterID: FakeCluster}) fakeNse := NewNodeServer(d, mount.MInstance, metadata.MetadataService, openstack.OsInstance) // Init assert diff --git a/pkg/csi/cinder/utils.go b/pkg/csi/cinder/utils.go index 7e3e925c98..5758065cde 100644 --- a/pkg/csi/cinder/utils.go +++ b/pkg/csi/cinder/utils.go @@ -68,8 +68,7 @@ func NewNodeServer(d *Driver, mount mount.IMount, metadata 
metadata.IMetadata, c //revive:enable:unexported-return -func RunControllerandNodePublishServer(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { - +func RunServicesInitialized(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) { s := NewNonBlockingGRPCServer() s.Start(endpoint, ids, cs, ns) s.Wait() diff --git a/pkg/csi/manila/driver.go b/pkg/csi/manila/driver.go index 3d977785c7..baf0517c59 100644 --- a/pkg/csi/manila/driver.go +++ b/pkg/csi/manila/driver.go @@ -99,7 +99,6 @@ func argNotEmpty(val, name string) error { func NewDriver(o *DriverOpts) (*Driver, error) { m := map[string]string{ - "node ID": o.NodeID, "driver name": o.DriverName, "driver endpoint": o.ServerCSIEndpoint, "FWD endpoint": o.FwdCSIEndpoint, @@ -151,6 +150,14 @@ func NewDriver(o *DriverOpts) (*Driver, error) { d.serverEndpoint = endpointAddress(serverProto, serverAddr) d.fwdEndpoint = endpointAddress(fwdProto, fwdAddr) + d.ids = &identityServer{d: d} + + return d, nil +} + +func (d *Driver) SetupControllerService() error { + klog.Info("Providing controller service") + d.addControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{ csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, @@ -165,11 +172,22 @@ func NewDriver(o *DriverOpts) (*Driver, error) { csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY, }) + d.cs = &controllerServer{d: d} + return nil +} + +func (d *Driver) SetupNodeService() error { + if err := argNotEmpty(d.nodeID, "node ID"); err != nil { + return err + } + + klog.Info("Providing node service") + var supportsNodeStage bool nodeCapsMap, err := d.initProxiedDriver() if err != nil { - return nil, fmt.Errorf("failed to initialize proxied CSI driver: %v", err) + return fmt.Errorf("failed to initialize proxied CSI driver: %v", err) } nscaps := make([]csi.NodeServiceCapability_RPC_Type, 0, len(nodeCapsMap)) for c := range 
nodeCapsMap { @@ -182,14 +200,15 @@ func NewDriver(o *DriverOpts) (*Driver, error) { d.addNodeServiceCapabilities(nscaps) - d.ids = &identityServer{d: d} - d.cs = &controllerServer{d: d} d.ns = &nodeServer{d: d, supportsNodeStage: supportsNodeStage, nodeStageCache: make(map[volumeID]stageCacheEntry)} - - return d, nil + return nil } func (d *Driver) Run() { + if nil == d.cs && nil == d.ns { + klog.Fatal("No CSI services initialized") + } + s := nonBlockingGRPCServer{} s.start(d.serverEndpoint, d.ids, d.cs, d.ns) s.wait() @@ -317,9 +336,15 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids *identityServer, cs * s.server = server - csi.RegisterIdentityServer(server, ids) - csi.RegisterControllerServer(server, cs) - csi.RegisterNodeServer(server, ns) + if ids != nil { + csi.RegisterIdentityServer(server, ids) + } + if cs != nil { + csi.RegisterControllerServer(server, cs) + } + if ns != nil { + csi.RegisterNodeServer(server, ns) + } klog.Infof("listening for connections on %#v", listener.Addr()) diff --git a/tests/sanity/cinder/sanity_test.go b/tests/sanity/cinder/sanity_test.go index a3d13d33fc..4e3a2abd65 100644 --- a/tests/sanity/cinder/sanity_test.go +++ b/tests/sanity/cinder/sanity_test.go @@ -19,14 +19,16 @@ func TestDriver(t *testing.T) { endpoint := "unix://" + socket cluster := "kubernetes" - d := cinder.NewDriver(endpoint, cluster) + d := cinder.NewDriver(&cinder.DriverOpts{Endpoint: endpoint, ClusterID: cluster}) + fakecloudprovider := getfakecloud() openstack.OsInstance = fakecloudprovider fakemnt := GetFakeMountProvider() fakemet := &fakemetadata{} - d.SetupDriver(fakecloudprovider, fakemnt, fakemet) + d.SetupControllerService(fakecloudprovider) + d.SetupNodeService(fakecloudprovider, fakemnt, fakemet) // TODO: Stop call diff --git a/tests/sanity/manila/sanity_test.go b/tests/sanity/manila/sanity_test.go index 8fb7b17b8b..8a9d672f85 100644 --- a/tests/sanity/manila/sanity_test.go +++ b/tests/sanity/manila/sanity_test.go @@ -43,10 +43,20 @@ func 
TestDriver(t *testing.T) { FwdCSIEndpoint: fwdEndpoint, ManilaClientBuilder: &fakeManilaClientBuilder{}, CSIClientBuilder: &fakeCSIClientBuilder{}, - }) + }, + ) + if err != nil { + t.Fatalf("Failed to initialize CSI Manila driver: %v", err) + } + + err = d.SetupControllerService() + if err != nil { + t.Fatalf("Failed to initialize CSI Manila controller service: %v", err) + } + err = d.SetupNodeService() if err != nil { - t.Fatalf("failed to initialize CSI Manila driver: %v", err) + t.Fatalf("Failed to initialize CSI Manila node service: %v", err) } go d.Run()