From df23016f62fc1e01ac50ad42aea178785a6f8e5a Mon Sep 17 00:00:00 2001 From: ialidzhikov Date: Sat, 16 Jan 2021 22:17:43 +0200 Subject: [PATCH 1/4] Update to golang@1.15.6 Signed-off-by: ialidzhikov --- Dockerfile | 2 +- docs/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index d1b42b5760..aa6464e644 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM golang:1.14.1-stretch AS builder +FROM golang:1.15.6 AS builder WORKDIR /go/src/github.com/kubernetes-sigs/aws-ebs-csi-driver COPY . . RUN make diff --git a/docs/README.md b/docs/README.md index 6bd5314f4c..9e3902ddce 100644 --- a/docs/README.md +++ b/docs/README.md @@ -162,7 +162,7 @@ To make sure dynamically provisioned EBS volumes have all tags that the in-tree Please go through [CSI Spec](https://github.com/container-storage-interface/spec/blob/master/spec.md) and [General CSI driver development guideline](https://kubernetes-csi.github.io/docs/developing.html) to get some basic understanding of CSI driver before you start. ### Requirements -* Golang 1.14.+ +* Golang 1.15.+ * [Ginkgo](https://github.com/onsi/ginkgo) in your PATH for integration testing and end-to-end testing * Docker 17.05+ for releasing From 71ff40d48ece885a0f83864757f2e2a95fb7f3f2 Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Tue, 19 Jan 2021 13:19:39 -0800 Subject: [PATCH 2/4] no operation for block device in NodeExpandVolume --- pkg/driver/node.go | 45 +++++++++++++++++++-- pkg/driver/node_test.go | 86 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 127 insertions(+), 4 deletions(-) diff --git a/pkg/driver/node.go b/pkg/driver/node.go index 68329998d5..96f68f3cec 100644 --- a/pkg/driver/node.go +++ b/pkg/driver/node.go @@ -254,14 +254,47 @@ func (d *nodeService) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV if len(volumeID) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") } + volumePath := req.GetVolumePath() + if len(volumePath) == 0 { + return nil, status.Error(codes.InvalidArgument, "volume path must be provided") + } + + volumeCapability := req.GetVolumeCapability() + // VolumeCapability is optional, if specified, use that as source of truth + if volumeCapability != nil { + caps := []*csi.VolumeCapability{volumeCapability} + if !isValidVolumeCapabilities(caps) { + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("VolumeCapability is invalid: %v", volumeCapability)) + } - args := []string{"-o", "source", "--noheadings", "--target", req.GetVolumePath()} + if blk := volumeCapability.GetBlock(); blk != nil { + // Noop for Block NodeExpandVolume + klog.V(4).Infof("NodeExpandVolume called for %v at %s. 
Since it is a block device, ignoring...", volumeID, volumePath) + return &csi.NodeExpandVolumeResponse{}, nil + } + } else { + // VolumeCapability is nil, check if volumePath point to a block device + isBlock, err := d.IsBlockDevice(volumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to determine device path for volumePath [%v]: %v", volumePath, err) + } + if isBlock { + // Skip resizing for Block NodeExpandVolume + bcap, err := d.getBlockSizeBytes(volumePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get block capacity on path %s: %v", req.VolumePath, err) + } + klog.V(4).Infof("NodeExpandVolume called for %v at %s, since given volumePath is a block device, ignoring...", volumeID, volumePath) + return &csi.NodeExpandVolumeResponse{CapacityBytes: bcap}, nil + } + } + + args := []string{"-o", "source", "--noheadings", "--target", volumePath} output, err := d.mounter.Command("findmnt", args...).Output() if err != nil { return nil, status.Errorf(codes.Internal, "Could not determine device path: %v", err) } - devicePath := strings.TrimSpace(string(output)) if len(devicePath) == 0 { return nil, status.Errorf(codes.Internal, "Could not get valid device for mount path: %q", req.GetVolumePath()) @@ -274,11 +307,15 @@ func (d *nodeService) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV }) // TODO: lock per volume ID to have some idempotency - if _, err := r.Resize(devicePath, req.GetVolumePath()); err != nil { + if _, err := r.Resize(devicePath, volumePath); err != nil { return nil, status.Errorf(codes.Internal, "Could not resize volume %q (%q): %v", volumeID, devicePath, err) } - return &csi.NodeExpandVolumeResponse{}, nil + bcap, err := d.getBlockSizeBytes(devicePath) + if err != nil { + return nil, status.Errorf(codes.Internal, "failed to get block capacity on path %s: %v", req.VolumePath, err) + } + return &csi.NodeExpandVolumeResponse{CapacityBytes: bcap}, nil } func (d *nodeService) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { diff --git a/pkg/driver/node_test.go b/pkg/driver/node_test.go index 12f88dee5f..3a2572db19 100644 --- a/pkg/driver/node_test.go +++ b/pkg/driver/node_test.go @@ -1082,6 +1082,92 @@ func TestNodePublishVolume(t *testing.T) { t.Run(tc.name, tc.testFunc) } } +func TestNodeExpandVolume(t *testing.T) { + mockCtl := gomock.NewController(t) + defer mockCtl.Finish() + + mockMetadata := mocks.NewMockMetadataService(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + awsDriver := &nodeService{ + metadata: mockMetadata, + mounter: mockMounter, + inFlight: internal.NewInFlight(), + } + + tests := []struct { + name string + request csi.NodeExpandVolumeRequest + expectResponseCode codes.Code + }{ + { + name: "fail missing volumeId", + request: csi.NodeExpandVolumeRequest{}, + expectResponseCode: codes.InvalidArgument, + }, + { + name: "fail missing volumePath", + request: csi.NodeExpandVolumeRequest{ + StagingTargetPath: "/testDevice/Path", + VolumeId: "test-volume-id", + }, + expectResponseCode: codes.InvalidArgument, + }, + { + name: "fail volume path not exist", + request: csi.NodeExpandVolumeRequest{ + VolumePath: "./test", + VolumeId: "test-volume-id", + }, + expectResponseCode: codes.Internal, + }, + { + name: "Fail validate VolumeCapability", + request: csi.NodeExpandVolumeRequest{ + VolumePath: "./test", + VolumeId: "test-volume-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + 
Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_UNKNOWN, + }, + }, + }, + expectResponseCode: codes.InvalidArgument, + }, + { + name: "Success [VolumeCapability is block]", + request: csi.NodeExpandVolumeRequest{ + VolumePath: "./test", + VolumeId: "test-volume-id", + VolumeCapability: &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Block{ + Block: &csi.VolumeCapability_BlockVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER, + }, + }, + }, + expectResponseCode: codes.OK, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _, err := awsDriver.NodeExpandVolume(context.Background(), &test.request) + if err != nil { + if test.expectResponseCode != codes.OK { + expectErr(t, err, test.expectResponseCode) + } else { + t.Fatalf("Expect no error but got: %v", err) + } + } + }) + } +} func TestNodeUnpublishVolume(t *testing.T) { targetPath := "/test/path" From 1a0bd405de3b5ac142fa2888f85f884540d5cb18 Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Fri, 29 Jan 2021 10:52:40 -0800 Subject: [PATCH 3/4] update sidecar image version and use repo /sig-storage --- charts/aws-ebs-csi-driver/Chart.yaml | 4 +- .../templates/clusterrole-attacher.yaml | 3 + .../templates/clusterrole-provisioner.yaml | 57 ++++----- .../templates/controller.yaml | 4 +- .../templates/statefulset.yaml | 2 +- charts/aws-ebs-csi-driver/values.yaml | 22 ++-- .../base/arm64/clusterrole-attacher.yaml | 24 ++++ .../base/arm64/clusterrole-provisioner.yaml | 39 ++++++ .../arm64/clusterrolebinding-attacher.yaml | 16 +++ .../arm64/clusterrolebinding-provisioner.yaml | 16 +++ deploy/kubernetes/base/arm64/controller.yaml | 102 ++++++++++++++++ deploy/kubernetes/base/arm64/csidriver.yaml | 11 ++ .../kubernetes/base/arm64/kustomization.yaml | 12 ++ deploy/kubernetes/base/arm64/node.yaml | 113 ++++++++++++++++++ .../arm64/serviceaccount-csi-controller.yaml | 12 ++ .../base/clusterrole-provisioner.yaml | 3 + deploy/kubernetes/base/controller.yaml | 8 +- deploy/kubernetes/base/node.yaml | 2 +- .../alpha/controller_add_resizer.yaml | 3 +- .../alpha/controller_add_snapshotter.yaml | 2 +- .../overlays/alpha/snapshot_controller.yaml | 2 +- .../overlays/stable/arm64/kustomization.yaml | 22 ++-- .../overlays/stable/kustomization.yaml | 6 +- docs/README.md | 5 +- 24 files changed, 421 insertions(+), 69 deletions(-) create mode 100644 deploy/kubernetes/base/arm64/clusterrole-attacher.yaml create mode 100644 deploy/kubernetes/base/arm64/clusterrole-provisioner.yaml create mode 100644 deploy/kubernetes/base/arm64/clusterrolebinding-attacher.yaml create mode 100644 deploy/kubernetes/base/arm64/clusterrolebinding-provisioner.yaml create mode 100644 deploy/kubernetes/base/arm64/controller.yaml create mode 100644 deploy/kubernetes/base/arm64/csidriver.yaml create mode 100644 deploy/kubernetes/base/arm64/kustomization.yaml create mode 100644 deploy/kubernetes/base/arm64/node.yaml create mode 100644 deploy/kubernetes/base/arm64/serviceaccount-csi-controller.yaml diff --git a/charts/aws-ebs-csi-driver/Chart.yaml b/charts/aws-ebs-csi-driver/Chart.yaml index 4814b7f96f..c2ff64e2e4 100644 --- a/charts/aws-ebs-csi-driver/Chart.yaml +++ b/charts/aws-ebs-csi-driver/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v1 appVersion: "0.8.1" name: aws-ebs-csi-driver description: A Helm chart for AWS EBS CSI Driver -version: 0.8.2 -kubeVersion: ">=1.14.0-0" +version: 0.8.3 +kubeVersion: 
">=1.17.0-0" home: https://github.com/kubernetes-sigs/aws-ebs-csi-driver sources: - https://github.com/kubernetes-sigs/aws-ebs-csi-driver diff --git a/charts/aws-ebs-csi-driver/templates/clusterrole-attacher.yaml b/charts/aws-ebs-csi-driver/templates/clusterrole-attacher.yaml index c4b766a36a..e0919cec14 100644 --- a/charts/aws-ebs-csi-driver/templates/clusterrole-attacher.yaml +++ b/charts/aws-ebs-csi-driver/templates/clusterrole-attacher.yaml @@ -18,3 +18,6 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments/status" ] + verbs: [ "patch" ] diff --git a/charts/aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml b/charts/aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml index 84f122a039..0fb7ded0fc 100644 --- a/charts/aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml +++ b/charts/aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml @@ -6,30 +6,33 @@ metadata: labels: {{- include "aws-ebs-csi-driver.labels" . | nindent 4 }} rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshots"] - verbs: ["get", "list"] - - apiGroups: ["snapshot.storage.k8s.io"] - resources: ["volumesnapshotcontents"] - verbs: ["get", "list"] - - apiGroups: ["storage.k8s.io"] - resources: ["csinodes"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["get", "list", "watch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [ "" ] + resources: [ "persistentvolumes" ] + verbs: [ "get", "list", "watch", "create", "delete" ] + - apiGroups: [ "" ] + resources: [ "persistentvolumeclaims" ] + verbs: [ "get", "list", "watch", "update" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "storageclasses" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "events" ] + verbs: [ "list", "watch", "create", "update", "patch" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshots" ] + verbs: [ "get", "list" ] + - apiGroups: [ "snapshot.storage.k8s.io" ] + resources: [ "volumesnapshotcontents" ] + verbs: [ "get", "list" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "csinodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "" ] + resources: [ "nodes" ] + verbs: [ "get", "list", "watch" ] + - apiGroups: [ "coordination.k8s.io" ] + resources: [ "leases" ] + verbs: [ "get", "watch", "list", "delete", "update", "create" ] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments" ] + verbs: [ "get", "list", "watch" ] diff --git a/charts/aws-ebs-csi-driver/templates/controller.yaml b/charts/aws-ebs-csi-driver/templates/controller.yaml index c0c94060e2..43b5b082cd 100644 --- a/charts/aws-ebs-csi-driver/templates/controller.yaml +++ b/charts/aws-ebs-csi-driver/templates/controller.yaml @@ -105,8 +105,8 @@ spec: {{- if .Values.extraCreateMetadata }} - --extra-create-metadata {{- end}} - - --enable-leader-election - - --leader-election-type=leases + - 
--leader-election=true + - --default-fstype=ext4 env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock diff --git a/charts/aws-ebs-csi-driver/templates/statefulset.yaml b/charts/aws-ebs-csi-driver/templates/statefulset.yaml index 6a904c4b35..c3925d5fbd 100644 --- a/charts/aws-ebs-csi-driver/templates/statefulset.yaml +++ b/charts/aws-ebs-csi-driver/templates/statefulset.yaml @@ -23,7 +23,7 @@ spec: serviceAccountName: {{ .Values.serviceAccount.snapshot.name }} containers: - name: snapshot-controller - image: quay.io/k8scsi/snapshot-controller:v2.1.1 + image: k8s.gcr.io/sig-storage/snapshot-controller:v3.0.3 args: - --v=5 - --leader-election=false diff --git a/charts/aws-ebs-csi-driver/values.yaml b/charts/aws-ebs-csi-driver/values.yaml index 0e746c7893..cc21073b29 100644 --- a/charts/aws-ebs-csi-driver/values.yaml +++ b/charts/aws-ebs-csi-driver/values.yaml @@ -11,23 +11,23 @@ image: sidecars: provisionerImage: - repository: quay.io/k8scsi/csi-provisioner - tag: "v1.6.0" + repository: k8s.gcr.io/sig-storage/csi-provisioner + tag: "v2.0.2" attacherImage: - repository: quay.io/k8scsi/csi-attacher - tag: "v2.2.0" + repository: k8s.gcr.io/sig-storage/csi-attacher + tag: "v3.0.0" snapshotterImage: - repository: quay.io/k8scsi/csi-snapshotter - tag: "v2.1.1" + repository: k8s.gcr.io/sig-storage/csi-snapshotter + tag: "v3.0.3" livenessProbeImage: - repository: quay.io/k8scsi/livenessprobe + repository: k8s.gcr.io/sig-storage/livenessprobe tag: "v2.1.0" resizerImage: - repository: quay.io/k8scsi/csi-resizer - tag: "v0.5.0" + repository: k8s.gcr.io/sig-storage/csi-resizer + tag: "v1.0.0" nodeDriverRegistrarImage: - repository: quay.io/k8scsi/csi-node-driver-registrar - tag: "v1.3.0" + repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar + tag: "v2.0.1" imagePullSecrets: [] nameOverride: "" diff --git a/deploy/kubernetes/base/arm64/clusterrole-attacher.yaml b/deploy/kubernetes/base/arm64/clusterrole-attacher.yaml new file mode 100644 index 0000000000..5805412acb --- /dev/null +++ b/deploy/kubernetes/base/arm64/clusterrole-attacher.yaml @@ -0,0 +1,24 @@ +--- +# Source: aws-ebs-csi-driver/templates/clusterrole-attacher.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-attacher-role + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] diff --git a/deploy/kubernetes/base/arm64/clusterrole-provisioner.yaml b/deploy/kubernetes/base/arm64/clusterrole-provisioner.yaml new file mode 100644 index 0000000000..b8cf0b054e --- /dev/null +++ b/deploy/kubernetes/base/arm64/clusterrole-provisioner.yaml @@ -0,0 +1,39 @@ +--- +# Source: aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-external-provisioner-role + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: 
["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments" ] + verbs: [ "get", "list", "watch" ] diff --git a/deploy/kubernetes/base/arm64/clusterrolebinding-attacher.yaml b/deploy/kubernetes/base/arm64/clusterrolebinding-attacher.yaml new file mode 100644 index 0000000000..9a97b8efcb --- /dev/null +++ b/deploy/kubernetes/base/arm64/clusterrolebinding-attacher.yaml @@ -0,0 +1,16 @@ +--- +# Source: aws-ebs-csi-driver/templates/clusterrolebinding-attacher.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-attacher-binding + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-attacher-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/kubernetes/base/arm64/clusterrolebinding-provisioner.yaml b/deploy/kubernetes/base/arm64/clusterrolebinding-provisioner.yaml new file mode 100644 index 0000000000..084bed9df9 --- /dev/null +++ b/deploy/kubernetes/base/arm64/clusterrolebinding-provisioner.yaml @@ -0,0 +1,16 @@ +--- +# Source: aws-ebs-csi-driver/templates/clusterrolebinding-provisioner.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: ebs-csi-provisioner-binding + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +subjects: + - kind: ServiceAccount + name: ebs-csi-controller-sa + namespace: kube-system +roleRef: + kind: ClusterRole + name: ebs-external-provisioner-role + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/kubernetes/base/arm64/controller.yaml b/deploy/kubernetes/base/arm64/controller.yaml new file mode 100644 index 0000000000..009b16df29 --- /dev/null +++ b/deploy/kubernetes/base/arm64/controller.yaml @@ -0,0 +1,102 @@ +--- +# Source: aws-ebs-csi-driver/templates/controller.yaml +# Controller Service +kind: Deployment +apiVersion: apps/v1 +metadata: + name: ebs-csi-controller + namespace: kube-system + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +spec: + replicas: 2 + selector: + matchLabels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-controller + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ebs-csi-controller-sa + priorityClassName: system-cluster-critical + tolerations: + - operator: Exists + containers: + - name: ebs-plugin + image: k8s.gcr.io/provider-aws/aws-ebs-csi-driver:latest + imagePullPolicy: IfNotPresent + args: + # - {all,controller,node} # specify the driver mode + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: 
AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-secret + key: key_id + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-secret + key: access_key + optional: true + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: csi-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v2.0.2 + args: + - --csi-address=$(ADDRESS) + - --v=5 + - --feature-gates=Topology=true + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-attacher + image: k8s.gcr.io/sig-storage/csi-attacher:v3.0.0 + args: + - --csi-address=$(ADDRESS) + - --v=5 + - --leader-election=true + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: liveness-probe + image: k8s.gcr.io/sig-storage/livenessprobe:v2.1.0 + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + volumes: + - name: socket-dir + emptyDir: { } diff --git a/deploy/kubernetes/base/arm64/csidriver.yaml b/deploy/kubernetes/base/arm64/csidriver.yaml new file mode 100644 index 0000000000..357b3d71a2 --- /dev/null +++ b/deploy/kubernetes/base/arm64/csidriver.yaml @@ -0,0 +1,11 @@ +--- +# Source: aws-ebs-csi-driver/templates/csidriver.yaml +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: ebs.csi.aws.com + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +spec: + attachRequired: true + podInfoOnMount: false diff --git a/deploy/kubernetes/base/arm64/kustomization.yaml b/deploy/kubernetes/base/arm64/kustomization.yaml new file mode 100644 index 0000000000..f6978d7e21 --- /dev/null +++ b/deploy/kubernetes/base/arm64/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: kube-system +resources: +- clusterrole-attacher.yaml +- clusterrole-provisioner.yaml +- clusterrolebinding-attacher.yaml +- clusterrolebinding-provisioner.yaml +- controller.yaml +- csidriver.yaml +- node.yaml +- serviceaccount-csi-controller.yaml diff --git a/deploy/kubernetes/base/arm64/node.yaml b/deploy/kubernetes/base/arm64/node.yaml new file mode 100644 index 0000000000..2f6c0348da --- /dev/null +++ b/deploy/kubernetes/base/arm64/node.yaml @@ -0,0 +1,113 @@ +--- +# Source: aws-ebs-csi-driver/templates/node.yaml +# Node Service +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: ebs-csi-node + namespace: kube-system + labels: + app.kubernetes.io/name: aws-ebs-csi-driver +spec: + selector: + matchLabels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + template: + metadata: + labels: + app: ebs-csi-node + app.kubernetes.io/name: aws-ebs-csi-driver + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/compute-type + operator: NotIn + values: + - fargate + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + containers: + - name: ebs-plugin + securityContext: + privileged: true + image: 
k8s.gcr.io/provider-aws/aws-ebs-csi-driver:latest + args: + - node + - --endpoint=$(CSI_ENDPOINT) + - --logtostderr + - --v=5 + env: + - name: CSI_ENDPOINT + value: unix:/csi/csi.sock + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /dev + ports: + - name: healthz + containerPort: 9808 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 10 + failureThreshold: 5 + - name: node-driver-registrar + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1 + args: + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + - --v=5 + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"] + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: liveness-probe + image: quay.io/k8scsi/livenessprobe:v2.1.0 + args: + - --csi-address=/csi/csi.sock + volumeMounts: + - name: plugin-dir + mountPath: /csi + volumes: + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ + type: DirectoryOrCreate + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry/ + type: Directory + - name: device-dir + hostPath: + path: /dev + type: Directory diff --git a/deploy/kubernetes/base/arm64/serviceaccount-csi-controller.yaml b/deploy/kubernetes/base/arm64/serviceaccount-csi-controller.yaml new file mode 100644 index 0000000000..529473f3a3 --- /dev/null +++ b/deploy/kubernetes/base/arm64/serviceaccount-csi-controller.yaml @@ -0,0 +1,12 @@ +--- +# Source: aws-ebs-csi-driver/templates/serviceaccount-csi-controller.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ebs-csi-controller-sa + namespace: kube-system + labels: + app.kubernetes.io/name: aws-ebs-csi-driver + #Enable if EKS IAM for SA is used + #annotations: + # eks.amazonaws.com/role-arn: arn:aws:iam::586565787010:role/ebs-csi-role diff --git a/deploy/kubernetes/base/clusterrole-provisioner.yaml b/deploy/kubernetes/base/clusterrole-provisioner.yaml index 827e8f06cd..b8cf0b054e 100644 --- a/deploy/kubernetes/base/clusterrole-provisioner.yaml +++ b/deploy/kubernetes/base/clusterrole-provisioner.yaml @@ -34,3 +34,6 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: [ "storage.k8s.io" ] + resources: [ "volumeattachments" ] + verbs: [ "get", "list", "watch" ] diff --git a/deploy/kubernetes/base/controller.yaml b/deploy/kubernetes/base/controller.yaml index 2c84fc0e97..24ac7cad7a 100644 --- a/deploy/kubernetes/base/controller.yaml +++ b/deploy/kubernetes/base/controller.yaml @@ -66,13 +66,13 @@ spec: periodSeconds: 10 failureThreshold: 5 - name: csi-provisioner - image: quay.io/k8scsi/csi-provisioner:v1.6.0 + image: quay.io/k8scsi/csi-provisioner:v2.0.2 args: - --csi-address=$(ADDRESS) - --v=5 - --feature-gates=Topology=true - - --enable-leader-election - - --leader-election-type=leases + - --leader-election=true + - --default-fstype=ext4 env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock @@ -80,7 +80,7 @@ spec: - 
name: socket-dir mountPath: /var/lib/csi/sockets/pluginproxy/ - name: csi-attacher - image: quay.io/k8scsi/csi-attacher:v2.2.0 + image: quay.io/k8scsi/csi-attacher:v3.0.0 args: - --csi-address=$(ADDRESS) - --v=5 diff --git a/deploy/kubernetes/base/node.yaml b/deploy/kubernetes/base/node.yaml index 1853df522c..56e7a4f77c 100644 --- a/deploy/kubernetes/base/node.yaml +++ b/deploy/kubernetes/base/node.yaml @@ -68,7 +68,7 @@ spec: periodSeconds: 10 failureThreshold: 5 - name: node-driver-registrar - image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 args: - --csi-address=$(ADDRESS) - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) diff --git a/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml b/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml index db7f8002ad..cc50a885a5 100644 --- a/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml +++ b/deploy/kubernetes/overlays/alpha/controller_add_resizer.yaml @@ -8,10 +8,11 @@ spec: spec: containers: - name: csi-resizer - image: quay.io/k8scsi/csi-resizer:v0.3.0 + image: k8s.gcr.io/sig-storage/csi-resizer:v1.0.0 args: - --csi-address=$(ADDRESS) - --v=5 + - --handle-volume-inuse-error=false env: - name: ADDRESS value: /var/lib/csi/sockets/pluginproxy/csi.sock diff --git a/deploy/kubernetes/overlays/alpha/controller_add_snapshotter.yaml b/deploy/kubernetes/overlays/alpha/controller_add_snapshotter.yaml index 166d16230e..11af8a6491 100644 --- a/deploy/kubernetes/overlays/alpha/controller_add_snapshotter.yaml +++ b/deploy/kubernetes/overlays/alpha/controller_add_snapshotter.yaml @@ -8,7 +8,7 @@ spec: spec: containers: - name: csi-snapshotter - image: quay.io/k8scsi/csi-snapshotter:v2.1.1 + image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3 args: - --csi-address=$(ADDRESS) - --leader-election=true diff --git a/deploy/kubernetes/overlays/alpha/snapshot_controller.yaml b/deploy/kubernetes/overlays/alpha/snapshot_controller.yaml index 5b28f4ca02..bc100234d5 100644 --- a/deploy/kubernetes/overlays/alpha/snapshot_controller.yaml +++ b/deploy/kubernetes/overlays/alpha/snapshot_controller.yaml @@ -24,7 +24,7 @@ spec: serviceAccountName: ebs-snapshot-controller containers: - name: snapshot-controller - image: quay.io/k8scsi/snapshot-controller:v2.1.1 + image: k8s.gcr.io/sig-storage/snapshot-controller:v3.0.3 args: - --v=5 - --leader-election=false diff --git a/deploy/kubernetes/overlays/stable/arm64/kustomization.yaml b/deploy/kubernetes/overlays/stable/arm64/kustomization.yaml index 099a97c4b2..bda0746760 100644 --- a/deploy/kubernetes/overlays/stable/arm64/kustomization.yaml +++ b/deploy/kubernetes/overlays/stable/arm64/kustomization.yaml @@ -1,19 +1,15 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization bases: - - ../../../base + - ../../../base/arm64 images: - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver newTag: v0.8.1 - - name: quay.io/k8scsi/csi-provisioner - newName: raspbernetes/csi-external-provisioner - newTag: "1.6.0" - - name: quay.io/k8scsi/csi-attacher - newName: raspbernetes/csi-external-attacher - newTag: "2.2.0" - - name: quay.io/k8scsi/livenessprobe - newName: k8s.gcr.io/sig-storage/livenessprobe - newTag: "v2.1.0" - - name: quay.io/k8scsi/csi-node-driver-registrar - newName: raspbernetes/csi-node-driver-registrar - newTag: "1.3.0" + - name: k8s.gcr.io/sig-storage/csi-provisioner + newTag: v2.0.2 + - name: k8s.gcr.io/sig-storage/csi-attacher + newTag: v3.0.0 + - name: k8s.gcr.io/sig-storage/livenessprobe + newTag: v2.1.0 + - name: 
k8s.gcr.io/sig-storage/csi-node-driver-registrar + newTag: v2.0.1 diff --git a/deploy/kubernetes/overlays/stable/kustomization.yaml b/deploy/kubernetes/overlays/stable/kustomization.yaml index a815e3dd30..941c57ce28 100644 --- a/deploy/kubernetes/overlays/stable/kustomization.yaml +++ b/deploy/kubernetes/overlays/stable/kustomization.yaml @@ -6,10 +6,10 @@ images: - name: k8s.gcr.io/provider-aws/aws-ebs-csi-driver newTag: v0.8.1 - name: quay.io/k8scsi/csi-provisioner - newTag: v1.5.0 + newTag: v2.0.2 - name: quay.io/k8scsi/csi-attacher - newTag: v2.2.0 + newTag: v3.0.0 - name: quay.io/k8scsi/livenessprobe newTag: v2.1.0 - name: quay.io/k8scsi/csi-node-driver-registrar - newTag: v1.3.0 + newTag: v2.0.1 diff --git a/docs/README.md b/docs/README.md index 9e3902ddce..b4074fda52 100644 --- a/docs/README.md +++ b/docs/README.md @@ -50,7 +50,7 @@ Following sections are Kubernetes specific. If you are Kubernetes user, use foll ## Kubernetes Version Compatibility Matrix | AWS EBS CSI Driver \ Kubernetes Version| v1.12 | v1.13 | v1.14 | v1.15 | v1.16 | v1.17 | v1.18+ | |----------------------------------------|-------|-------|-------|-------|-------|-------|-------| -| master branch | no | no+ | yes | yes | yes | yes | yes | +| master branch | no | no+ | no | no | no | yes | yes | | v0.8.x | no | no+ | yes | yes | yes | yes | yes | | v0.7.1 | no | no+ | yes | yes | yes | yes | yes | | v0.6.0 | no | no+ | yes | yes | yes | yes | yes | @@ -109,6 +109,8 @@ kubectl create -f https://raw.githubusercontent.com/kubernetes/csi-api/release-1 ``` #### Deploy driver +Please see the compatibility matrix above before you deploy the driver + If you want to deploy the stable driver without alpha features: ```sh kubectl apply -k "github.com/kubernetes-sigs/aws-ebs-csi-driver/deploy/kubernetes/overlays/stable/?ref=release-0.8" @@ -141,7 +143,6 @@ helm upgrade --install aws-ebs-csi-driver \ --set enableVolumeSnapshot=true \ aws-ebs-csi-driver/aws-ebs-csi-driver ``` - ## Examples Make sure you follow the [Prerequisites](README.md#Prerequisites) before the examples: * [Dynamic Provisioning](../examples/kubernetes/dynamic-provisioning) From c5eba4f1c7d9349cec79b5d0195003bef5e861f0 Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Fri, 29 Jan 2021 18:12:04 -0800 Subject: [PATCH 4/4] add e2e test for volume resizing --- .../templates/clusterrole-resizer.yaml | 4 +- .../alpha/rbac_add_resizer_clusterrole.yaml | 3 + tests/e2e/driver/driver.go | 16 ++-- tests/e2e/driver/ebs_csi_driver.go | 5 +- tests/e2e/dynamic_provisioning.go | 24 +++++ tests/e2e/pre_provsioning.go | 3 +- ...ically_provisioned_resize_volume_tester.go | 92 +++++++++++++++++++ tests/e2e/testsuites/specs.go | 6 +- 8 files changed, 139 insertions(+), 14 deletions(-) create mode 100644 tests/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go diff --git a/charts/aws-ebs-csi-driver/templates/clusterrole-resizer.yaml b/charts/aws-ebs-csi-driver/templates/clusterrole-resizer.yaml index 5f0c758c5c..9d85b97ca4 100644 --- a/charts/aws-ebs-csi-driver/templates/clusterrole-resizer.yaml +++ b/charts/aws-ebs-csi-driver/templates/clusterrole-resizer.yaml @@ -27,5 +27,7 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] - + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] {{- end}} diff --git a/deploy/kubernetes/overlays/alpha/rbac_add_resizer_clusterrole.yaml b/deploy/kubernetes/overlays/alpha/rbac_add_resizer_clusterrole.yaml index c24f13a6c6..9e5f2d14f4 100644 --- 
a/deploy/kubernetes/overlays/alpha/rbac_add_resizer_clusterrole.yaml +++ b/deploy/kubernetes/overlays/alpha/rbac_add_resizer_clusterrole.yaml @@ -27,3 +27,6 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [ "" ] + resources: [ "pods" ] + verbs: [ "get", "list", "watch" ] diff --git a/tests/e2e/driver/driver.go b/tests/e2e/driver/driver.go index 4b54e6912c..853bda4eb1 100644 --- a/tests/e2e/driver/driver.go +++ b/tests/e2e/driver/driver.go @@ -35,7 +35,7 @@ type PVTestDriver interface { // DynamicPVTestDriver represents an interface for a CSI driver that supports DynamicPV type DynamicPVTestDriver interface { // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume - GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass + GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, volumeExpansion *bool, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass } // PreProvisionedVolumeTestDriver represents an interface for a CSI driver that supports pre-provisioned volume @@ -54,6 +54,7 @@ func getStorageClass( parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, + volumeExpansion *bool, bindingMode *storagev1.VolumeBindingMode, allowedTopologies []v1.TopologySelectorTerm, ) *storagev1.StorageClass { @@ -69,12 +70,13 @@ func getStorageClass( ObjectMeta: metav1.ObjectMeta{ GenerateName: generateName, }, - Provisioner: provisioner, - Parameters: parameters, - MountOptions: mountOptions, - ReclaimPolicy: reclaimPolicy, - VolumeBindingMode: bindingMode, - AllowedTopologies: allowedTopologies, + Provisioner: provisioner, + Parameters: parameters, + MountOptions: mountOptions, + ReclaimPolicy: reclaimPolicy, + VolumeBindingMode: bindingMode, + AllowedTopologies: allowedTopologies, + AllowVolumeExpansion: volumeExpansion, } } diff --git a/tests/e2e/driver/ebs_csi_driver.go b/tests/e2e/driver/ebs_csi_driver.go index 94260f1309..9a72392ed4 100644 --- a/tests/e2e/driver/ebs_csi_driver.go +++ b/tests/e2e/driver/ebs_csi_driver.go @@ -41,10 +41,11 @@ func InitEbsCSIDriver() PVTestDriver { } } -func (d *ebsCSIDriver) GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass { +func (d *ebsCSIDriver) GetDynamicProvisionStorageClass(parameters map[string]string, mountOptions []string, reclaimPolicy *v1.PersistentVolumeReclaimPolicy, volumeExpansion *bool, bindingMode *storagev1.VolumeBindingMode, allowedTopologyValues []string, namespace string) *storagev1.StorageClass { provisioner := d.driverName generateName := fmt.Sprintf("%s-%s-dynamic-sc-", namespace, provisioner) allowedTopologies := []v1.TopologySelectorTerm{} + if len(allowedTopologyValues) > 0 { allowedTopologies = []v1.TopologySelectorTerm{ { @@ -57,7 +58,7 @@ func (d *ebsCSIDriver) GetDynamicProvisionStorageClass(parameters map[string]str }, } } - return getStorageClass(generateName, provisioner, parameters, mountOptions, reclaimPolicy, bindingMode, allowedTopologies) + return 
getStorageClass(generateName, provisioner, parameters, mountOptions, reclaimPolicy, volumeExpansion, bindingMode, allowedTopologies) } func (d *ebsCSIDriver) GetVolumeSnapshotClass(namespace string) *v1beta1.VolumeSnapshotClass { diff --git a/tests/e2e/dynamic_provisioning.go b/tests/e2e/dynamic_provisioning.go index 104021d3ff..a5ab32d6b6 100644 --- a/tests/e2e/dynamic_provisioning.go +++ b/tests/e2e/dynamic_provisioning.go @@ -404,6 +404,30 @@ var _ = Describe("[ebs-csi-e2e] [single-az] Dynamic Provisioning", func() { } test.Run(cs, ns) }) + + It("should create a volume on demand and resize it ", func() { + allowVolumeExpansion := true + pod := testsuites.PodDetails{ + Cmd: "echo 'hello world' >> /mnt/test-1/data && grep 'hello world' /mnt/test-1/data && sync", + Volumes: []testsuites.VolumeDetails{ + { + VolumeType: awscloud.VolumeTypeGP2, + FSType: ebscsidriver.FSTypeExt4, + ClaimSize: driver.MinimumSizeForVolumeType(awscloud.VolumeTypeGP2), + VolumeMount: testsuites.VolumeMountDetails{ + NameGenerate: "test-volume-", + MountPathGenerate: "/mnt/test-", + }, + AllowVolumeExpansion: &allowVolumeExpansion, + }, + }, + } + test := testsuites.DynamicallyProvisionedResizeVolumeTest{ + CSIDriver: ebsDriver, + Pod: pod, + } + test.Run(cs, ns) + }) }) var _ = Describe("[ebs-csi-e2e] [single-az] Snapshot", func() { diff --git a/tests/e2e/pre_provsioning.go b/tests/e2e/pre_provsioning.go index 06ef2d86ab..7612279466 100644 --- a/tests/e2e/pre_provsioning.go +++ b/tests/e2e/pre_provsioning.go @@ -17,6 +17,7 @@ package e2e import ( "context" "fmt" + ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" k8srestclient "k8s.io/client-go/rest" "math/rand" "os" @@ -29,8 +30,6 @@ import ( v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - - ebscsidriver "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/driver" ) const ( diff --git a/tests/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go b/tests/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go new file mode 100644 index 0000000000..f1b6330f2c --- /dev/null +++ b/tests/e2e/testsuites/dynamically_provisioned_resize_volume_tester.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/pkg/util" + "github.com/kubernetes-sigs/aws-ebs-csi-driver/tests/e2e/driver" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/framework" + "time" + + . "github.com/onsi/ginkgo" + "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" +) + +// DynamicallyProvisionedResizeVolumeTest will provision required StorageClass(es), PVC(s) and Pod(s) +// Waiting for the PV provisioner to create a new PV +// Update pvc storage size +// Waiting for new PVC and PV to be ready +// And finally attach pvc to the pod and wait for pod to be ready. 
+type DynamicallyProvisionedResizeVolumeTest struct { + CSIDriver driver.DynamicPVTestDriver + Pod PodDetails +} + +func (t *DynamicallyProvisionedResizeVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) { + volume := t.Pod.Volumes[0] + tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver) + defer tpvc.Cleanup() + + pvcName := tpvc.persistentVolumeClaim.Name + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(pvcName, metav1.GetOptions{}) + By(fmt.Sprintf("Get pvc name: %v", pvc.Name)) + originalSize := pvc.Spec.Resources.Requests["storage"] + delta := resource.Quantity{} + delta.Set(util.GiBToBytes(1)) + originalSize.Add(delta) + pvc.Spec.Resources.Requests["storage"] = originalSize + + By("resizing the pvc") + updatedPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Update(pvc) + if err != nil { + framework.ExpectNoError(err, fmt.Sprintf("fail to resize pvc(%s): %v", pvcName, err)) + } + updatedSize := updatedPvc.Spec.Resources.Requests["storage"] + + By("checking the resizing PV result") + error := WaitForPvToResize(client, namespace, updatedPvc.Spec.VolumeName, updatedSize, 1*time.Minute, 5*time.Second) + framework.ExpectNoError(error) + + By("Validate volume can be attached") + tpod := NewTestPod(client, namespace, t.Pod.Cmd) + + tpod.SetupVolume(tpvc.persistentVolumeClaim, volume.VolumeMount.NameGenerate+"1", volume.VolumeMount.MountPathGenerate+"1", volume.VolumeMount.ReadOnly) + + By("deploying the pod") + tpod.Create() + By("checking that the pods is running") + tpod.WaitForSuccess() + + defer tpod.Cleanup() + +} + +// WaitForPvToResize waiting for pvc size to be resized to desired size +func WaitForPvToResize(c clientset.Interface, ns *v1.Namespace, pvName string, desiredSize resource.Quantity, timeout time.Duration, interval time.Duration) error { + By(fmt.Sprintf("Waiting up to %v for pv in namespace %q to be complete", timeout, ns.Name)) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) { + newPv, _ := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) + newPvSize := newPv.Spec.Capacity["storage"] + if desiredSize.Equal(newPvSize) { + By(fmt.Sprintf("Pv size is updated to %v", newPvSize.String())) + return nil + } + } + return fmt.Errorf("Gave up after waiting %v for pv %q to complete resizing", timeout, pvName) +} diff --git a/tests/e2e/testsuites/specs.go b/tests/e2e/testsuites/specs.go index cbe30c507f..6c43d6cf01 100644 --- a/tests/e2e/testsuites/specs.go +++ b/tests/e2e/testsuites/specs.go @@ -39,6 +39,7 @@ type VolumeDetails struct { MountOptions []string ClaimSize string ReclaimPolicy *v1.PersistentVolumeReclaimPolicy + AllowVolumeExpansion *bool VolumeBindingMode *storagev1.VolumeBindingMode AllowedTopologyValues []string VolumeMode VolumeMode @@ -119,7 +120,8 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1 cleanupFuncs := make([]func(), 0) volume := pod.Volumes[0] By("setting up the StorageClass") - storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) + + storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), volume.MountOptions, volume.ReclaimPolicy, volume.AllowVolumeExpansion, volume.VolumeBindingMode, volume.AllowedTopologyValues, 
namespace.Name) tsc := NewTestStorageClass(client, namespace, storageClass) createdStorageClass := tsc.Create() cleanupFuncs = append(cleanupFuncs, tsc.Cleanup) @@ -139,7 +141,7 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1 func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver) (*TestPersistentVolumeClaim, []func()) { cleanupFuncs := make([]func(), 0) By("setting up the StorageClass") - storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) + storageClass := csiDriver.GetDynamicProvisionStorageClass(driver.GetParameters(volume.VolumeType, volume.FSType, volume.Encrypted), volume.MountOptions, volume.ReclaimPolicy, volume.AllowVolumeExpansion, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name) tsc := NewTestStorageClass(client, namespace, storageClass) createdStorageClass := tsc.Create() cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
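
Illustrative usage sketch (not part of the patch series): with the external-resizer sidecar bumped to v1.0.0 and `AllowVolumeExpansion` plumbed through the e2e StorageClass helpers above, online expansion can be exercised with a StorageClass and PVC along these lines. The resource names (`ebs-sc-resizable`, `ebs-claim`) are hypothetical; the provisioner name and `gp2` volume type come from the driver itself.

```yaml
# Sketch only: a StorageClass that permits expansion, plus a PVC that can
# later be resized by raising spec.resources.requests.storage.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ebs-sc-resizable        # hypothetical name
provisioner: ebs.csi.aws.com
allowVolumeExpansion: true
parameters:
  type: gp2
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ebs-claim               # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ebs-sc-resizable
  resources:
    requests:
      storage: 4Gi
```

Patching the claim to a larger size, e.g. `kubectl patch pvc ebs-claim -p '{"spec":{"resources":{"requests":{"storage":"8Gi"}}}}'`, triggers the external-resizer to grow the EBS volume; for filesystem volumes the kubelet then calls `NodeExpandVolume` to resize the filesystem, while raw block volumes take the early-return path added in patch 2/4.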