From f3276f4cb264e51e3b97ee2ddf9cac109d30e917 Mon Sep 17 00:00:00 2001
From: Aman Gupta
Date: Fri, 22 Mar 2024 12:31:36 +0530
Subject: [PATCH] fix(kind): change kind for lvm-controller to deployment in e2e-tests (#287)

Signed-off-by: w3aman
---
 .../lvm-controller-high-availability/README.md     |  6 +++---
 .../lvm-controller-high-availability/test.yml      | 14 +++++++-------
 e2e-tests/experiments/upgrade-lvm-localpv/test.yml | 10 +++++-----
 e2e-tests/utils/k8s/deprovision_statefulset.yml    |  2 +-
 upgrade/README.md                                  |  4 ++--
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md b/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md
index a6d815fb..eb60d0fa 100644
--- a/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md
+++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/README.md
@@ -1,6 +1,6 @@
 ## About this experiment
 
-This functional experiment scales up the lvm-controller replicas to run the controller in high-availability mode and then verifies the lvm-localpv behaviour when one of the replicas goes down. It checks the initial number of lvm-controller replicas and scales them up by one if a free node that can schedule the pod is present. The default replica count for the lvm-controller statefulset is one.
+This functional experiment scales up the lvm-controller replicas to run the controller in high-availability mode and then verifies the lvm-localpv behaviour when one of the replicas goes down. It checks the initial number of lvm-controller replicas and scales them up by one if a free node that can schedule the pod is present. The default replica count for the lvm-controller deployment is one.
 
 ## Supported platforms:
 
 K8S: 1.18+
 
 OS: Ubuntu, CentOS
 
 LVM version: LVM 2
 
 ## Exit-Criteria
 
-- lvm-controller statefulset should be scaled up by one replica.
+- lvm-controller deployment should be scaled up by one replica.
 - All the replicas should be in running state.
 - lvm-localpv volumes should be healthy, and data should not be impacted after scaling up the controller.
-- This experiment makes one of the lvm-controller statefulset replicas go down; as a result, the replica that was active/master prior to the experiment will be replaced by one of the remaining replicas after the experiment completes. This happens because of the lease mechanism, which decides which replica serves as master. At any time only one replica is master, and the replicas follow anti-affinity rules so that the replica pods are always scheduled on different nodes.
+- This experiment makes one of the lvm-controller deployment replicas go down; as a result, the replica that was active/master prior to the experiment will be replaced by one of the remaining replicas after the experiment completes. This happens because of the lease mechanism, which decides which replica serves as master. At any time only one replica is master, and the replicas follow anti-affinity rules so that the replica pods are always scheduled on different nodes.
 - Volumes provisioning / deprovisioning should not be impacted if any one replica goes down.
 
 ## How to run
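The failover described in the exit criteria above relies on Kubernetes leader election: each lvm-controller replica competes for a coordination.k8s.io Lease object, and only the current holder acts as master. A minimal sketch for inspecting this by hand (the exact lease name is not stated in the patch, so list the leases first and substitute the real name):

```
# List leader-election leases in the namespace where lvm-controller runs.
kubectl get lease -n kube-system

# Show which replica currently holds a given lease; replace the placeholder
# with a lease name reported by the command above.
kubectl get lease <lvm-controller-lease> -n kube-system \
  -o jsonpath='{.spec.holderIdentity}'
```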
diff --git a/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml b/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml
index a7234916..34cbe00e 100644
--- a/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml
+++ b/e2e-tests/experiments/functional/lvm-controller-high-availability/test.yml
@@ -16,9 +16,9 @@
         vars:
           status: 'SOT'
 
-    - name: Get the no of replicas in lvm-controller statefulset
+    - name: Get the no of replicas in lvm-controller deployment
       shell: >
-        kubectl get sts openebs-lvm-controller -n kube-system -o jsonpath='{.status.replicas}'
+        kubectl get deploy openebs-lvm-controller -n kube-system -o jsonpath='{.status.replicas}'
       args:
         executable: /bin/bash
       register: lvm_ctrl_replicas
@@ -38,9 +38,9 @@
         executable: /bin/bash
       register: no_of_Schedulable_nodes
 
-    - name: scale down the replicas to zero of lvm-controller statefulset
+    - name: scale down the replicas to zero of lvm-controller deployment
      shell: >
-        kubectl scale sts openebs-lvm-controller -n kube-system --replicas=0
+        kubectl scale deploy openebs-lvm-controller -n kube-system --replicas=0
       args:
         executable: /bin/bash
       register: status
@@ -81,15 +81,15 @@
       register: pvc_status
       failed_when: "'Pending' not in pvc_status.stdout"
 
-    - name: scale up the lvm-controller statefulset replica
+    - name: scale up the lvm-controller deployment replica
       shell: >
-        kubectl scale sts openebs-lvm-controller -n kube-system
+        kubectl scale deploy openebs-lvm-controller -n kube-system
         --replicas="{{ lvm_ctrl_replicas.stdout|int + 1 }}"
       args:
         executable: /bin/bash
       failed_when: "{{ lvm_ctrl_replicas.stdout|int + 1 }} > {{no_of_Schedulable_nodes.stdout|int}}"
 
-    - name: check that lvm-controller statefulset replicas are up and running
+    - name: check that lvm-controller deployment replicas are up and running
       shell: >
         kubectl get pods -n kube-system -l app=openebs-lvm-controller
         --no-headers -o custom-columns=:.status.phase | grep Running | wc -l
diff --git a/e2e-tests/experiments/upgrade-lvm-localpv/test.yml b/e2e-tests/experiments/upgrade-lvm-localpv/test.yml
index 4ec58346..760ba226 100644
--- a/e2e-tests/experiments/upgrade-lvm-localpv/test.yml
+++ b/e2e-tests/experiments/upgrade-lvm-localpv/test.yml
@@ -43,9 +43,9 @@
         executable: /bin/bash
       register: lvm_driver_tag
 
-    - name: Get the replica count for lvm-controller statefulset
+    - name: Get the replica count for lvm-controller deployment
      shell: >
-        kubectl get sts openebs-lvm-controller -n kube-system -o jsonpath='{.status.replicas}'
+        kubectl get deploy openebs-lvm-controller -n kube-system -o jsonpath='{.status.replicas}'
       args:
         executable: /bin/bash
       register: no_of_lvm_ctrl_replicas
@@ -92,7 +92,7 @@
       failed_when: "update_status.rc != 0"
       when: "lvm_operator_ns != 'openebs'"
 
-    - name: Update the number of lvm-controller statefulset replicas
+    - name: Update the number of lvm-controller deployment replicas
       replace:
         path: ./new_lvm_operator.yml
         regexp: "replicas: 1"
@@ -158,9 +158,9 @@
       delay: 5
       retries: 20
 
-    - name: Verify that lvm-driver version from the lvm-controller statefulset image is upgraded
+    - name: Verify that lvm-driver version from the lvm-controller deployment image is upgraded
       shell: >
-        kubectl get sts openebs-lvm-controller -n kube-system
+        kubectl get deploy openebs-lvm-controller -n kube-system
         -o jsonpath='{.spec.template.spec.containers[?(@.name=="openebs-lvm-plugin")].image}'
       args:
         executable: /bin/bash
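The tasks above automate a read/scale/verify cycle. The same flow, run by hand with the deployment name and namespace used throughout the patch, looks roughly like this:

```
# Read the current replica count of the lvm-controller deployment.
replicas=$(kubectl get deploy openebs-lvm-controller -n kube-system \
  -o jsonpath='{.status.replicas}')

# Scale up by one replica, then wait until the rollout completes.
kubectl scale deploy openebs-lvm-controller -n kube-system \
  --replicas=$((replicas + 1))
kubectl rollout status deploy/openebs-lvm-controller -n kube-system --timeout=120s

# Count controller pods in Running phase, as the playbook's check does.
kubectl get pods -n kube-system -l app=openebs-lvm-controller \
  --no-headers -o custom-columns=:.status.phase | grep -c Running
```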
diff --git a/e2e-tests/utils/k8s/deprovision_statefulset.yml b/e2e-tests/utils/k8s/deprovision_statefulset.yml
index b1e0b4b9..5967d428 100644
--- a/e2e-tests/utils/k8s/deprovision_statefulset.yml
+++ b/e2e-tests/utils/k8s/deprovision_statefulset.yml
@@ -1,7 +1,7 @@
 ---
 - block:
 
-    - name: Check if the statefulset application exists.
+    - name: Check if the deployment application exists.
       shell: kubectl get pods -n {{ app_ns }} -l {{ app_label }}
       register: pods
       failed_when: "'No resources found' in pods.stdout"
diff --git a/upgrade/README.md b/upgrade/README.md
index d060aa23..99575e7d 100644
--- a/upgrade/README.md
+++ b/upgrade/README.md
@@ -34,7 +34,7 @@ wget https://raw.githubusercontent.com/openebs/lvm-localpv/v0.7.x/deploy/lvm-ope
 
 2. Update the lvm-driver image tag. We have to update this at two places,
-one at `openebs-lvm-plugin` container image in lvm-controller statefulset
+one at `openebs-lvm-plugin` container image in lvm-controller deployment
 ```
 - name: openebs-lvm-plugin
   image: openebs/lvm-driver:ci // update it to openebs/lvm-driver:0.7.0
@@ -62,7 +62,7 @@ and other one at `openebs-lvm-plugin` container in lvm-node daemonset.
         - "--listen-address=$(METRICS_LISTEN_ADDRESS)"
 ```
 
-3. If you were using lvm-controller in high-availability (HA) mode, make sure to update statefulset replicas. By default it is set to one (1).
+3. If you were using lvm-controller in high-availability (HA) mode, make sure to update deployment replicas. By default it is set to one (1).
 ```
 spec:
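The upgrade steps above edit lvm-operator.yaml and re-apply it; as a sketch, the image bump from step 2 can also be done in place with `kubectl set image`. The container name `openebs-lvm-plugin` comes from the README snippet, while the daemonset name below is an assumption to verify with `kubectl get ds -n kube-system`:

```
# Update the lvm-driver image on the controller deployment.
kubectl set image deploy/openebs-lvm-controller -n kube-system \
  openebs-lvm-plugin=openebs/lvm-driver:0.7.0

# Update the same container image on the node daemonset (name assumed).
kubectl set image ds/openebs-lvm-node -n kube-system \
  openebs-lvm-plugin=openebs/lvm-driver:0.7.0
```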