diff --git a/.github/workflows/e2e-chainsaw.yml b/.github/workflows/e2e-chainsaw.yml
index 595ceddb0..e3708c5f9 100644
--- a/.github/workflows/e2e-chainsaw.yml
+++ b/.github/workflows/e2e-chainsaw.yml
@@ -21,6 +21,7 @@ jobs:
           - ./tests/e2e-chainsaw/v1beta2/password/
           - ./tests/e2e-chainsaw/v1beta2/ha-setup/
           - ./tests/e2e-chainsaw/v1beta2/nodeport/
+          - ./tests/e2e-chainsaw/v1beta2/pvc-name/
 
     steps:
       - name: Checkout code
diff --git a/k8sutils/const.go b/k8sutils/const.go
index 3a3da6603..b0377c3be 100644
--- a/k8sutils/const.go
+++ b/k8sutils/const.go
@@ -3,3 +3,7 @@ package k8sutils
 const (
 	AnnotationKeyRecreateStatefulset = "redis.opstreelabs.in/recreate-statefulset"
 )
+
+const (
+	EnvOperatorSTSPVCTemplateName = "OPERATOR_STS_PVC_TEMPLATE_NAME"
+)
diff --git a/k8sutils/finalizer.go b/k8sutils/finalizer.go
index 58fdc680d..b85ef3c51 100644
--- a/k8sutils/finalizer.go
+++ b/k8sutils/finalizer.go
@@ -3,6 +3,7 @@ package k8sutils
 import (
 	"context"
 	"fmt"
+	"k8s.io/utils/env"
 
 	redisv1beta2 "github.com/OT-CONTAINER-KIT/redis-operator/api/v1beta2"
 	"github.com/go-logr/logr"
@@ -133,7 +134,8 @@ func AddRedisSentinelFinalizer(cr *redisv1beta2.RedisSentinel, cl client.Client)
 
 // finalizeRedisPVC delete PVC
 func finalizeRedisPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.Redis) error {
-	PVCName := fmt.Sprintf("%s-%s-0", cr.Name, cr.Name)
+	pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name)
+	PVCName := fmt.Sprintf("%s-%s-0", pvcTemplateName, cr.Name)
 	err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
 	if err != nil && !errors.IsNotFound(err) {
 		logger.Error(err, "Could not delete Persistent Volume Claim", "PVCName", PVCName)
@@ -146,7 +148,8 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr *redis
 	for _, role := range []string{"leader", "follower"} {
 		for i := 0; i < int(cr.Spec.GetReplicaCounts(role)); i++ {
-			PVCName := fmt.Sprintf("%s-%s-%s-%s-%d", cr.Name, role, cr.Name, role, i)
+			pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name+"-"+role)
+			PVCName := fmt.Sprintf("%s-%s-%s-%d", pvcTemplateName, cr.Name, role, i)
 			err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
 			if err != nil && !errors.IsNotFound(err) {
 				logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName)
@@ -171,7 +174,8 @@ func finalizeRedisClusterPVC(client kubernetes.Interface, logger logr.Logger, cr
 // finalizeRedisReplicationPVC delete PVCs
 func finalizeRedisReplicationPVC(client kubernetes.Interface, logger logr.Logger, cr *redisv1beta2.RedisReplication) error {
 	for i := 0; i < int(cr.Spec.GetReplicationCounts("replication")); i++ {
-		PVCName := fmt.Sprintf("%s-%s-%d", cr.Name, cr.Name, i)
+		pvcTemplateName := env.GetString(EnvOperatorSTSPVCTemplateName, cr.Name)
+		PVCName := fmt.Sprintf("%s-%s-%d", pvcTemplateName, cr.Name, i)
 		err := client.CoreV1().PersistentVolumeClaims(cr.Namespace).Delete(context.TODO(), PVCName, metav1.DeleteOptions{})
 		if err != nil && !errors.IsNotFound(err) {
 			logger.Error(err, "Could not delete Persistent Volume Claim "+PVCName)
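Worth making the naming contract explicit before the StatefulSet side: `env.GetString` from `k8s.io/utils/env` returns the variable's value when it is set and the supplied default otherwise, so with `OPERATOR_STS_PVC_TEMPLATE_NAME` unset every finalizer keeps deleting the legacy PVC names. A minimal runnable sketch of the cluster case — the `my-cluster` name and the `clusterPVCNames` helper are hypothetical; only the format strings and defaults come from this diff:

```go
// Reproduces the PVC naming used by finalizeRedisClusterPVC above.
package main

import (
	"fmt"
	"os"

	"k8s.io/utils/env"
)

func clusterPVCNames(crName, role string, replicas int) []string {
	// The template name falls back to "<cr>-<role>", i.e. the StatefulSet
	// name, which yields exactly the pre-change "%s-%s-%s-%s-%d" names.
	tpl := env.GetString("OPERATOR_STS_PVC_TEMPLATE_NAME", crName+"-"+role)
	names := make([]string, 0, replicas)
	for i := 0; i < replicas; i++ {
		names = append(names, fmt.Sprintf("%s-%s-%s-%d", tpl, crName, role, i))
	}
	return names
}

func main() {
	fmt.Println(clusterPVCNames("my-cluster", "leader", 2))
	// [my-cluster-leader-my-cluster-leader-0 my-cluster-leader-my-cluster-leader-1]

	os.Setenv("OPERATOR_STS_PVC_TEMPLATE_NAME", "data")
	fmt.Println(clusterPVCNames("my-cluster", "leader", 2))
	// [data-my-cluster-leader-0 data-my-cluster-leader-1]
}
```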
"path" "sort" "strconv" @@ -269,7 +270,8 @@ func generateStatefulSetsDef(stsMeta metav1.ObjectMeta, params statefulSetParame statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, createPVCTemplate("node-conf", stsMeta, params.NodeConfPersistentVolumeClaim)) } if containerParams.PersistenceEnabled != nil && *containerParams.PersistenceEnabled { - statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, createPVCTemplate(stsMeta.GetName(), stsMeta, params.PersistentVolumeClaim)) + pvcTplName := env.GetString(EnvOperatorSTSPVCTemplateName, stsMeta.GetName()) + statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, createPVCTemplate(pvcTplName, stsMeta, params.PersistentVolumeClaim)) } if params.ExternalConfig != nil { statefulset.Spec.Template.Spec.Volumes = getExternalConfig(*params.ExternalConfig) @@ -328,7 +330,7 @@ func createPVCTemplate(volumeName string, stsMeta metav1.ObjectMeta, storageSpec pvcTemplate.CreationTimestamp = metav1.Time{} pvcTemplate.Name = volumeName pvcTemplate.Labels = stsMeta.GetLabels() - // We want the same annoations as the StatefulSet here + // We want the same annotation as the StatefulSet here pvcTemplate.Annotations = generateStatefulSetsAnots(stsMeta, nil) if storageSpec.Spec.AccessModes == nil { pvcTemplate.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} @@ -559,7 +561,7 @@ func getVolumeMount(name string, persistenceEnabled *bool, clusterMode bool, nod if persistenceEnabled != nil && *persistenceEnabled { VolumeMounts = append(VolumeMounts, corev1.VolumeMount{ - Name: name, + Name: env.GetString(EnvOperatorSTSPVCTemplateName, name), MountPath: "/data", }) } diff --git a/tests/_config/chainsaw-configuration.yaml b/tests/_config/chainsaw-configuration.yaml index 828895dbb..3e2daf4bb 100644 --- a/tests/_config/chainsaw-configuration.yaml +++ b/tests/_config/chainsaw-configuration.yaml @@ -9,5 +9,5 @@ spec: timeouts: apply: 5m delete: 5m - assert: 15m - error: 15m + assert: 10m + error: 10m diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/chainsaw-test.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/chainsaw-test.yaml new file mode 100644 index 000000000..a022f9537 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/chainsaw-test.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: pvc-name +spec: + steps: + - name: Add PVC name environment + try: + - script: + content: | + kubectl patch deployment redis-operator-redis-operator --namespace redis-operator-system --type json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", "value": {"name": "OPERATOR_STS_PVC_TEMPLATE_NAME", "value": "data"}}]' + - sleep: + duration: 1m + + - name: redis-cluster-install + try: + - apply: + file: cluster.yaml + - assert: + file: ready-cluster.yaml + - assert: + file: ready-sts.yaml + - assert: + file: ready-svc.yaml + - assert: + file: ready-pvc.yaml + + - name: redis-cluster-uninstall + try: + - delete: + ref: + name: redis-cluster-v1beta2 + kind: RedisCluster + apiVersion: redis.redis.opstreelabs.in/v1beta2 + - error: + file: ready-cluster.yaml + - error: + file: ready-sts.yaml + - error: + file: ready-svc.yaml + - error: + file: ready-pvc.yaml + + - name: Remove PVC name environment + try: + - script: + content: | + kubectl patch deployment redis-operator-redis-operator --namespace redis-operator-system --type json -p='[{"op": "remove", "path": 
"/spec/template/spec/containers/0/env/1"}]' + kubectl wait --for=condition=available --timeout=300s deployment/redis-operator-redis-operator -n redis-operator-system diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/cluster.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/cluster.yaml new file mode 100644 index 000000000..ecf147104 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/cluster.yaml @@ -0,0 +1,47 @@ +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisCluster +metadata: + name: redis-cluster-v1beta2 +spec: + clusterSize: 3 + clusterVersion: v7 + persistenceEnabled: true + podSecurityContext: + runAsUser: 1000 + fsGroup: 1000 + kubernetesConfig: + image: quay.io/opstree/redis:latest + imagePullPolicy: Always + resources: + requests: + cpu: 101m + memory: 128Mi + limits: + cpu: 101m + memory: 128Mi + redisExporter: + enabled: true + image: quay.io/opstree/redis-exporter:v1.44.0 + imagePullPolicy: Always + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + storage: + volumeClaimTemplate: + spec: + # storageClassName: standard + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi + nodeConfVolume: true + nodeConfVolumeClaimTemplate: + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-cluster.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-cluster.yaml new file mode 100644 index 000000000..49e754e81 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-cluster.yaml @@ -0,0 +1,7 @@ +apiVersion: redis.redis.opstreelabs.in/v1beta2 +kind: RedisCluster +metadata: + name: redis-cluster-v1beta2 +status: + readyFollowerReplicas: 3 + readyLeaderReplicas: 3 diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml new file mode 100644 index 000000000..2511fa8d5 --- /dev/null +++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml @@ -0,0 +1,181 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-0 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-1 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-leader-2 + labels: + app: redis-cluster-v1beta2-leader + redis_setup_type: cluster + role: leader +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-follower-0 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + phase: Bound +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: node-conf-redis-cluster-v1beta2-follower-1 + labels: + app: redis-cluster-v1beta2-follower + redis_setup_type: cluster + role: follower +status: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + 
diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml
new file mode 100644
index 000000000..2511fa8d5
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-pvc.yaml
@@ -0,0 +1,181 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-leader-0
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-leader-1
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-leader-2
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-follower-0
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-follower-1
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: node-conf-redis-cluster-v1beta2-follower-2
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+
+---
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-leader-0
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-leader-1
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-leader-2
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-follower-0
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-follower-1
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: data-redis-cluster-v1beta2-follower-2
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  accessModes:
+    - ReadWriteOnce
+  capacity:
+    storage: 1Gi
+  phase: Bound
diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-sts.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-sts.yaml
new file mode 100644
index 000000000..1053eb784
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-sts.yaml
@@ -0,0 +1,25 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: redis-cluster-v1beta2-leader
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+status:
+  replicas: 3
+  readyReplicas: 3
+
+---
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: redis-cluster-v1beta2-follower
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+status:
+  replicas: 3
+  readyReplicas: 3
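The service assertions that follow cover six objects: for each role there is a main ClusterIP service carrying the exporter port, a headless service for StatefulSet pod DNS, and an `-additional` client-only service. A quick sketch of the expected names (derived from the fixtures below, not from operator code):

```go
package main

import "fmt"

func main() {
	for _, role := range []string{"leader", "follower"} {
		base := "redis-cluster-v1beta2-" + role
		// "" is the main service (client + exporter ports); the other two
		// expose only the client port.
		for _, suffix := range []string{"", "-headless", "-additional"} {
			fmt.Println(base + suffix)
		}
	}
}
```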
diff --git a/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-svc.yaml b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-svc.yaml
new file mode 100644
index 000000000..e8af234a6
--- /dev/null
+++ b/tests/e2e-chainsaw/v1beta2/pvc-name/redis-cluster/ready-svc.yaml
@@ -0,0 +1,201 @@
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  name: redis-cluster-v1beta2-leader-additional
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+  selector:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  name: redis-cluster-v1beta2-leader
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+    - name: redis-exporter
+      port: 9121
+      protocol: TCP
+      targetPort: 9121
+  selector:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  name: redis-cluster-v1beta2-leader-headless
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  clusterIP: None
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+  selector:
+    app: redis-cluster-v1beta2-leader
+    redis_setup_type: cluster
+    role: leader
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  name: redis-cluster-v1beta2-follower
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+    - name: redis-exporter
+      port: 9121
+      protocol: TCP
+      targetPort: 9121
+  selector:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  name: redis-cluster-v1beta2-follower-additional
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+  selector:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  type: ClusterIP
+status:
+  loadBalancer: {}
+---
+apiVersion: v1
+kind: Service
+metadata:
+  annotations:
+    prometheus.io/port: "9121"
+    prometheus.io/scrape: "true"
+    redis.opstreelabs.in: "true"
+    redis.opstreelabs.instance: redis-cluster-v1beta2
+  labels:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  name: redis-cluster-v1beta2-follower-headless
+  ownerReferences:
+    - apiVersion: redis.redis.opstreelabs.in/v1beta2
+      controller: true
+      kind: RedisCluster
+      name: redis-cluster-v1beta2
+spec:
+  clusterIP: None
+  ports:
+    - name: redis-client
+      port: 6379
+      protocol: TCP
+      targetPort: 6379
+  selector:
+    app: redis-cluster-v1beta2-follower
+    redis_setup_type: cluster
+    role: follower
+  type: ClusterIP
+status:
+  loadBalancer: {}
\ No newline at end of file
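Finally, the invariant the whole change hangs on: the claim-template name, the mount name, and the finalizer's delete target must resolve identically for any value of the override, including unset. A self-contained check of that property — `templateName` is a hypothetical stand-in for the three call sites in k8sutils:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/utils/env"
)

const envKey = "OPERATOR_STS_PVC_TEMPLATE_NAME"

func templateName(stsName string) string {
	return env.GetString(envKey, stsName)
}

func main() {
	sts := "redis-cluster-v1beta2-leader"
	for _, override := range []string{"", "data"} {
		os.Unsetenv(envKey)
		if override != "" {
			os.Setenv(envKey, override)
		}
		tpl := templateName(sts)   // VolumeClaimTemplates entry
		mount := templateName(sts) // getVolumeMount's Name
		// PVC the finalizer deletes for ordinal 0:
		pvc := fmt.Sprintf("%s-%s-0", templateName(sts), sts)
		fmt.Printf("override=%q template=%q mount=%q pvc=%q consistent=%v\n",
			override, tpl, mount, pvc, tpl == mount)
	}
}
```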