From 68410d7a38afb5f195ab9a2760c38e74984a7444 Mon Sep 17 00:00:00 2001 From: Triona Doyle Date: Thu, 3 Jul 2025 14:37:38 +0100 Subject: [PATCH 1/2] Add OLM Install & Delete Testing Signed-off-by: Triona Doyle --- .gitignore | 4 +- test/openshift/e2e/ginkgo/fixture/fixture.go | 71 ++-- .../e2e/ginkgo/fixture/olm/fixture.go | 65 ++++ .../ginkgo/fixture/subscription/fixture.go | 98 +++--- .../e2e/ginkgo/fixture/utils/fixtureUtils.go | 16 +- .../1-120_validate_olm_operator_test.go | 314 ++++++++++++++++++ .../01-install.yaml | 19 ++ .../02-assert.yaml | 34 ++ .../03-delete.yaml | 121 +++++++ 9 files changed, 638 insertions(+), 104 deletions(-) create mode 100644 test/openshift/e2e/ginkgo/fixture/olm/fixture.go create mode 100644 test/openshift/e2e/ginkgo/sequential/1-120_validate_olm_operator_test.go create mode 100644 test/openshift/e2e/sequential/1-120_validate_olm_operator/01-install.yaml create mode 100644 test/openshift/e2e/sequential/1-120_validate_olm_operator/02-assert.yaml create mode 100644 test/openshift/e2e/sequential/1-120_validate_olm_operator/03-delete.yaml diff --git a/.gitignore b/.gitignore index 9d4ef10b5..f6fc7d3be 100644 --- a/.gitignore +++ b/.gitignore @@ -22,11 +22,13 @@ kuttl-test.json !vendor/**/zz_generated.* -# editor and IDE paraphernalia +# editor and IDE paraphernalia (inc MacOs) .idea *.swp *.swo *~ +.DS_Store + # ignore vendor vendor/ diff --git a/test/openshift/e2e/ginkgo/fixture/fixture.go b/test/openshift/e2e/ginkgo/fixture/fixture.go index f9d753573..177d97c70 100644 --- a/test/openshift/e2e/ginkgo/fixture/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/fixture.go @@ -29,7 +29,7 @@ import ( "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/argocd" deploymentFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/deployment" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/k8s" - osFixture 
"github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/os" + osFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/os" // Corrected import path subscriptionFixture "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/subscription" "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" appsv1 "k8s.io/api/apps/v1" @@ -40,16 +40,18 @@ import ( ) const ( - // E2ETestLabelsKey and E2ETestLabelsValue are added to cluster-scoped resources (e.g. Namespaces) created by E2E tests (where possible). On startup (and before each test for sequential tests), any resources with this label will be deleted. - E2ETestLabelsKey = "app" - E2ETestLabelsValue = "test-argo-app" + E2ETestLabelsKey = "app" + E2ETestLabelsValue = "test-argo-app" + GitOpsOperatorNamespace = "openshift-gitops-operator" + GitOpsOperatorDeploymentName = "openshift-gitops-operator-controller-manager" + GitOpsOperatorSubscriptionName = "openshift-gitops-operator" + HelloWorldPattern = "helloworld-operator" ) var NamespaceLabels = map[string]string{E2ETestLabelsKey: E2ETestLabelsValue} func EnsureParallelCleanSlate() { - // Increase the maximum length of debug output, for when tests fail format.MaxLength = 64 * 1024 SetDefaultEventuallyTimeout(time.Second * 60) SetDefaultEventuallyPollingInterval(time.Second * 3) @@ -71,20 +73,12 @@ func EnsureParallelCleanSlate() { // Unlike sequential clean slate, parallel clean slate cannot assume that there are no other tests running. This limits our ability to clean up old test artifacts. 
} -// EnsureSequentialCleanSlate will clean up resources that were created during previous sequential tests -// - Deletes namespaces that were created by previous tests -// - Deletes other cluster-scoped resources that were created -// - Reverts changes made to Subscription CR -// - etc func EnsureSequentialCleanSlate() { Expect(EnsureSequentialCleanSlateWithError()).To(Succeed()) } func EnsureSequentialCleanSlateWithError() error { - // With sequential tests, we are always safe to assume that there is no other test running. That allows us to clean up old test artifacts before new test starts. - - // Increase the maximum length of debug output, for when tests fail format.MaxLength = 64 * 1024 SetDefaultEventuallyTimeout(time.Second * 60) SetDefaultEventuallyPollingInterval(time.Second * 3) @@ -131,8 +125,8 @@ func EnsureSequentialCleanSlateWithError() error { Enabled: true, TLS: nil, // TLS: &routev1.TLSConfig{ - // Termination: routev1.TLSTerminationReencrypt, - // InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, + // Termination: routev1.TLSTerminationReencrypt, + // InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect, // }, } @@ -206,13 +200,12 @@ func EnsureSequentialCleanSlateWithError() error { func RemoveDynamicPluginFromCSV(ctx context.Context, k8sClient client.Client) error { if EnvNonOLM() || EnvLocalRun() { - // Skipping as CSV does exist when not using OLM, nor does it exist when running locally return nil } var csv *olmv1alpha1.ClusterServiceVersion var csvList olmv1alpha1.ClusterServiceVersionList - Expect(k8sClient.List(ctx, &csvList, client.InNamespace("openshift-gitops-operator"))).To(Succeed()) + Expect(k8sClient.List(ctx, &csvList, client.InNamespace(GitOpsOperatorNamespace))).To(Succeed()) for idx := range csvList.Items { idxCSV := csvList.Items[idx] @@ -221,7 +214,7 @@ func RemoveDynamicPluginFromCSV(ctx context.Context, k8sClient client.Client) er break } } - Expect(csv).ToNot(BeNil(), "if 
you see this, it likely means, either: A) the operator is not installed via OLM (and you meant to install it), OR B) you are running the operator locally via 'make run', and thus should specify LOCAL_RUN=true env var when calling the test") + Expect(csv).ToNot(BeNil(), "if you see this, it likely means, either: A) the operator is not installed via OLM (and you meant to install it), OR B) you are running the operator locally via 'make run', and thus should specify LOCAL_RUN=true env var when calling the test") if err := updateWithoutConflict(csv, func(obj client.Object) { @@ -374,7 +367,7 @@ func GetEnvInOperatorSubscriptionOrDeployment(key string) (*string, error) { } if EnvNonOLM() { - depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator-controller-manager", Namespace: "openshift-gitops-operator"}} + depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorDeploymentName, Namespace: GitOpsOperatorNamespace}} return deploymentFixture.GetEnv(depl, key) @@ -394,7 +387,7 @@ func GetEnvInOperatorSubscriptionOrDeployment(key string) (*string, error) { } else { - sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator", Namespace: "openshift-gitops-operator"}} + sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorSubscriptionName, Namespace: GitOpsOperatorNamespace}} if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(sub), sub); err != nil { return nil, err } @@ -411,11 +404,11 @@ func SetEnvInOperatorSubscriptionOrDeployment(key string, value string) { k8sClient, _ := utils.GetE2ETestKubeClient() if EnvNonOLM() { - depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator-controller-manager", Namespace: "openshift-gitops-operator"}} + depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorDeploymentName, Namespace: GitOpsOperatorNamespace}}
deploymentFixture.SetEnv(depl, key, value) - WaitForAllDeploymentsInTheNamespaceToBeReady("openshift-gitops-operator", k8sClient) + WaitForAllDeploymentsInTheNamespaceToBeReady(GitOpsOperatorNamespace, k8sClient) } else if EnvCI() { @@ -429,7 +422,7 @@ func SetEnvInOperatorSubscriptionOrDeployment(key string, value string) { } else { - sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator", Namespace: "openshift-gitops-operator"}} + sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorSubscriptionName, Namespace: GitOpsOperatorNamespace}} Expect(k8sClient.Get(context.Background(), client.ObjectKeyFromObject(sub), sub)).To(Succeed()) subscriptionFixture.SetEnv(sub, key, value) @@ -448,11 +441,11 @@ func RemoveEnvFromOperatorSubscriptionOrDeployment(key string) error { } if EnvNonOLM() { - depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator-controller-manager", Namespace: "openshift-gitops-operator"}} + depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorDeploymentName, Namespace: GitOpsOperatorNamespace}} deploymentFixture.RemoveEnv(depl, key) - WaitForAllDeploymentsInTheNamespaceToBeReady("openshift-gitops-operator", k8sClient) + WaitForAllDeploymentsInTheNamespaceToBeReady(GitOpsOperatorNamespace, k8sClient) } else if EnvCI() { @@ -470,7 +463,7 @@ func RemoveEnvFromOperatorSubscriptionOrDeployment(key string) error { } else { - sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator", Namespace: "openshift-gitops-operator"}} + sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorSubscriptionName, Namespace: GitOpsOperatorNamespace}} if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(sub), sub); err != nil { return err } @@ -485,9 +478,7 @@ func RemoveEnvFromOperatorSubscriptionOrDeployment(key string) error { func 
GetSubscriptionInEnvCIEnvironment(k8sClient client.Client) (*olmv1alpha1.Subscription, error) { subscriptionList := olmv1alpha1.SubscriptionList{} - if err := k8sClient.List(context.Background(), &subscriptionList, client.InNamespace("openshift-gitops-operator")); err != nil { - return nil, err - } + Expect(k8sClient.List(context.Background(), &subscriptionList, client.InNamespace(GitOpsOperatorNamespace))).To(Succeed()) var sub *olmv1alpha1.Subscription @@ -516,7 +507,7 @@ func RestoreSubcriptionToDefault() error { if EnvNonOLM() { - depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator-controller-manager", Namespace: "openshift-gitops-operator"}} + depl := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorDeploymentName, Namespace: GitOpsOperatorNamespace}} for _, envKey := range optionalEnvVarsToRemove { deploymentFixture.RemoveEnv(depl, envKey) @@ -539,30 +530,26 @@ func RestoreSubcriptionToDefault() error { subscriptionFixture.RemoveSpecConfig(sub) } - if err := waitForAllEnvVarsToBeRemovedFromDeployments("openshift-gitops-operator", optionalEnvVarsToRemove, k8sClient); err != nil { + if err := waitForAllEnvVarsToBeRemovedFromDeployments(GitOpsOperatorNamespace, optionalEnvVarsToRemove, k8sClient); err != nil { return err } - WaitForAllDeploymentsInTheNamespaceToBeReady("openshift-gitops-operator", k8sClient) - - } else if EnvLocalRun() { - // When running locally, there are no cluster resources to clean up - return nil + WaitForAllDeploymentsInTheNamespaceToBeReady(GitOpsOperatorNamespace, k8sClient) } else { - sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: "openshift-gitops-operator", Namespace: "openshift-gitops-operator"}} + sub := &olmv1alpha1.Subscription{ObjectMeta: metav1.ObjectMeta{Name: GitOpsOperatorSubscriptionName, Namespace: GitOpsOperatorNamespace}} if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(sub), sub); err != nil { return err } 
subscriptionFixture.RemoveSpecConfig(sub) - if err := waitForAllEnvVarsToBeRemovedFromDeployments("openshift-gitops-operator", optionalEnvVarsToRemove, k8sClient); err != nil { + if err := waitForAllEnvVarsToBeRemovedFromDeployments(GitOpsOperatorNamespace, optionalEnvVarsToRemove, k8sClient); err != nil { return err } - WaitForAllDeploymentsInTheNamespaceToBeReady("openshift-gitops-operator", k8sClient) + WaitForAllDeploymentsInTheNamespaceToBeReady(GitOpsOperatorNamespace, k8sClient) } return nil @@ -848,7 +835,6 @@ func OutputDebugOnFail(namespaceParams ...any) { debugOutput, exists := testReportMap[testName] if exists && debugOutput.isOutputted { - // Skip output if we have already outputted once for this test return } @@ -856,7 +842,7 @@ func OutputDebugOnFail(namespaceParams ...any) { isOutputted: true, } - outputPodLog("openshift-gitops-operator-controller-manager") + outputPodLog(GitOpsOperatorDeploymentName) for _, namespace := range namespaces { @@ -911,7 +897,6 @@ func outputPodLog(podSubstring string) { } if len(matchingPods) == 0 { - // This can happen when the operator is not running on the cluster GinkgoWriter.Println("DebugOutputOperatorLogs was called, but no pods were found.") return } @@ -922,7 +907,7 @@ func outputPodLog(podSubstring string) { } // Extract operator logs - kubectlLogOutput, err := osFixture.ExecCommandWithOutputParam(false, "kubectl", "logs", "pod/"+matchingPods[0].Name, "manager", "-n", matchingPods[0].Namespace) + kubectlLogOutput, err := osFixture.ExecCommandWithOutputParam(false, "kubectl", "logs", "pod/"+matchingPods[0].Name, "manager", "-n", GitOpsOperatorNamespace) if err != nil { GinkgoWriter.Println("unable to extract operator logs", err) return diff --git a/test/openshift/e2e/ginkgo/fixture/olm/fixture.go b/test/openshift/e2e/ginkgo/fixture/olm/fixture.go new file mode 100644 index 000000000..654d3e2e8 --- /dev/null +++ b/test/openshift/e2e/ginkgo/fixture/olm/fixture.go @@ -0,0 +1,65 @@ +package olm + +import ( + 
"context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + apierr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" +) + +// WaitForClusterServiceVersion waits for a specific ClusterServiceVersion to reach the 'Succeeded' phase. +func WaitForClusterServiceVersion(ctx context.Context, namespace, csvName string, timeout, pollingInterval time.Duration) { + k8sClient, _, err := utils.GetE2ETestKubeClientWithError() // Get client here + Expect(err).ToNot(HaveOccurred()) + + GinkgoWriter.Printf("Waiting for ClusterServiceVersion '%s' in namespace '%s' to be Succeeded...\n", csvName, namespace) + + csvKey := types.NamespacedName{ + Name: csvName, + Namespace: namespace, + } + + var foundCSV olmv1alpha1.ClusterServiceVersion + + Eventually(func() bool { + getErr := k8sClient.Get(ctx, csvKey, &foundCSV) + if getErr != nil { + if apierr.IsNotFound(getErr) { + GinkgoWriter.Printf("CSV '%s' not found yet. Listing all CSVs in '%s' for debug...\n", csvName, namespace) + var csvList olmv1alpha1.ClusterServiceVersionList + listErr := k8sClient.List(ctx, &csvList, client.InNamespace(namespace)) + if listErr != nil { + GinkgoWriter.Printf("Error listing CSVs for debug: %v\n", listErr) + } else { + for _, csv := range csvList.Items { + GinkgoWriter.Printf("- Found CSV: %s (Phase: %s, Reason: %s)\n", csv.Name, csv.Status.Phase, csv.Status.Reason) + } + } + return false + } + GinkgoWriter.Printf("Error getting CSV '%s': %v. 
Retrying...\n", csvName, getErr) + return false // retrying on errors + } + + // Check if the CSV phase is Succeeded + if foundCSV.Status.Phase == olmv1alpha1.CSVPhaseSucceeded { + GinkgoWriter.Printf("ClusterServiceVersion '%s' is Succeeded.\n", csvName) + return true + } + + GinkgoWriter.Printf("CSV '%s' status is '%s' (Reason: %s). Waiting...\n", csvName, foundCSV.Status.Phase, foundCSV.Status.Reason) + return false // Not succeeded yet + }).WithTimeout(timeout).WithPolling(pollingInterval).Should(BeTrue(), + fmt.Sprintf("Expected ClusterServiceVersion '%s' in namespace '%s' to be Succeeded within %s", csvName, namespace, timeout)) + + GinkgoWriter.Println("ClusterServiceVersion successfully installed and Succeeded.") +} diff --git a/test/openshift/e2e/ginkgo/fixture/subscription/fixture.go b/test/openshift/e2e/ginkgo/fixture/subscription/fixture.go index 7a4b2ab8a..ac3e63b81 100644 --- a/test/openshift/e2e/ginkgo/fixture/subscription/fixture.go +++ b/test/openshift/e2e/ginkgo/fixture/subscription/fixture.go @@ -2,137 +2,130 @@ package subscription import ( "context" + "fmt" + "time" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" // Ensure this path is correct corev1 "k8s.io/api/core/v1" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) -func GetEnv(s *olmv1alpha1.Subscription, key string) (*string, error) { +// PollForSubscriptionCurrentCSV waits for the Subscription to have a CurrentCSV in its status. +// It returns the CurrentCSV name once found. 
+func PollForSubscriptionCurrentCSV(ctx context.Context, subNamespace, subName string, timeout, pollingInterval time.Duration) string { + k8sClient, _, err := utils.GetE2ETestKubeClientWithError() + Expect(err).ToNot(HaveOccurred()) + + GinkgoWriter.Printf("Polling for Subscription '%s/%s' to have a CurrentCSV...\n", subNamespace, subName) + subKey := client.ObjectKey{ + Name: subName, + Namespace: subNamespace, + } + var sub olmv1alpha1.Subscription + var csvName string + + Eventually(func() bool { + getErr := k8sClient.Get(ctx, subKey, &sub) + if getErr != nil { + GinkgoWriter.Printf("Error getting Subscription '%s/%s': %v. Retrying...\n", subNamespace, subName, getErr) + return false + } + if sub.Status.CurrentCSV != "" { + csvName = sub.Status.CurrentCSV + GinkgoWriter.Printf("Subscription '%s/%s' has CurrentCSV: %s\n", subNamespace, subName, csvName) + return true + } + GinkgoWriter.Printf("Subscription '%s/%s' does not have CurrentCSV yet. Status state: %s. Retrying...\n", subNamespace, subName, sub.Status.State) + return false + }).WithTimeout(timeout).WithPolling(pollingInterval).Should(BeTrue(), + fmt.Sprintf("Expected Subscription '%s/%s' to have a CurrentCSV within %s", subNamespace, subName, timeout)) + return csvName +} + +// GetEnv retrieves the value of an environment variable from a Subscription's spec.config.env. 
+func GetEnv(s *olmv1alpha1.Subscription, key string) (*string, error) { k8sClient, _, err := utils.GetE2ETestKubeClientWithError() if err != nil { return nil, err } - if err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(s), s); err != nil { return nil, err } - if s.Spec == nil { - return nil, nil - } - - if s.Spec.Config == nil { - return nil, nil - } - - if s.Spec.Config.Env == nil { + if s.Spec == nil || s.Spec.Config == nil || s.Spec.Config.Env == nil { return nil, nil } - for idx := range s.Spec.Config.Env { - idxEnv := s.Spec.Config.Env[idx] - if idxEnv.Name == key { return &idxEnv.Value, nil } } return nil, nil - } +// SetEnv sets or updates an environment variable in a Subscription's spec.config.env. func SetEnv(subscription *olmv1alpha1.Subscription, key string, value string) { - Update(subscription, func(s *olmv1alpha1.Subscription) { - if s.Spec == nil { s.Spec = &olmv1alpha1.SubscriptionSpec{} } - if s.Spec.Config == nil { s.Spec.Config = &olmv1alpha1.SubscriptionConfig{} } - if s.Spec.Config.Env == nil { s.Spec.Config.Env = []corev1.EnvVar{} } - newEnvVars := []corev1.EnvVar{} - match := false for idx := range s.Spec.Config.Env { - currEnv := s.Spec.Config.Env[idx] - if currEnv.Name == key { - // replace with the value from the param newEnvVars = append(newEnvVars, corev1.EnvVar{Name: key, Value: value}) match = true } else { newEnvVars = append(newEnvVars, currEnv) } } - if !match { newEnvVars = append(newEnvVars, corev1.EnvVar{Name: key, Value: value}) } - s.Spec.Config.Env = newEnvVars - }) - } +// RemoveEnv removes an environment variable from a Subscription's spec.config.env. 
func RemoveEnv(subscription *olmv1alpha1.Subscription, key string) { - Update(subscription, func(s *olmv1alpha1.Subscription) { - - if s.Spec == nil { - return - } - - if s.Spec.Config == nil { - return - } - - if s.Spec.Config.Env == nil { + if s.Spec == nil || s.Spec.Config == nil || s.Spec.Config.Env == nil { return } - newEnvVars := []corev1.EnvVar{} - for idx := range s.Spec.Config.Env { - currEnv := s.Spec.Config.Env[idx] - if currEnv.Name == key { // skip } else { newEnvVars = append(newEnvVars, currEnv) } } - s.Spec.Config.Env = newEnvVars - }) - } -// RemoveSpecConfig removes any configuration data (environment variables) specified under .spec.config of Subscription +// RemoveSpecConfig removes any configuration data (environment variables) specified under .spec.config of Subscription. func RemoveSpecConfig(sub *olmv1alpha1.Subscription) { - Update(sub, func(s *olmv1alpha1.Subscription) { if s.Spec != nil { s.Spec.Config = nil } }) - } // Update will keep trying to update object until it succeeds, or times out. 
@@ -140,17 +133,12 @@ func Update(obj *olmv1alpha1.Subscription, modify func(*olmv1alpha1.Subscription k8sClient, _ := utils.GetE2ETestKubeClient() err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of the object err := k8sClient.Get(context.Background(), client.ObjectKeyFromObject(obj), obj) if err != nil { return err } - modify(obj) - - // Attempt to update the object return k8sClient.Update(context.Background(), obj) }) Expect(err).ToNot(HaveOccurred()) - } diff --git a/test/openshift/e2e/ginkgo/fixture/utils/fixtureUtils.go b/test/openshift/e2e/ginkgo/fixture/utils/fixtureUtils.go index 2c6237613..d7cf465b4 100644 --- a/test/openshift/e2e/ginkgo/fixture/utils/fixtureUtils.go +++ b/test/openshift/e2e/ginkgo/fixture/utils/fixtureUtils.go @@ -3,7 +3,8 @@ package utils import ( "os" - "k8s.io/apimachinery/pkg/runtime" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "sigs.k8s.io/controller-runtime/pkg/client" @@ -12,6 +13,7 @@ import ( argocdv1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" osappsv1 "github.com/openshift/api/apps/v1" + olmv1 "github.com/operator-framework/api/pkg/operators/v1" olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" rolloutmanagerv1alpha1 "github.com/argoproj-labs/argo-rollouts-manager/api/v1alpha1" @@ -32,7 +34,7 @@ import ( . 
"github.com/onsi/gomega" ) -func GetE2ETestKubeClient() (client.Client, *runtime.Scheme) { +func GetE2ETestKubeClient() (client.Client, *k8sruntime.Scheme) { config, err := getSystemKubeConfig() Expect(err).ToNot(HaveOccurred()) @@ -42,7 +44,7 @@ func GetE2ETestKubeClient() (client.Client, *runtime.Scheme) { return k8sClient, scheme } -func GetE2ETestKubeClientWithError() (client.Client, *runtime.Scheme, error) { +func GetE2ETestKubeClientWithError() (client.Client, *k8sruntime.Scheme, error) { config, err := getSystemKubeConfig() if err != nil { return nil, nil, err @@ -57,9 +59,9 @@ func GetE2ETestKubeClientWithError() (client.Client, *runtime.Scheme, error) { } // getKubeClient returns a controller-runtime Client for accessing K8s API resources used by the controller. -func getKubeClient(config *rest.Config) (client.Client, *runtime.Scheme, error) { +func getKubeClient(config *rest.Config) (client.Client, *k8sruntime.Scheme, error) { - scheme := runtime.NewScheme() + scheme := k8sruntime.NewScheme() if err := corev1.AddToScheme(scheme); err != nil { return nil, nil, err @@ -99,6 +101,10 @@ func getKubeClient(config *rest.Config) (client.Client, *runtime.Scheme, error) if err := olmv1alpha1.AddToScheme(scheme); err != nil { return nil, nil, err } + // THIS IS THE CRUCIAL LINE: Add OperatorGroup (olmv1) to the scheme + if err := olmv1.AddToScheme(scheme); err != nil { + return nil, nil, err + } if err := routev1.AddToScheme(scheme); err != nil { return nil, nil, err diff --git a/test/openshift/e2e/ginkgo/sequential/1-120_validate_olm_operator_test.go b/test/openshift/e2e/ginkgo/sequential/1-120_validate_olm_operator_test.go new file mode 100644 index 000000000..3c2914614 --- /dev/null +++ b/test/openshift/e2e/ginkgo/sequential/1-120_validate_olm_operator_test.go @@ -0,0 +1,314 @@ +// test/openshift/e2e/ginkgo/sequential/1-120_validate_olm_operator_test.go +package sequential_test + +import ( + "context" + "fmt" + "strings" + "time" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + olmv1 "github.com/operator-framework/api/pkg/operators/v1" // Corrected import path + olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" // Corrected import path + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture" // Corrected import path + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/olm" // Corrected import path + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/subscription" // Corrected import path + "github.com/redhat-developer/gitops-operator/test/openshift/e2e/ginkgo/fixture/utils" // Corrected import path +) + +var _ = Describe("GitOps Operator Sequential E2E Tests", func() { + Context("1-120_validate_olm_operator", Ordered, func() { + var ctx context.Context + var k8sClient client.Client + var k8sScheme *k8sruntime.Scheme + + const helloworldSubscriptionName = "helloworld-operator-subscription" + const helloworldSubscriptionNamespace = "openshift-operators" + const helloworldOperatorDeploymentName = "helloworld-operator-controller-manager" + const helloworldOperatorPodLabelKey = "control-plane" + const helloworldOperatorPodLabelValue = "controller-manager" + const helloworldOperatorPackageName = "helloworld-operator" + const helloworldOperatorChannel = "stable" + const helloworldOperatorSource = "redhat-operators" + const helloworldOperatorSourceNamespace = "openshift-marketplace" + const helloworldOperatorGroup = "global-operators" + + BeforeAll(func() { + ctx = 
context.Background() + var err error + k8sClient, k8sScheme, err = utils.GetE2ETestKubeClientWithError() + Expect(err).ToNot(HaveOccurred(), "Failed to get Kubernetes client in BeforeAll") + + fixture.EnsureSequentialCleanSlate() + + operatorGroup := &olmv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: helloworldOperatorGroup, + Namespace: helloworldSubscriptionNamespace, + }, + Spec: olmv1.OperatorGroupSpec{ + TargetNamespaces: []string{}, + }, + } + err = k8sClient.Create(ctx, operatorGroup) + if err != nil && !apierr.IsAlreadyExists(err) { + Expect(err).ToNot(HaveOccurred(), "Failed to create helloworld-operator OperatorGroup") + } + + subToCreate := &olmv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: helloworldSubscriptionName, + Namespace: helloworldSubscriptionNamespace, + }, + Spec: &olmv1alpha1.SubscriptionSpec{ + Channel: helloworldOperatorChannel, + Package: helloworldOperatorPackageName, + CatalogSource: helloworldOperatorSource, + CatalogSourceNamespace: helloworldOperatorSourceNamespace, + InstallPlanApproval: olmv1alpha1.ApprovalAutomatic, + }, + } + err = k8sClient.Create(ctx, subToCreate) + if err != nil && !apierr.IsAlreadyExists(err) { + Expect(err).ToNot(HaveOccurred(), "Failed to create helloworld-operator Subscription") + } + + Eventually(func() bool { + subKey := types.NamespacedName{Name: helloworldSubscriptionName, Namespace: helloworldSubscriptionNamespace} + var createdSub olmv1alpha1.Subscription + getErr := k8sClient.Get(ctx, subKey, &createdSub) + return getErr == nil + }).WithTimeout(30*time.Second).WithPolling(5*time.Second).Should(BeTrue(), "helloworld-operator Subscription did not appear after creation.") + + const installTimeout = 18 * time.Minute + const pollingInterval = 5 * time.Second + + csvName := subscription.PollForSubscriptionCurrentCSV( + ctx, + helloworldSubscriptionNamespace, + helloworldSubscriptionName, + installTimeout, + pollingInterval, + ) + + olm.WaitForClusterServiceVersion( + ctx, + 
helloworldSubscriptionNamespace, + csvName, + installTimeout, + pollingInterval, + ) + }) + + BeforeEach(func() { + + }) + + It("should validate the Subscription state is AtLatestKnown", func() { + subKey := types.NamespacedName{Name: helloworldSubscriptionName, Namespace: helloworldSubscriptionNamespace} + var sub olmv1alpha1.Subscription + Eventually(func() olmv1alpha1.SubscriptionState { + err := k8sClient.Get(ctx, subKey, &sub) + Expect(err).ToNot(HaveOccurred(), "Failed to get Subscription during validation") + return sub.Status.State + }).WithTimeout(30*time.Second).WithPolling(5*time.Second).Should(Equal(olmv1alpha1.SubscriptionState("AtLatestKnown")), + "Expected Subscription to be in 'AtLatestKnown' state") + }) + + It("should validate the helloworld-operator Deployment has 1 ready replica", func() { + deploymentKey := types.NamespacedName{Name: helloworldOperatorDeploymentName, Namespace: helloworldSubscriptionNamespace} + var deployment appsv1.Deployment + Eventually(func() bool { + err := k8sClient.Get(ctx, deploymentKey, &deployment) + if err != nil { + return false + } + return deployment.Status.Replicas == 1 && deployment.Status.ReadyReplicas == 1 + }).WithTimeout(2*time.Minute).WithPolling(5*time.Second).Should(BeTrue(), + "Expected helloworld-operator Deployment to have 1 ready replica") + }) + + It("should validate at least one helloworld-operator Pod is running", func() { + Eventually(func() bool { + var podList corev1.PodList + labelSelector := client.MatchingLabels{helloworldOperatorPodLabelKey: helloworldOperatorPodLabelValue} + err := k8sClient.List(ctx, &podList, client.InNamespace(helloworldSubscriptionNamespace), labelSelector) + if err != nil { + return false + } + if len(podList.Items) == 0 { + return false + } + for _, p := range podList.Items { + if p.Status.Phase == corev1.PodRunning { + return true + } + } + return false + }).WithTimeout(2*time.Minute).WithPolling(5*time.Second).Should(BeTrue(), + "Expected at least one 
helloworld-operator pod to be in 'Running' phase") + }) + + AfterAll(func() { + fixture.EnsureSequentialCleanSlate() + + // 1. DELETE Roles, RoleBindings + deleteRBACResources(ctx, k8sClient, k8sScheme, "helloworld", helloworldSubscriptionNamespace) + deleteRBACResources(ctx, k8sClient, k8sScheme, "helloworld", helloworldSubscriptionNamespace) // Deliberate double call + + // 2. DELETE Subscription + subToDelete := &olmv1alpha1.Subscription{ + ObjectMeta: metav1.ObjectMeta{ + Name: helloworldSubscriptionName, + Namespace: helloworldSubscriptionNamespace, + }, + } + deleteK8sResource(ctx, k8sClient, subToDelete) + + // 3. DELETE CSVs + var csvList olmv1alpha1.ClusterServiceVersionList + err := k8sClient.List(ctx, &csvList, client.InNamespace(helloworldSubscriptionNamespace), client.MatchingLabels{"operators.coreos.com/helloworld-operator.openshift-operators": ""}) + if err == nil { + for _, csv := range csvList.Items { + if strings.HasPrefix(csv.Name, helloworldOperatorPackageName+".") { + deleteK8sResource(ctx, k8sClient, &csv) + } + } + } else if !apierr.IsNotFound(err) { + GinkgoWriter.Printf("Error listing CSVs for cleanup: %v\n", err) + } + + // 4. DELETE CRDs + deleteCRDsWithPrefix(ctx, k8sClient, "helloworld") + + // 5. DELETE Namespaces + deleteNamespacesWithPrefix(ctx, k8sClient, "helloworld") + + // 6. DELETE OperatorGroup (last, as it's a container for the operator) + ogToDelete := &olmv1.OperatorGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: helloworldOperatorGroup, + Namespace: helloworldSubscriptionNamespace, + }, + } + deleteK8sResource(ctx, k8sClient, ogToDelete) + }) + }) +}) + +// deleteK8sResource attempts to delete a Kubernetes resource. +// It includes a force-delete attempt if the initial deletion fails, and a short sleep. +// It does NOT wait for the resource to be gone. 
+func deleteK8sResource(ctx context.Context, k8sClient client.Client, obj client.Object) { + // Attempt normal delete first + err := k8sClient.Delete(ctx, obj, client.PropagationPolicy(metav1.DeletePropagationBackground)) + if err != nil && !apierr.IsNotFound(err) { + forceDeleteOptions := &client.DeleteOptions{ + PropagationPolicy: ptr.To(metav1.DeletePropagationBackground), + GracePeriodSeconds: ptr.To[int64](0), + } + err = k8sClient.Delete(ctx, obj, forceDeleteOptions) + if err != nil && !apierr.IsNotFound(err) { + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to force delete %s %s/%s", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetNamespace(), obj.GetName())) + } + } else if apierr.IsNotFound(err) { + return + } + + time.Sleep(5 * time.Second) +} + +// deleteRBACResources deletes RBAC resources with a given prefix. +func deleteRBACResources(ctx context.Context, k8sClient client.Client, scheme *k8sruntime.Scheme, prefix string, namespace string) { + // ClusterRoles + var crList rbacv1.ClusterRoleList + if err := k8sClient.List(ctx, &crList); err == nil { + for _, cr := range crList.Items { + if strings.Contains(cr.Name, prefix) { + deleteK8sResource(ctx, k8sClient, &cr) + } + } + } + + // ClusterRoleBindings + var crbList rbacv1.ClusterRoleBindingList + if err := k8sClient.List(ctx, &crbList); err == nil { + for _, crb := range crbList.Items { + if strings.Contains(crb.Name, prefix) { + deleteK8sResource(ctx, k8sClient, &crb) + } + } + } + + // Roles (in specific namespace) + var roleList rbacv1.RoleList + if err := k8sClient.List(ctx, &roleList, client.InNamespace(namespace)); err == nil { + for _, r := range roleList.Items { + if strings.Contains(r.Name, prefix) { + deleteK8sResource(ctx, k8sClient, &r) + } + } + } + + // RoleBindings (in specific namespace) + var rbList rbacv1.RoleBindingList + if err := k8sClient.List(ctx, &rbList, client.InNamespace(namespace)); err == nil { + for _, rb := range rbList.Items { + if strings.Contains(rb.Name, 
prefix) { + deleteK8sResource(ctx, k8sClient, &rb) + } + } + } +} + +// deleteCRDsWithPrefix deletes CRDs with a given prefix, handling finalizers. +func deleteCRDsWithPrefix(ctx context.Context, k8sClient client.Client, prefix string) { + var crdList apiextensionsv1.CustomResourceDefinitionList + if err := k8sClient.List(ctx, &crdList); err == nil { + for _, crd := range crdList.Items { + if strings.Contains(crd.Name, prefix) { + if len(crd.ObjectMeta.Finalizers) > 0 { + patch := client.MergeFrom(crd.DeepCopy()) + crd.ObjectMeta.Finalizers = []string{} + if patchErr := k8sClient.Patch(ctx, &crd, patch); patchErr != nil { + GinkgoWriter.Printf("Error patching CRD '%s' to remove finalizers: %v\n", crd.Name, patchErr) + } + } + deleteK8sResource(ctx, k8sClient, &crd) + } + } + } +} + +// deleteNamespacesWithPrefix deletes Namespaces with a given prefix, handling finalizers. +func deleteNamespacesWithPrefix(ctx context.Context, k8sClient client.Client, prefix string) { + var nsList corev1.NamespaceList + if err := k8sClient.List(ctx, &nsList); err == nil { + for _, ns := range nsList.Items { + if strings.Contains(ns.Name, prefix) { + if len(ns.ObjectMeta.Finalizers) > 0 { + patch := client.MergeFrom(ns.DeepCopy()) + ns.ObjectMeta.Finalizers = []string{} + if patchErr := k8sClient.Patch(ctx, &ns, patch); patchErr != nil { + GinkgoWriter.Printf("Error patching Namespace '%s' to remove finalizers: %v\n", ns.Name, patchErr) + } + } + deleteK8sResource(ctx, k8sClient, &ns) + } + } + } +} diff --git a/test/openshift/e2e/sequential/1-120_validate_olm_operator/01-install.yaml b/test/openshift/e2e/sequential/1-120_validate_olm_operator/01-install.yaml new file mode 100644 index 000000000..88b799a2b --- /dev/null +++ b/test/openshift/e2e/sequential/1-120_validate_olm_operator/01-install.yaml @@ -0,0 +1,19 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: helloworld-operator-subscription + namespace: openshift-operators +spec: + channel: stable 
+ name: helloworld-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + installPlanApproval: Automatic +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: global-operators + namespace: openshift-operators +spec: + targetNamespaces: [] # all namespaces diff --git a/test/openshift/e2e/sequential/1-120_validate_olm_operator/02-assert.yaml b/test/openshift/e2e/sequential/1-120_validate_olm_operator/02-assert.yaml new file mode 100644 index 000000000..48c7f7cc4 --- /dev/null +++ b/test/openshift/e2e/sequential/1-120_validate_olm_operator/02-assert.yaml @@ -0,0 +1,34 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: helloworld-operator-subscription + namespace: openshift-operators +status: + state: AtLatestKnown +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + namespace: openshift-operators + labels: + operators.coreos.com/helloworld-operator.openshift-operators: "" +status: + phase: Succeeded +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld-operator-controller-manager + namespace: openshift-operators +status: + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + namespace: openshift-operators + labels: + control-plane: controller-manager +status: + phase: Running \ No newline at end of file diff --git a/test/openshift/e2e/sequential/1-120_validate_olm_operator/03-delete.yaml b/test/openshift/e2e/sequential/1-120_validate_olm_operator/03-delete.yaml new file mode 100644 index 000000000..047f7b7bd --- /dev/null +++ b/test/openshift/e2e/sequential/1-120_validate_olm_operator/03-delete.yaml @@ -0,0 +1,121 @@ +# 03-delete.yaml +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + #!/usr/bin/env bash + + + operator_namespace=$(oc get deployment helloworld-operator-controller-manager -n openshift-gitops-operator -o jsonpath='{.metadata.namespace}' --ignore-not-found) + 
operator_namespace=${operator_namespace:-"openshift-operators"} + echo "operator_namespace: $operator_namespace" + + + #DELETE Roles, RoleBindings and Subscriptions + delete_clusterroles() { + cluster_roles=$(oc get clusterroles --no-headers=true | awk '/(helloworld)/{print $1}') + cluster_roles=($(echo "$cluster_roles")) #make array + echo "Deleting ${#cluster_roles[@]} clusterrole(s)" + for cr in "${cluster_roles[@]}" + do + oc delete clusterroles $cr --wait=true || true + done + } + + echo " ******************************************************** " + delete_clusterroles + delete_clusterroles + + roles=$(oc get roles --no-headers=true | awk '/(helloworld)/{print $1}') + roles=($(echo "$roles")) #make array + echo "Deleting ${#roles[@]} role(s)" + for r in "${roles[@]}" + do + oc delete roles $r --wait=true || true + done + + cluster_role_bindings=$(oc get clusterrolebindings --no-headers=true | awk '/(helloworld)/{print $1}') + cluster_role_bindings=($(echo "$cluster_role_bindings")) #make array + echo "Deleting ${#cluster_role_bindings[@]} clusterbinding(s)" + for crb in "${cluster_role_bindings[@]}" + do + oc delete clusterrolebindings $crb --wait=true || true + done + + role_bindings=$(oc get rolebindings --no-headers=true | awk '/(helloworld)/{print $1}') + role_bindings=($(echo "$role_bindings")) #make array + echo "Deleting ${#role_bindings[@]} rolebinding(s)" + for rb in "${role_bindings[@]}" + do + oc delete rolebindings $rb --wait=true || true + done + + echo "Delete subscription" + oc delete subscription helloworld-operator-subscription -n $operator_namespace --ignore-not-found + + #DELETE CSVs (Logic for multiple CSVs) + echo " ******************************************************** " + echo "Delete CSVs". 
+ CSVS=$(oc get csv -n $operator_namespace -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' --ignore-not-found | grep helloworld) + CSVS=($(echo "$CSVS")) #make array + echo "Delete ${#CSVS[@]} CSV(s): ${CSVS[@]}" + + for csv in "${CSVS[@]}" + do + echo "csv_name $csv" + oc delete csv -n $operator_namespace $csv --ignore-not-found || true + done + + #DELETE CRDs + echo " ******************************************************** " + CRDS=$(oc get crd --no-headers=true | awk '/helloworld/ {print $1}') + CRDS=($(echo "$CRDS")) #make array + echo "Delete ${#CRDS[@]} CRDs" + + for c in "${CRDS[@]}" + do + echo "Delete CRD $c" + timeout 5 oc delete crd $c || true + + FINALIZER=$(oc get crd $c -o jsonpath='{.metadata.finalizers}' 2>&1 || true ) + if [[ ! -z "$FINALIZER" && ! "$FINALIZER" =~ "not found" ]]; + then + echo "Patching to remove finalizer .." + oc patch crd $c --type json -p '[{"op":"remove", "path":"/metadata/finalizers"}]' || true + echo -e "Patched \n" + else + echo -e "No Finalizer found (No patching required) \n" + fi + done + + + #DELETE Namespaces + echo " ******************************************************** " + echo "Delete Namespaces" + namespaces=$(oc get ns --no-headers=true | awk '/helloworld/ {print $1}') + namespaces=($(echo "$namespaces")) #make array + + for n in "${namespaces[@]}" + do + echo "Delete namespace $n" + timeout 15 oc delete ns $n || true + ns_finalizer=$(oc get ns $n -o jsonpath='{.spec.finalizers}' 2>&1 || true ) + + if [[ ! -z "$ns_finalizer" && ! "$ns_finalizer" =~ "not found" ]]; + then + echo "Patching namespace: $n" + kubectl get namespace $n -o json > temp-ns.json + #edit json to remove finalizer + jq '.spec.finalizers = []' temp-ns.json > temp-ns.tmp && mv temp-ns.tmp temp-ns.json + + # Patch Finalizer + echo "Patching .." 
+ kubectl replace --raw "/api/v1/namespaces/$n/finalize" -f temp-ns.json + + # Cleanup temporary files + rm temp-ns.json || true + echo "Finalizers cleared for namespace '$n' " + fi + done + + echo -e "\nCleanup Complete" \ No newline at end of file From ae826db8bc5d10bae2dd00266ef58f16f46c473a Mon Sep 17 00:00:00 2001 From: Triona Doyle Date: Tue, 8 Jul 2025 17:54:48 +0100 Subject: [PATCH 2/2] fix DCO remediation Signed-off-by: Triona Doyle