diff --git a/test/e2e/actuals_test.go b/test/e2e/actuals_test.go
index d8e54cdd6..e31284178 100644
--- a/test/e2e/actuals_test.go
+++ b/test/e2e/actuals_test.go
@@ -901,6 +901,15 @@ func crpEvictionRemovedActual(crpEvictionName string) func() error {
 	}
 }
 
+func crpDisruptionBudgetRemovedActual(crpDisruptionBudgetName string) func() error {
+	return func() error {
+		if err := hubClient.Get(ctx, types.NamespacedName{Name: crpDisruptionBudgetName}, &placementv1alpha1.ClusterResourcePlacementDisruptionBudget{}); !errors.IsNotFound(err) {
+			return fmt.Errorf("CRP disruption budget still exists or an unexpected error occurred: %w", err)
+		}
+		return nil
+	}
+}
+
 func validateCRPSnapshotRevisions(crpName string, wantPolicySnapshotRevision, wantResourceSnapshotRevision int) error {
 	matchingLabels := client.MatchingLabels{placementv1beta1.CRPTrackingLabel: crpName}
 
diff --git a/test/e2e/placement_eviction_test.go b/test/e2e/placement_eviction_test.go
index 916a5e939..a8e390a68 100644
--- a/test/e2e/placement_eviction_test.go
+++ b/test/e2e/placement_eviction_test.go
@@ -11,6 +11,8 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"
 
 	placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1"
 	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
@@ -165,5 +167,373 @@ var _ = Describe("ClusterResourcePlacement eviction of bound binding, no taint s
 		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
 	})
 
+	It("should still place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+})
+
+var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickAll CRP, PDB specified, eviction denied", Ordered, Serial, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess())
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the CRP.
+		crp := &placementv1beta1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+				// Add a custom finalizer; this would allow us to better observe
+				// the behavior of the controllers.
+				Finalizers: []string{customDeletionBlockerFinalizer},
+			},
+			Spec: placementv1beta1.ClusterResourcePlacementSpec{
+				ResourceSelectors: workResourceSelector(),
+			},
+		}
+		Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName)
+	})
+
+	AfterAll(func() {
+		ensureCRPEvictionDeletion(crpEvictionName)
+		ensureCRPDisruptionBudgetDeletion(crpName)
+		ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("create cluster resource placement disruption budget to block eviction", func() {
+		crpdb := placementv1alpha1.ClusterResourcePlacementDisruptionBudget{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+			},
+			Spec: placementv1alpha1.PlacementDisruptionBudgetSpec{
+				MinAvailable: &intstr.IntOrString{
+					Type:   intstr.Int,
+					IntVal: 3,
+				},
+			},
+		}
+		Expect(hubClient.Create(ctx, &crpdb)).To(Succeed(), "Failed to create CRP Disruption Budget %s", crpName)
+	})
+
+	It("create cluster resource placement eviction targeting member cluster 1", func() {
+		crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpEvictionName,
+			},
+			Spec: placementv1alpha1.PlacementEvictionSpec{
+				PlacementName: crpName,
+				ClusterName:   memberCluster1EastProdName,
+			},
+		}
+		Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name)
+	})
+
+	It("should update cluster resource placement eviction status as expected", func() {
+		crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+			ctx, hubClient, crpEvictionName,
+			&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+			&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 3, 3)})
+		Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected")
+	})
+
+	It("should ensure cluster resource placement status is unchanged", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should still place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+})
+
+var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickAll CRP, PDB specified, eviction allowed", Ordered, Serial, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess())
+	taintClusterNames := []string{memberCluster1EastProdName}
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the CRP.
+		crp := &placementv1beta1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+				// Add a custom finalizer; this would allow us to better observe
+				// the behavior of the controllers.
+				Finalizers: []string{customDeletionBlockerFinalizer},
+			},
+			Spec: placementv1beta1.ClusterResourcePlacementSpec{
+				ResourceSelectors: workResourceSelector(),
+			},
+		}
+		Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName)
+	})
+
+	AfterAll(func() {
+		removeTaintsFromMemberClusters(taintClusterNames)
+		ensureCRPEvictionDeletion(crpEvictionName)
+		ensureCRPDisruptionBudgetDeletion(crpName)
+		ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("create cluster resource placement disruption budget to allow eviction", func() {
+		crpdb := placementv1alpha1.ClusterResourcePlacementDisruptionBudget{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+			},
+			Spec: placementv1alpha1.PlacementDisruptionBudgetSpec{
+				MinAvailable: &intstr.IntOrString{
+					Type:   intstr.Int,
+					IntVal: 2,
+				},
+			},
+		}
+		Expect(hubClient.Create(ctx, &crpdb)).To(Succeed(), "Failed to create CRP Disruption Budget %s", crpName)
+	})
+
+	It("add taint to member cluster 1", func() {
+		addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames))
+	})
+
+	It("create cluster resource placement eviction targeting member cluster 1", func() {
+		crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpEvictionName,
+			},
+			Spec: placementv1alpha1.PlacementEvictionSpec{
+				PlacementName: crpName,
+				ClusterName:   memberCluster1EastProdName,
+			},
+		}
+		Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name)
+	})
+
+	It("should update cluster resource placement eviction status as expected", func() {
+		crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+			ctx, hubClient, crpEvictionName,
+			&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+			&testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 3, 3)})
+		Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected")
+	})
+
+	It("should ensure no resources exist on evicted member cluster with taint", func() {
+		unSelectedClusters := []*framework.Cluster{memberCluster1EastProd}
+		for _, cluster := range unSelectedClusters {
+			resourceRemovedActual := workNamespaceRemovedFromClusterActual(cluster)
+			Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to verify that resources no longer exist on member cluster")
+		}
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should place resources on the selected clusters with no taint", func() {
+		targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd}
+		for _, cluster := range targetClusters {
+			resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster)
+			Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the selected clusters")
+		}
+	})
+})
+
+var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP, PDB with MaxUnavailable specified as integer, eviction blocked", Ordered, Serial, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess())
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the CRP.
+		crp := &placementv1beta1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+				// Add a custom finalizer; this would allow us to better observe
+				// the behavior of the controllers.
+				Finalizers: []string{customDeletionBlockerFinalizer},
+			},
+			Spec: placementv1beta1.ClusterResourcePlacementSpec{
+				Policy: &placementv1beta1.PlacementPolicy{
+					PlacementType:    placementv1beta1.PickNPlacementType,
+					NumberOfClusters: ptr.To(int32(3)),
+				},
+				ResourceSelectors: workResourceSelector(),
+			},
+		}
+		Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName)
+	})
+
+	AfterAll(func() {
+		ensureCRPEvictionDeletion(crpEvictionName)
+		ensureCRPDisruptionBudgetDeletion(crpName)
+		ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+	It("should place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("create cluster resource placement disruption budget to block eviction", func() {
+		crpdb := placementv1alpha1.ClusterResourcePlacementDisruptionBudget{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+			},
+			Spec: placementv1alpha1.PlacementDisruptionBudgetSpec{
+				MaxUnavailable: &intstr.IntOrString{
+					Type:   intstr.Int,
+					IntVal: 0,
+				},
+			},
+		}
+		Expect(hubClient.Create(ctx, &crpdb)).To(Succeed(), "Failed to create CRP Disruption Budget %s", crpName)
+	})
+
+	It("create cluster resource placement eviction targeting member cluster 1", func() {
+		crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpEvictionName,
+			},
+			Spec: placementv1alpha1.PlacementEvictionSpec{
+				PlacementName: crpName,
+				ClusterName:   memberCluster1EastProdName,
+			},
+		}
+		Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name)
+	})
+
+	It("should update cluster resource placement eviction status as expected", func() {
+		crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual(
+			ctx, hubClient, crpEvictionName,
+			&testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage},
+			&testutilseviction.IsExecutedEviction{IsExecuted: false, Msg: fmt.Sprintf(condition.EvictionBlockedPDBSpecifiedMessageFmt, 3, 3)})
+		Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected")
+	})
+
+	It("should ensure cluster resource placement status is unchanged", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should still place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+})
+
+var _ = Describe("ClusterResourcePlacement eviction of bound binding - PickN CRP, PDB with MaxUnavailable specified as percentage, eviction allowed", Ordered, Serial, func() {
+	crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess())
+	crpEvictionName := fmt.Sprintf(crpEvictionNameTemplate, GinkgoParallelProcess())
+	taintClusterNames := []string{memberCluster1EastProdName}
+
+	BeforeAll(func() {
+		By("creating work resources")
+		createWorkResources()
+
+		// Create the CRP.
+		crp := &placementv1beta1.ClusterResourcePlacement{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+				// Add a custom finalizer; this would allow us to better observe
+				// the behavior of the controllers.
+				Finalizers: []string{customDeletionBlockerFinalizer},
+			},
+			Spec: placementv1beta1.ClusterResourcePlacementSpec{
+				Policy: &placementv1beta1.PlacementPolicy{
+					PlacementType:    placementv1beta1.PickNPlacementType,
+					NumberOfClusters: ptr.To(int32(3)),
+				},
+				ResourceSelectors: workResourceSelector(),
+			},
+		}
+		Expect(hubClient.Create(ctx, crp)).To(Succeed(), "Failed to create CRP %s", crpName)
+	})
+
+	AfterAll(func() {
+		removeTaintsFromMemberClusters(taintClusterNames)
+		ensureCRPEvictionDeletion(crpEvictionName)
+		ensureCRPDisruptionBudgetDeletion(crpName)
+		ensureCRPAndRelatedResourcesDeletion(crpName, allMemberClusters)
+	})
+
+	It("should update cluster resource placement status as expected", func() {
+		crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), allMemberClusterNames, nil, "0")
+		Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected")
+	})
+
+	It("should place resources on all available member clusters", checkIfPlacedWorkResourcesOnAllMemberClusters)
+
+	It("add taint to member cluster 1", func() {
+		addTaintsToMemberClusters(taintClusterNames, buildTaints(taintClusterNames))
+	})
+
+	It("create cluster resource placement disruption budget to allow eviction", func() {
+		crpdb := placementv1alpha1.ClusterResourcePlacementDisruptionBudget{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpName,
+			},
+			Spec: placementv1alpha1.PlacementDisruptionBudgetSpec{
+				MaxUnavailable: &intstr.IntOrString{
+					Type:   intstr.String,
+					StrVal: "10%",
+				},
+			},
+		}
+		Expect(hubClient.Create(ctx, &crpdb)).To(Succeed(), "Failed to create CRP Disruption Budget %s", crpName)
+	})
+
+	It("create cluster resource placement eviction targeting member cluster 1", func() {
+		crpe := &placementv1alpha1.ClusterResourcePlacementEviction{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: crpEvictionName,
+			},
+			Spec: placementv1alpha1.PlacementEvictionSpec{
+				PlacementName: crpName,
+				ClusterName:   memberCluster1EastProdName,
+			},
+		}
+		Expect(hubClient.Create(ctx, crpe)).To(Succeed(), "Failed to create CRP eviction %s", crpe.Name)
"Failed to create CRP eviction %s", crpe.Name) + }) + + It("should update cluster resource placement eviction status as expected", func() { + crpEvictionStatusUpdatedActual := testutilseviction.StatusUpdatedActual( + ctx, hubClient, crpEvictionName, + &testutilseviction.IsValidEviction{IsValid: true, Msg: condition.EvictionValidMessage}, + &testutilseviction.IsExecutedEviction{IsExecuted: true, Msg: fmt.Sprintf(condition.EvictionAllowedPDBSpecifiedMessageFmt, 3, 3)}) + Eventually(crpEvictionStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement eviction status as expected") + }) + + It("should ensure no resources exist on evicted member cluster with taint", func() { + unSelectedClusters := []*framework.Cluster{memberCluster1EastProd} + for _, cluster := range unSelectedClusters { + resourceRemovedActual := workNamespaceRemovedFromClusterActual(cluster) + Eventually(resourceRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to check if resources doesn't exist on member cluster") + } + }) + + It("should update cluster resource placement status as expected", func() { + crpStatusUpdatedActual := crpStatusUpdatedActual(workResourceIdentifiers(), []string{memberCluster2EastCanaryName, memberCluster3WestProdName}, []string{memberCluster1EastProdName}, "0") + Eventually(crpStatusUpdatedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to update cluster resource placement status as expected") + }) + + It("should place resources on the selected clusters with no taint", func() { + targetClusters := []*framework.Cluster{memberCluster2EastCanary, memberCluster3WestProd} + for _, cluster := range targetClusters { + resourcePlacedActual := workNamespaceAndConfigMapPlacedOnClusterActual(cluster) + Eventually(resourcePlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place resources on the selected clusters") + } + }) }) diff --git a/test/e2e/setup_test.go b/test/e2e/setup_test.go index a7b3e178d..7ffbc3d55 100644 --- a/test/e2e/setup_test.go +++ b/test/e2e/setup_test.go @@ -332,9 +332,9 @@ func beforeSuiteForProcess1() { var _ = SynchronizedBeforeSuite(beforeSuiteForProcess1, beforeSuiteForAllProcesses) var _ = SynchronizedAfterSuite(func() {}, func() { - deleteResourcesForFleetGuardRail() - deleteTestResourceCRD() - setAllMemberClustersToLeave() - checkIfAllMemberClustersHaveLeft() - cleanupInvalidClusters() + //deleteResourcesForFleetGuardRail() + //deleteTestResourceCRD() + //setAllMemberClustersToLeave() + //checkIfAllMemberClustersHaveLeft() + //cleanupInvalidClusters() }) diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go index 98ac0c761..73cb1695c 100644 --- a/test/e2e/utils_test.go +++ b/test/e2e/utils_test.go @@ -928,6 +928,17 @@ func ensureCRPEvictionDeletion(crpEvictionName string) { Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "CRP eviction still exists") } +func ensureCRPDisruptionBudgetDeletion(crpDisruptionBudgetName string) { + crpdb := &placementv1alpha1.ClusterResourcePlacementDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: crpDisruptionBudgetName, + }, + } + Expect(hubClient.Delete(ctx, crpdb)).Should(SatisfyAny(Succeed(), utils.NotFoundMatcher{}), "Failed to delete CRP disruption budget") + removedActual := crpDisruptionBudgetRemovedActual(crpDisruptionBudgetName) + Eventually(removedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "CRP disruption budget 
still exists") +} + // verifyWorkPropagationAndMarkAsAvailable verifies that works derived from a specific CPR have been created // for a specific cluster, and marks these works in the specific member cluster's // reserved namespace as applied and available.