From e38817f91ad0ddc24a7286dbf4f00990a992362f Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 16 Jan 2025 22:20:23 +0800 Subject: [PATCH 1/9] Added DiffReported condition handling in the CRP controller --- apis/placement/v1beta1/binding_types.go | 10 + .../v1beta1/clusterresourceplacement_types.go | 22 + .../placement_status.go | 119 +- .../placement_status_test.go | 2735 +++++++++++++---- pkg/utils/condition/condition.go | 46 + 5 files changed, 2278 insertions(+), 654 deletions(-) diff --git a/apis/placement/v1beta1/binding_types.go b/apis/placement/v1beta1/binding_types.go index 26a52c2cc..58f51e1b7 100644 --- a/apis/placement/v1beta1/binding_types.go +++ b/apis/placement/v1beta1/binding_types.go @@ -174,6 +174,16 @@ const ( // - "False" means not all the resources are available in the target cluster yet. // - "Unknown" means we haven't finished the apply yet so that we cannot check the resource availability. ResourceBindingAvailable ResourceBindingConditionType = "Available" + + // ResourceBindingDiffReported indicates whether Fleet has successfully reported configuration + // differences between the hub cluster and a member cluster for the given resources. + // + // It can have the following condition statuses: + // * True: Fleet has successfully reported configuration differences for all resources. + // * False: Fleet has not yet reported configuration differences for some resources, or an + // error has occurred. + // * Unknown: The diff reporting has just started and its status is yet to be known. + ResourceBindingDiffReported ResourceBindingConditionType = "DiffReported" ) // ClusterResourceBindingList is a collection of ClusterResourceBinding. 
diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index 3dd769041..e6cdb7054 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -1142,6 +1142,17 @@ const ( // array. // - "Unknown" means we haven't finished the apply yet so that we cannot check the resource availability. ClusterResourcePlacementAvailableConditionType ClusterResourcePlacementConditionType = "ClusterResourcePlacementAvailable" + + // ClusterResourcePlacementDiffReportedConditionType indicates whether Fleet has reported + // configuration differences between the desired states of resources as kept in the hub cluster + // and the current states on the all member clusters. + // + // It can have the following condition statuses: + // * True: Fleet has reported complete sets of configuration differences on all member clusters. + // * False: Fleet has not yet reported complete sets of configuration differences on some member + // clusters, or an error has occurred. + // * Unknown: The diff reporting has just started and its status is not yet to be known. + ClusterResourcePlacementDiffReportedConditionType ClusterResourcePlacementConditionType = "ClusterResourcePlacementDiffReported" ) // ResourcePlacementConditionType defines a specific condition of a resource placement. @@ -1197,6 +1208,17 @@ const ( // - "False" means some of them are not available yet. // - "Unknown" means we haven't finished the apply yet so that we cannot check the resource availability. ResourcesAvailableConditionType ResourcePlacementConditionType = "Available" + + // ResourceDiffReportedConditionType indicates whether Fleet has reported configuration + // differences between the desired states of resources as kept in the hub cluster and the + // current states on the selected member cluster. 
+ // + // It can have the following condition statuses: + // * True: Fleet has reported the complete set of configuration differences on the member cluster. + // * False: Fleet has not yet reported the complete set of configuration differences on the + // member cluster, or an error has occurred. + // * Unknown: The diff reporting has just started and its status is yet to be known. + ResourcesDiffReportedConditionType ResourcePlacementConditionType = "DiffReported" ) // PlacementType identifies the type of placement. diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 90b84636e..da1b1cb5d 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -43,8 +43,27 @@ const ( ResourceScheduleFailedReason = "ScheduleFailed" ) +var ( + skippedCondTypesForCSASSAApplyStrategyTypes = map[condition.ResourceCondition]interface{}{ + condition.DiffReportedCondition: nil, + } + skippedCondTypesForReportDiffApplyStrategyType = map[condition.ResourceCondition]interface{}{ + condition.AppliedCondition: nil, + condition.AvailableCondition: nil, + } +) + +// Note (chenyu1): with the newly added DiffReported condition type, the status handling part +// probably needs some refactoring as it is triggering the code complexity linter (original +// cyclomatic complexity score 26, new score 31, threshold 30); the original +// assumption, where condition types can always be handled in sequential order, i.e., one failing +// condition type will exempt Fleet from processing all subsequent condition types, only partially +// stands now. + // setResourceConditions sets the resource related conditions by looking at the bindings and work, excluding the scheduled condition. // It returns whether there is a cluster scheduled or not. 
+// +// nolint: gocyclo func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (bool, error) { placementStatuses := make([]fleetv1beta1.ResourcePlacementStatus, 0, len(latestSchedulingPolicySnapshot.Status.ClusterDecisions)) @@ -93,20 +112,22 @@ func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta if err != nil { return false, err } - for i := range res { - switch res[i] { + for condType, condStatus := range res { + switch condStatus { case metav1.ConditionTrue: - clusterConditionStatusRes[i][condition.TrueConditionStatus]++ + clusterConditionStatusRes[condType][condition.TrueConditionStatus]++ case metav1.ConditionFalse: - clusterConditionStatusRes[i][condition.FalseConditionStatus]++ + clusterConditionStatusRes[condType][condition.FalseConditionStatus]++ case metav1.ConditionUnknown: - clusterConditionStatusRes[i][condition.UnknownConditionStatus]++ + clusterConditionStatusRes[condType][condition.UnknownConditionStatus]++ } } // The resources can be changed without updating the crp spec. - // To reflect the latest resource conditions, we reset the renaming conditions. - for i := condition.ResourceCondition(len(res)); i < condition.TotalCondition; i++ { - meta.RemoveStatusCondition(&rps.Conditions, string(i.ResourcePlacementConditionType())) + // To reflect the latest resource conditions, we reset conditions that are no longer relevant. 
+ for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { + if _, ok := res[i]; !ok { + meta.RemoveStatusCondition(&rps.Conditions, string(i.ResourcePlacementConditionType())) + } } placementStatuses = append(placementStatuses, rps) klog.V(2).InfoS("Populated the resource placement status for the scheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", c.ClusterName, "resourcePlacementStatus", rps) @@ -140,8 +161,26 @@ func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta return false, nil } - i := condition.RolloutStartedCondition - for ; i < condition.TotalCondition; i++ { + // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip + // any updates to the DiffReported condition type; similarly, should the apply strategy be of + // the ReportDiff type, Fleet will skip any updates to the Applied and Available condition + // type. + skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes + if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { + skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType + } + + // Track all the condition types that have been set. + setCondTypes := make(map[condition.ResourceCondition]interface{}) + for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { + if _, ok := skippedCondTypes[i]; ok { + // If the Applied and Available condition types are skipped (as the CRP uses + // the ReportDiff apply strategy), proceed to evaluate the DiffReported condition + // type. 
+ continue + } + + setCondTypes[i] = nil if clusterConditionStatusRes[i][condition.UnknownConditionStatus] > 0 { crp.SetConditions(i.UnknownClusterResourcePlacementCondition(crp.Generation, clusterConditionStatusRes[i][condition.UnknownConditionStatus])) break @@ -167,10 +206,12 @@ func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta } } // reset the remaining conditions, starting from the next one - for i = i + 1; i < condition.TotalCondition; i++ { + for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { // The resources can be changed without updating the crp spec. // To reflect the latest resource conditions, we reset the renaming conditions. - meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) + if _, ok := setCondTypes[i]; !ok { + meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) + } } klog.V(2).InfoS("Populated the placement conditions", "clusterResourcePlacement", klog.KObj(crp)) @@ -219,22 +260,33 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, crp *flee // The resource condition order (index) is defined as const: // const ( // -// RolloutStartedCondition resourceCondition = iota -// OverriddenCondition -// WorkSynchronizedCondition -// AppliedCondition -// AvailableCondition -// TotalCondition +// RolloutStartedCondition resourceCondition = iota +// OverriddenCondition +// WorkSynchronizedCondition +// AppliedCondition +// AvailableCondition +// DiffReportedCondition +// TotalCondition // // ) func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.ClusterResourcePlacement, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, - binding *fleetv1beta1.ClusterResourceBinding, status *fleetv1beta1.ResourcePlacementStatus) ([]metav1.ConditionStatus, error) { + binding *fleetv1beta1.ClusterResourceBinding, status *fleetv1beta1.ResourcePlacementStatus) 
(map[condition.ResourceCondition]metav1.ConditionStatus, error) { + res := make(map[condition.ResourceCondition]metav1.ConditionStatus) if binding == nil { meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) - return []metav1.ConditionStatus{metav1.ConditionUnknown}, nil + res[condition.RolloutStartedCondition] = metav1.ConditionUnknown + return res, nil + } + + // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip + // any updates to the DiffReported condition type; similarly, should the apply strategy be of + // the ReportDiff type, Fleet will skip any updates to the Applied and Available condition + // type. + skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes + if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { + skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType } - res := make([]metav1.ConditionStatus, 0, condition.TotalCondition) // There are few cases: // * if the resourceSnapshotName is not equal, // 1. the status is false, it means the rollout is stuck. @@ -243,12 +295,19 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.Clus // just return the corresponding status. if binding.Spec.ResourceSnapshotName == latestResourceSnapshot.Name { for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { + if _, ok := skippedCondTypes[i]; ok { + // If the Applied and Available condition types are skipped (as the CRP uses + // the ReportDiff apply strategy), proceed to evaluate the DiffReported condition + // type. 
+ continue + } + bindingCond := binding.GetCondition(string(i.ResourceBindingConditionType())) if !condition.IsConditionStatusTrue(bindingCond, binding.Generation) && !condition.IsConditionStatusFalse(bindingCond, binding.Generation) { meta.SetStatusCondition(&status.Conditions, i.UnknownResourceConditionPerCluster(crp.Generation)) klog.V(5).InfoS("Find an unknown condition", "bindingCond", bindingCond, "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) - res = append(res, metav1.ConditionUnknown) + res[i] = metav1.ConditionUnknown break } @@ -262,9 +321,16 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.Clus if bindingCond.Status == metav1.ConditionFalse { status.FailedPlacements = binding.Status.FailedPlacements status.DiffedPlacements = binding.Status.DiffedPlacements - status.DriftedPlacements = binding.Status.DriftedPlacements + } + // Note that configuration drifts can occur whether the manifests are applied + // successfully or not. 
+ status.DriftedPlacements = binding.Status.DriftedPlacements + case condition.DiffReportedCondition: + if bindingCond.Status == metav1.ConditionTrue { + status.DiffedPlacements = binding.Status.DiffedPlacements } } + cond := metav1.Condition{ Type: string(i.ResourcePlacementConditionType()), Status: bindingCond.Status, @@ -273,7 +339,7 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.Clus Message: bindingCond.Message, } meta.SetStatusCondition(&status.Conditions, cond) - res = append(res, bindingCond.Status) + res[i] = bindingCond.Status if bindingCond.Status == metav1.ConditionFalse { break // if the current condition is false, no need to populate the rest conditions @@ -292,12 +358,13 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.Clus Message: "The rollout is being blocked by the rollout strategy", } meta.SetStatusCondition(&status.Conditions, cond) - res = append(res, metav1.ConditionFalse) + res[condition.RolloutStartedCondition] = metav1.ConditionFalse return res, nil } // At this point, either the generation is not the one in the binding spec or the status is true/unknown. // It means the rollout controller has not handled the binding yet. 
meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) klog.V(5).InfoS("The staled binding rollout status is unknown", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) - return []metav1.ConditionStatus{metav1.ConditionUnknown}, nil + res[condition.RolloutStartedCondition] = metav1.ConditionUnknown + return res, nil } diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index b625a5b3b..fd633db1b 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -60,6 +60,27 @@ func TestSetPlacementStatus(t *testing.T) { currentTime := time.Now() oldTransitionTime := metav1.NewTime(currentTime.Add(-1 * time.Hour)) + crp := &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testName, + }, + Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Service", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"region": "east"}, + }, + }, + }, + }, + } + crpWithReportDiffApplyStrategy := crp.DeepCopy() + crpWithReportDiffApplyStrategy.Spec.Strategy.ApplyStrategy = &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + } crpGeneration := int64(25) selectedResources := []fleetv1beta1.ResourceIdentifier{ { @@ -86,6 +107,7 @@ func TestSetPlacementStatus(t *testing.T) { } tests := []struct { name string + crp *fleetv1beta1.ClusterResourcePlacement crpStatus fleetv1beta1.ClusterResourcePlacementStatus policy *fleetv1beta1.PlacementPolicy latestPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot @@ -97,6 +119,7 @@ func TestSetPlacementStatus(t *testing.T) { }{ { name: "empty policy and 
resource status", + crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -142,6 +165,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "unknown status of policy snapshot", + crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -201,6 +225,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "scheduler does not report the latest status for policy snapshot (annotation change)", + crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -261,6 +286,7 @@ func TestSetPlacementStatus(t *testing.T) { { // should not happen in the production as the policySnapshot is immutable name: "scheduler does not report the latest status for policy snapshot and snapshot observation does not match", + crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -319,6 +345,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement has been scheduled and no clusterResourcebindings and works", + crp: crp.DeepCopy(), policy: placementPolicyForTest(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ @@ -465,6 +492,7 @@ func TestSetPlacementStatus(t *testing.T) { { // TODO special handling no cluster is selected name: "the placement has been scheduled for pickAll; none of clusters are selected; no clusterResourceBindings and works", + crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: 
fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -533,6 +561,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement scheduling failed", + crp: crp.DeepCopy(), policy: placementPolicyForTest(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ @@ -659,6 +688,7 @@ func TestSetPlacementStatus(t *testing.T) { // TODO special handling when selected cluster is 0 { name: "the placement scheduling succeeded when numberOfClusters is 0", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(0)), @@ -751,6 +781,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement is completed with clusterResourceBindings and works", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -977,6 +1008,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement is completed with clusterResourceBindings and works (no overrides)", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -1283,6 +1315,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "one of the placement condition is unknown with multiple bindings", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(7)), @@ -1746,6 +1779,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement rollout started condition false", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -1868,6 +1902,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement apply condition false", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ 
PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), @@ -2230,6 +2265,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement available condition false", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -2498,6 +2534,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "update the CRP and it rollout status becomes unknown (reset the existing conditions)", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -2704,6 +2741,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for picFixed", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickFixedPlacementType, ClusterNames: []string{ @@ -2814,6 +2852,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for pickN", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(3)), @@ -2921,6 +2960,7 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for pickN (reset existing status)", + crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(3)), @@ -3123,405 +3163,1791 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - crp := &fleetv1beta1.ClusterResourcePlacement{ + { + name: "ReportDiff apply strategy, all diff reported", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + latestPolicySnapshot: 
&fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ - Name: testName, - }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ - { - Group: corev1.GroupName, - Version: "v1", - Kind: "Service", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "east"}, - }, - }, + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, }, - Policy: tc.policy, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, }, - Status: tc.crpStatus, - } - scheme := serviceScheme(t) - var objects []client.Object - for i := range tc.clusterResourceBindings { - objects = append(objects, &tc.clusterResourceBindings[i]) - } - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objects...). 
- Build() - r := Reconciler{ - Client: fakeClient, - Scheme: scheme, - Recorder: record.NewFakeRecorder(10), - } - crp.Generation = crpGeneration - got, err := r.setPlacementStatus(context.Background(), crp, selectedResources, tc.latestPolicySnapshot, tc.latestResourceSnapshot) - if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { - t.Fatalf("setPlacementStatus() got error %v, want error %v", err, tc.wantErr) - } - if tc.wantErr != nil { - return - } - if got != tc.want { - t.Errorf("setPlacementStatus() = %v, want %v", got, tc.want) - } - - if diff := cmp.Diff(tc.wantStatus, &crp.Status, statusCmpOptions...); diff != "" { - t.Errorf("setPlacementStatus() status mismatch (-want, +got):\n%s", diff) - } - }) - } -} - -func TestBuildResourcePlacementStatusMap(t *testing.T) { - tests := []struct { - name string - status []fleetv1beta1.ResourcePlacementStatus - want map[string][]metav1.Condition - }{ - { - name: "empty status", - status: []fleetv1beta1.ResourcePlacementStatus{}, - want: map[string][]metav1.Condition{}, - }, - { - name: "nil status", - status: nil, - want: map[string][]metav1.Condition{}, - }, - { - name: "contain unselected cluster status", - status: []fleetv1beta1.ResourcePlacementStatus{ - { + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, Conditions: []metav1.Condition{ { - Type: "any", + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, }, }, - }, - }, - want: map[string][]metav1.Condition{}, - }, - { - name: "the status of the selected cluster is not set", - status: []fleetv1beta1.ResourcePlacementStatus{ - { - ClusterName: "member-1", - Conditions: []metav1.Condition{}, - }, - }, - want: map[string][]metav1.Condition{}, - }, - { - name: "the status of the selected clusters are set", - status: []fleetv1beta1.ResourcePlacementStatus{ - { - 
ClusterName: "member-1", - Conditions: []metav1.Condition{ + ClusterDecisions: []fleetv1beta1.ClusterDecision{ { - Type: "any", + ClusterName: "member-1", + Selected: true, + Reason: "success", }, - }, - }, - { - ClusterName: "member-2", - Conditions: []metav1.Condition{ { - Type: "other", + ClusterName: "member-2", + Selected: true, + Reason: "success", }, }, }, }, - want: map[string][]metav1.Condition{ - "member-1": { - { - Type: "any", - }, - }, - "member-2": { - { - Type: "other", + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", }, - }, - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - crp := fleetv1beta1.ClusterResourcePlacement{ - Status: fleetv1beta1.ClusterResourcePlacementStatus{ - PlacementStatuses: tc.status, - }, - } - got := buildResourcePlacementStatusMap(&crp) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("buildResourcePlacementStatusMap() status mismatch (-want, +got):\n%s", diff) - } - }) - } -} - -func TestBuildClusterResourceBindings(t *testing.T) { - policySnapshotName := "policy-2" - tests := []struct { - name string - bindings []fleetv1beta1.ClusterResourceBinding - want map[string]*fleetv1beta1.ClusterResourceBinding - }{ - { - name: "no associated bindings", - bindings: []fleetv1beta1.ClusterResourceBinding{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "other-binding", - Labels: map[string]string{ - fleetv1beta1.CRPTrackingLabel: "other-crp", - }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", }, }, }, - want: map[string]*fleetv1beta1.ClusterResourceBinding{}, - }, - { - name: "deleting binding", - bindings: 
[]fleetv1beta1.ClusterResourceBinding{ + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ { ObjectMeta: metav1.ObjectMeta{ - Name: "deleting-binding", + Name: "binding-diff-reported-1", Labels: map[string]string{ fleetv1beta1.CRPTrackingLabel: testName, }, - DeletionTimestamp: &metav1.Time{Time: time.Date(00002, time.January, 1, 1, 1, 1, 1, time.UTC)}, - Finalizers: []string{"dummy-finalizer"}, + Generation: 1, }, - }, - }, - want: map[string]*fleetv1beta1.ClusterResourceBinding{}, - }, - { - name: "binding having stale policy snapshot", - bindings: []fleetv1beta1.ClusterResourceBinding{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "binding-without-latest-policy-snapshot", - Labels: map[string]string{ - fleetv1beta1.CRPTrackingLabel: testName, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, }, }, - Spec: fleetv1beta1.ResourceBindingSpec{ - SchedulingPolicySnapshotName: "not-latest-policy-snapshot", - TargetCluster: "member-7", + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, 
+ }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, }, }, - }, - want: map[string]*fleetv1beta1.ClusterResourceBinding{}, - }, - { - name: "matched bindings", - bindings: []fleetv1beta1.ClusterResourceBinding{ { ObjectMeta: metav1.ObjectMeta{ - Name: "binding-1", + Name: "binding-diff-reported-2", Labels: map[string]string{ fleetv1beta1.CRPTrackingLabel: testName, }, + Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: "member-1", - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: 
metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: currentTime}, + FirstDiffedObservedTime: metav1.Time{Time: currentTime}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 
crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + { + ClusterName: "member-2", + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "Service", + Name: "svc-name", + Namespace: "svc-namespace", + }, + ObservationTime: metav1.Time{Time: currentTime}, + FirstDiffedObservedTime: metav1.Time{Time: currentTime}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/", + ValueInHub: "(the whole object)", + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ReportDiff apply strategy, one cluster has not reported diff yet", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + 
Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: 
metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + 
ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusUnknownReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + 
ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + { + ClusterName: "member-2", + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, 
+ LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusUnknownReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, + { + name: "ReportDiff apply strategy, one cluster has failed to report diff", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + 
fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + 
Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: 
metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 
crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + { + ClusterName: "member-2", + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusFalseReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + crp := tc.crp + crp.Spec.Policy = tc.policy + crp.Status = tc.crpStatus + scheme := serviceScheme(t) + var objects []client.Object + for i := range tc.clusterResourceBindings { + objects = append(objects, &tc.clusterResourceBindings[i]) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + r := Reconciler{ + Client: fakeClient, + Scheme: scheme, + Recorder: record.NewFakeRecorder(10), + } + crp.Generation = crpGeneration + got, err := r.setPlacementStatus(context.Background(), crp, selectedResources, tc.latestPolicySnapshot, tc.latestResourceSnapshot) + if gotErr, wantErr := err != nil, tc.wantErr != nil; gotErr != wantErr || !errors.Is(err, tc.wantErr) { + t.Fatalf("setPlacementStatus() got error %v, want error %v", err, tc.wantErr) + } + if tc.wantErr != nil { + return + } + if got != tc.want { + t.Errorf("setPlacementStatus() = %v, want %v", got, tc.want) + } + + if diff := cmp.Diff(tc.wantStatus, &crp.Status, statusCmpOptions...); diff != "" { + t.Errorf("setPlacementStatus() status mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestBuildResourcePlacementStatusMap(t *testing.T) { + tests := []struct { + name string + status []fleetv1beta1.ResourcePlacementStatus + want map[string][]metav1.Condition + }{ + { + name: "empty status", + status: []fleetv1beta1.ResourcePlacementStatus{}, + want: map[string][]metav1.Condition{}, + }, + { + name: "nil status", + status: nil, + want: map[string][]metav1.Condition{}, + }, + { + name: "contain unselected cluster status", + status: []fleetv1beta1.ResourcePlacementStatus{ + { + Conditions: []metav1.Condition{ + { + Type: "any", + }, + }, + }, + }, + want: map[string][]metav1.Condition{}, + }, + { + name: "the status of the selected cluster is not set", + status: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{}, + }, + }, + want: map[string][]metav1.Condition{}, + }, + { + name: "the status of the selected clusters are set", + status: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Type: "any", + }, + }, + }, + { + ClusterName: "member-2", + Conditions: []metav1.Condition{ + { + Type: "other", + }, + }, + }, + }, + want: map[string][]metav1.Condition{ + "member-1": { 
+ { + Type: "any", + }, + }, + "member-2": { + { + Type: "other", + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + crp := fleetv1beta1.ClusterResourcePlacement{ + Status: fleetv1beta1.ClusterResourcePlacementStatus{ + PlacementStatuses: tc.status, + }, + } + got := buildResourcePlacementStatusMap(&crp) + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("buildResourcePlacementStatusMap() status mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestBuildClusterResourceBindings(t *testing.T) { + policySnapshotName := "policy-2" + tests := []struct { + name string + bindings []fleetv1beta1.ClusterResourceBinding + want map[string]*fleetv1beta1.ClusterResourceBinding + }{ + { + name: "no associated bindings", + bindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "other-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: "other-crp", + }, + }, + }, + }, + want: map[string]*fleetv1beta1.ClusterResourceBinding{}, + }, + { + name: "deleting binding", + bindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-binding", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + DeletionTimestamp: &metav1.Time{Time: time.Date(00002, time.January, 1, 1, 1, 1, 1, time.UTC)}, + Finalizers: []string{"dummy-finalizer"}, + }, + }, + }, + want: map[string]*fleetv1beta1.ClusterResourceBinding{}, + }, + { + name: "binding having stale policy snapshot", + bindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-without-latest-policy-snapshot", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + SchedulingPolicySnapshotName: "not-latest-policy-snapshot", + TargetCluster: "member-7", + }, + }, + }, + want: map[string]*fleetv1beta1.ClusterResourceBinding{}, + }, + { + name: "matched 
bindings", + bindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: "member-1", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: "member-2", + }, + }, + }, + want: map[string]*fleetv1beta1.ClusterResourceBinding{ + "member-1": { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: "member-1", + }, + }, + "member-2": { + ObjectMeta: metav1.ObjectMeta{ Name: "binding-2", Labels: map[string]string{ fleetv1beta1.CRPTrackingLabel: testName, }, }, - Spec: fleetv1beta1.ResourceBindingSpec{ - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: "member-2", + Spec: fleetv1beta1.ResourceBindingSpec{ + SchedulingPolicySnapshotName: policySnapshotName, + TargetCluster: "member-2", + }, + }, + }, + }, + { + name: "invalid binding with missing target cluster", + bindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-without-latest-policy-snapshot", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + }, + }, + }, + want: map[string]*fleetv1beta1.ClusterResourceBinding{}, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + crp := fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testName, + }, + } + policySnapshot := fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: policySnapshotName, + }, + } + scheme := serviceScheme(t) + var objects []client.Object + for i := range tc.bindings { + objects = append(objects, &tc.bindings[i]) + } + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + Build() + r := Reconciler{ + Client: fakeClient, + } + got, err := r.buildClusterResourceBindings(ctx, &crp, &policySnapshot) + if err != nil { + t.Fatalf("buildClusterResourceBindings() got err %v, want nil", err) + } + if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion")); diff != "" { + t.Errorf("buildClusterResourceBindings() status mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestSetResourcePlacementStatusPerCluster(t *testing.T) { + resourceSnapshotName := "snapshot-1" + cluster := "member-1" + bindingName := "binding-1" + + crp := &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testName, + Generation: crpGeneration, + }, + } + crpWithReportDiffApplyStrategy := crp.DeepCopy() + crpWithReportDiffApplyStrategy.Spec.Strategy.ApplyStrategy = &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + } + + tests := []struct { + name string + crp *fleetv1beta1.ClusterResourcePlacement + binding *fleetv1beta1.ClusterResourceBinding + wantConditionStatusMap map[condition.ResourceCondition]metav1.ConditionStatus + wantResourcePlacementStatus fleetv1beta1.ResourcePlacementStatus + }{ + { + name: "binding not found", + crp: crp.DeepCopy(), + binding: nil, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedUnknownReason, + ObservedGeneration: 
crpGeneration, + }, + }, + }, + }, + { + name: "stale binding with false rollout started condition", + crp: crp.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-latest", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: 1, + }, + }, + }, + }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionFalse, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutNotStartedYetReason, + ObservedGeneration: crpGeneration, + }, + }, + }, + }, + { + name: "stale binding with true rollout started condition", + crp: crp.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: "not-latest", + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + ObservedGeneration: 1, + }, + }, + }, + }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedUnknownReason, + 
ObservedGeneration: crpGeneration, + }, + }, + }, + }, + { + name: "completed binding", + crp: crp.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: cluster, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, + ObservedGeneration: 1, + }, }, }, }, - want: map[string]*fleetv1beta1.ClusterResourceBinding{ - "member-1": { - ObjectMeta: metav1.ObjectMeta{ - Name: "binding-1", - Labels: map[string]string{ - fleetv1beta1.CRPTrackingLabel: testName, - }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: 
metav1.ConditionTrue, + condition.AppliedCondition: metav1.ConditionTrue, + condition.AvailableCondition: metav1.ConditionTrue, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", }, - Spec: fleetv1beta1.ResourceBindingSpec{ - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: "member-1", + { + Name: "override-2", }, }, - "member-2": { - ObjectMeta: metav1.ObjectMeta{ - Name: "binding-2", - Labels: map[string]string{ - fleetv1beta1.CRPTrackingLabel: testName, - }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, }, - Spec: fleetv1beta1.ResourceBindingSpec{ - SchedulingPolicySnapshotName: policySnapshotName, - TargetCluster: "member-2", + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, }, }, }, }, { - name: "invalid binding with missing target cluster", - bindings: []fleetv1beta1.ClusterResourceBinding{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "binding-without-latest-policy-snapshot", - Labels: 
map[string]string{ - fleetv1beta1.CRPTrackingLabel: testName, + name: "unknown rollout started condition", + crp: crp.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: resourceSnapshotName, + TargetCluster: cluster, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + ObservedGeneration: 0, }, }, }, }, - want: map[string]*fleetv1beta1.ClusterResourceBinding{}, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedUnknownReason, + ObservedGeneration: crpGeneration, + }, + }, + }, }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - crp := fleetv1beta1.ClusterResourcePlacement{ + { + name: "false overridden condition", + crp: crp.DeepCopy(), + binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: testName, + Generation: 1, }, - } - policySnapshot := fleetv1beta1.ClusterSchedulingPolicySnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: policySnapshotName, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + TargetCluster: cluster, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + 
Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenFailedReason, + ObservedGeneration: 1, + }, + }, + }, + }, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionFalse, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, }, - } - scheme := serviceScheme(t) - var objects []client.Object - for i := range tc.bindings { - objects = append(objects, &tc.bindings[i]) - } - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objects...). 
- Build() - r := Reconciler{ - Client: fakeClient, - } - got, err := r.buildClusterResourceBindings(ctx, &crp, &policySnapshot) - if err != nil { - t.Fatalf("buildClusterResourceBindings() got err %v, want nil", err) - } - if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion")); diff != "" { - t.Errorf("buildClusterResourceBindings() status mismatch (-want, +got):\n%s", diff) - } - }) - } -} - -func TestSetResourcePlacementStatusPerCluster(t *testing.T) { - resourceSnapshotName := "snapshot-1" - cluster := "member-1" - bindingName := "binding-1" - tests := []struct { - name string - binding *fleetv1beta1.ClusterResourceBinding - want []metav1.ConditionStatus - wantStatus fleetv1beta1.ResourcePlacementStatus - }{ - { - name: "binding not found", - binding: nil, - want: []metav1.ConditionStatus{metav1.ConditionUnknown}, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, Conditions: []metav1.Condition{ { - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenFailedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedUnknownReason, + Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "stale binding with false rollout started condition", + name: "unknown work created condition", + crp: crp.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: "not-latest", + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + 
SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: cluster, }, Status: fleetv1beta1.ResourceBindingStatus{ Conditions: []metav1.Condition{ { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - Reason: condition.RolloutNotStartedYetReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedUnknownReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{metav1.ConditionFalse}, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionUnknown, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, Conditions: []metav1.Condition{ { - Status: metav1.ConditionFalse, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutNotStartedYetReason, + Reason: 
condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedUnknownReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "stale binding with true rollout started condition", + name: "false applied condition", + crp: crp.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: "not-latest", + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: cluster, }, Status: fleetv1beta1.ResourceBindingStatus{ + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: 
string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{metav1.ConditionUnknown}, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.AppliedCondition: metav1.ConditionFalse, + }, + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ + ClusterName: cluster, + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, + }, + }, Conditions: []metav1.Condition{ { - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplyFailedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedUnknownReason, + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "completed binding", + name: "false available condition", + crp: crp.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ @@ -3540,6 +4966,21 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { TargetCluster: cluster, }, Status: fleetv1beta1.ResourceBindingStatus{ + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3566,22 +5007,22 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ObservedGeneration: 1, }, { - Status: metav1.ConditionTrue, + Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.AvailableReason, + Reason: condition.NotAvailableYetReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.AppliedCondition: metav1.ConditionTrue, + condition.AvailableCondition: metav1.ConditionFalse, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, 
ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ @@ -3593,6 +5034,21 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Name: "override-2", }, }, + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "config-name", + Namespace: "config-namespace", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeAvailable, + Status: metav1.ConditionFalse, + }, + }, + }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3601,9 +5057,9 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ObservedGeneration: crpGeneration, }, { - Status: metav1.ConditionTrue, + Status: metav1.ConditionFalse, Type: string(fleetv1beta1.ResourcesAvailableConditionType), - Reason: condition.AvailableReason, + Reason: condition.NotAvailableYetReason, ObservedGeneration: crpGeneration, }, { @@ -3628,61 +5084,78 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, { - name: "unknown rollout started condition", + name: "drifts and configuration diffs", + crp: crp.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - TargetCluster: cluster, + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, + ClusterResourceOverrideSnapshots: []string{}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: cluster, }, Status: fleetv1beta1.ResourceBindingStatus{ - Conditions: []metav1.Condition{ + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingRolloutStarted), - ObservedGeneration: 0, + 
ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, }, }, - }, - }, - want: []metav1.ConditionStatus{ - metav1.ConditionUnknown, - }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ - ClusterName: cluster, - Conditions: []metav1.Condition{ - { - Status: metav1.ConditionUnknown, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedUnknownReason, - ObservedGeneration: crpGeneration, - }, - }, - }, - }, - { - name: "false overridden condition", - binding: &fleetv1beta1.ClusterResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Generation: 1, - }, - Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ { - Name: "override-1", - Namespace: "override-ns", + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, + }, }, + }, + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ { - Name: "override-2", + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "app-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(2)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/replicas", + ValueInMember: "1", + 
ValueInHub: "2", + }, + }, }, }, - ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, - TargetCluster: cluster, - }, - Status: fleetv1beta1.ResourceBindingStatus{ Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3691,68 +5164,164 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ObservedGeneration: 1, }, { - Status: metav1.ConditionFalse, + Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingOverridden), - Reason: condition.OverriddenFailedReason, + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.NotAvailableYetReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionFalse, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.AppliedCondition: metav1.ConditionFalse, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, - ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, + FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ { - Name: "override-1", - Namespace: "override-ns", + 
ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + Condition: metav1.Condition{ + Type: fleetv1beta1.WorkConditionTypeApplied, + Status: metav1.ConditionFalse, + }, }, + }, + DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ { - Name: "override-2", + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, + }, + }, + }, + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "app-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(2)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/spec/replicas", + ValueInMember: "1", + ValueInHub: "2", + }, + }, }, }, Conditions: []metav1.Condition{ { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), - Reason: condition.OverriddenFailedReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "unknown work created condition", + name: "always on drift detection", + crp: crp.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, - { - Name: "override-2", - }, - }, - ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, + ClusterResourceOverrideSnapshots: []string{}, SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), TargetCluster: cluster, }, Status: fleetv1beta1.ResourceBindingStatus{ + DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ + { + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, + }, + }, + }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -3767,88 +5336,129 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ObservedGeneration: 1, }, { - Status: metav1.ConditionUnknown, + Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), - Reason: 
condition.WorkSynchronizedUnknownReason, + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionUnknown, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.AppliedCondition: metav1.ConditionTrue, + condition.AvailableCondition: metav1.ConditionTrue, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, - ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, + DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ { - Name: "override-2", + ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Name: "cm-1", + Namespace: "ns-1", + }, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: 1, + FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDrifts: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, + }, }, }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Type: 
string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), Reason: condition.OverriddenSucceededReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, ObservedGeneration: crpGeneration, }, { - Status: metav1.ConditionUnknown, - Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedUnknownReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "false applied condition", + name: "ReportDiff apply strategy (diff reported)", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingName, Generation: 1, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, - { - Name: "override-2", - }, - }, - ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, + ClusterResourceOverrideSnapshots: []string{}, SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), TargetCluster: cluster, + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: 
fleetv1beta1.ApplyStrategyTypeReportDiff, + }, }, Status: fleetv1beta1.ResourceBindingStatus{ - FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", Version: "v1", Kind: "ConfigMap", - Name: "config-name", - Namespace: "config-namespace", + Name: "cm-1", + Namespace: "ns-1", }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(1)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, }, }, }, @@ -3877,105 +5487,111 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Reason: condition.ApplyFailedReason, ObservedGeneration: 1, }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionFalse, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.DiffReportedCondition: metav1.ConditionTrue, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, - ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, - { - Name: "override-2", - }, - }, - FailedPlacements: 
[]fleetv1beta1.FailedResourcePlacement{ + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", Version: "v1", Kind: "ConfigMap", - Name: "config-name", - Namespace: "config-namespace", + Name: "cm-1", + Namespace: "ns-1", }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(1)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, }, }, }, Conditions: []metav1.Condition{ { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourcesAppliedConditionType), - Reason: condition.ApplyFailedReason, + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Type: string(fleetv1beta1.ResourceBindingOverridden), Reason: condition.OverriddenSucceededReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedReason, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "false available condition", 
+ name: "ReportDiff apply strategy (diff not yet reported)", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingName, - Generation: 1, + Generation: 2, }, Spec: fleetv1beta1.ResourceBindingSpec{ - ResourceSnapshotName: resourceSnapshotName, - ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, - { - Name: "override-2", - }, - }, - ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + ResourceSnapshotName: resourceSnapshotName, + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{}, + ClusterResourceOverrideSnapshots: []string{}, SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), TargetCluster: cluster, + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, }, Status: fleetv1beta1.ResourceBindingStatus{ - FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", Version: "v1", Kind: "ConfigMap", - Name: "config-name", - Namespace: "config-namespace", + Name: "cm-1", + Namespace: "ns-1", }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeAvailable, - Status: metav1.ConditionFalse, + ObservationTime: metav1.Time{Time: time.Now()}, + TargetClusterObservedGeneration: ptr.To(int64(1)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ + { + Path: "/data", + ValueInMember: "k=1", + ValueInHub: "k=2", + }, }, }, }, @@ -3984,105 +5600,70 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingRolloutStarted), Reason: condition.RolloutStartedReason, - ObservedGeneration: 1, + ObservedGeneration: 2, }, { Status: metav1.ConditionTrue, Type: 
string(fleetv1beta1.ResourceBindingOverridden), Reason: condition.OverriddenSucceededReason, - ObservedGeneration: 1, + ObservedGeneration: 2, }, { Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), Reason: condition.WorkSynchronizedReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplySucceededReason, - ObservedGeneration: 1, + ObservedGeneration: 2, }, { Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.NotAvailableYetReason, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionFalse, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.DiffReportedCondition: metav1.ConditionUnknown, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, - ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, - ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ - { - Name: "override-1", - Namespace: "override-ns", - }, - { - Name: "override-2", - }, - }, - FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "config-name", - Namespace: "config-namespace", - }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeAvailable, - Status: metav1.ConditionFalse, - }, - }, - }, + ApplicableResourceOverrides: 
[]fleetv1beta1.NamespacedName{}, + ApplicableClusterResourceOverrides: []string{}, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourcesAppliedConditionType), - Reason: condition.ApplySucceededReason, - ObservedGeneration: crpGeneration, - }, - { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourcesAvailableConditionType), - Reason: condition.NotAvailableYetReason, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Type: string(fleetv1beta1.ResourceBindingOverridden), Reason: condition.OverriddenSucceededReason, ObservedGeneration: crpGeneration, }, { Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), - Reason: condition.RolloutStartedReason, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, ObservedGeneration: crpGeneration, }, { - Status: metav1.ConditionTrue, - Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), - Reason: condition.WorkSynchronizedReason, + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusUnknownReason, ObservedGeneration: crpGeneration, }, }, }, }, { - name: "drifts and configuration diffs", + name: "ReportDiff apply strategy (failed to report diff)", + crp: crpWithReportDiffApplyStrategy.DeepCopy(), binding: &fleetv1beta1.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ Name: bindingName, @@ -4094,24 +5675,12 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { ClusterResourceOverrideSnapshots: []string{}, SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), TargetCluster: cluster, + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: 
fleetv1beta1.ApplyStrategyTypeReportDiff, + }, }, Status: fleetv1beta1.ResourceBindingStatus{ - FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "cm-1", - Namespace: "ns-1", - }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, - }, - }, - }, - DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ + DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ { ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ Group: "", @@ -4121,9 +5690,9 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Namespace: "ns-1", }, ObservationTime: metav1.Time{Time: time.Now()}, - TargetClusterObservedGeneration: 1, - FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, - ObservedDrifts: []fleetv1beta1.PatchDetail{ + TargetClusterObservedGeneration: ptr.To(int64(1)), + FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, + ObservedDiffs: []fleetv1beta1.PatchDetail{ { Path: "/data", ValueInMember: "k=1", @@ -4132,27 +5701,6 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "app-1", - Namespace: "ns-1", - }, - ObservationTime: metav1.Time{Time: time.Now()}, - TargetClusterObservedGeneration: ptr.To(int64(2)), - FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, - ObservedDiffs: []fleetv1beta1.PatchDetail{ - { - Path: "/spec/replicas", - ValueInMember: "1", - ValueInHub: "2", - }, - }, - }, - }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -4174,86 +5722,23 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, { Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: 
condition.ApplySucceededReason, - ObservedGeneration: 1, - }, - { - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingAvailable), - Reason: condition.NotAvailableYetReason, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusFalseReason, ObservedGeneration: 1, }, }, }, }, - want: []metav1.ConditionStatus{ - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionTrue, - metav1.ConditionFalse, + wantConditionStatusMap: map[condition.ResourceCondition]metav1.ConditionStatus{ + condition.RolloutStartedCondition: metav1.ConditionTrue, + condition.OverriddenCondition: metav1.ConditionTrue, + condition.WorkSynchronizedCondition: metav1.ConditionTrue, + condition.DiffReportedCondition: metav1.ConditionFalse, }, - wantStatus: fleetv1beta1.ResourcePlacementStatus{ + wantResourcePlacementStatus: fleetv1beta1.ResourcePlacementStatus{ ClusterName: cluster, ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{}, ApplicableClusterResourceOverrides: []string{}, - FailedPlacements: []fleetv1beta1.FailedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "cm-1", - Namespace: "ns-1", - }, - Condition: metav1.Condition{ - Type: fleetv1beta1.WorkConditionTypeApplied, - Status: metav1.ConditionFalse, - }, - }, - }, - DriftedPlacements: []fleetv1beta1.DriftedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Name: "cm-1", - Namespace: "ns-1", - }, - ObservationTime: metav1.Time{Time: time.Now()}, - TargetClusterObservedGeneration: 1, - FirstDriftedObservedTime: metav1.Time{Time: time.Now()}, - ObservedDrifts: []fleetv1beta1.PatchDetail{ - { - Path: "/data", - ValueInMember: "k=1", - ValueInHub: "k=2", - }, - }, - }, - }, - DiffedPlacements: []fleetv1beta1.DiffedResourcePlacement{ - { - ResourceIdentifier: fleetv1beta1.ResourceIdentifier{ - 
Group: "apps", - Version: "v1", - Kind: "Deployment", - Name: "app-1", - Namespace: "ns-1", - }, - ObservationTime: metav1.Time{Time: time.Now()}, - TargetClusterObservedGeneration: ptr.To(int64(2)), - FirstDiffedObservedTime: metav1.Time{Time: time.Now()}, - ObservedDiffs: []fleetv1beta1.PatchDetail{ - { - Path: "/spec/replicas", - ValueInMember: "1", - ValueInHub: "2", - }, - }, - }, - }, Conditions: []metav1.Condition{ { Status: metav1.ConditionTrue, @@ -4275,8 +5760,8 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, { Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceBindingApplied), - Reason: condition.ApplySucceededReason, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusFalseReason, ObservedGeneration: crpGeneration, }, }, @@ -4285,12 +5770,6 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - crp := &fleetv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - Generation: crpGeneration, - }, - } resourceSnapshot := &fleetv1beta1.ClusterResourceSnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: resourceSnapshotName, @@ -4300,15 +5779,15 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Recorder: record.NewFakeRecorder(10), } status := fleetv1beta1.ResourcePlacementStatus{ClusterName: cluster} - got, err := r.setResourcePlacementStatusPerCluster(crp, resourceSnapshot, tc.binding, &status) + got, err := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status) if err != nil { t.Fatalf("setResourcePlacementStatusPerCluster() got err %v, want nil", err) } - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Errorf("setResourcePlacementStatusPerCluster() conditionStatus mismatch (-want, +got):\n%s", diff) + if diff := cmp.Diff(got, tc.wantConditionStatusMap); diff != "" { + t.Errorf("setResourcePlacementStatusPerCluster() 
conditionStatus mismatch (-got, +want):\n%s", diff) } - if diff := cmp.Diff(tc.wantStatus, status, statusCmpOptions...); diff != "" { - t.Errorf("setResourcePlacementStatusPerCluster() status mismatch (-want, +got):\n%s", diff) + if diff := cmp.Diff(status, tc.wantResourcePlacementStatus, statusCmpOptions...); diff != "" { + t.Errorf("setResourcePlacementStatusPerCluster() status mismatch (-got, +want):\n%s", diff) } }) } diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index b5ee25bb3..a1b8dce69 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -68,6 +68,18 @@ const ( // AvailableReason is the reason string of placement condition if the selected resources are available. AvailableReason = "ResourceAvailable" + + // DiffReportedStatusUnknownReason is the reason string of the DiffReported condition when the + // diff reporting has just started and its status is not yet known. + DiffReportedStatusUnknownReason = "DiffReportingJustStarted" + + // DiffReportedStatusFalseReason is the reason string of the DiffReported condition when the + // diff reporting has not been fully completed. + DiffReportedStatusFalseReason = "DiffReportingIncompleteOrFailed" + + // DiffReportedStatusTrueReason is the reason string of the DiffReported condition when the + // diff reporting has been fully completed. + DiffReportedStatusTrueReason = "DiffReportingCompleted" ) // A group of condition reason string which is used to populate the placement condition per cluster. 
@@ -251,6 +263,7 @@ const ( WorkSynchronizedCondition AppliedCondition AvailableCondition + DiffReportedCondition TotalCondition ) @@ -261,6 +274,7 @@ func (c ResourceCondition) EventReasonForTrue() string { "PlacementWorkSynchronized", "PlacementApplied", "PlacementAvailable", + "PlacementDiffReported", }[c] } @@ -271,6 +285,7 @@ func (c ResourceCondition) EventMessageForTrue() string { "Work(s) have been created or updated successfully for the selected cluster(s)", "Resources have been applied to the selected cluster(s)", "Resources are available on the selected cluster(s)", + "Configuration differences have been reported on the selected cluster(s)", }[c] } @@ -282,6 +297,7 @@ func (c ResourceCondition) ResourcePlacementConditionType() fleetv1beta1.Resourc fleetv1beta1.ResourceWorkSynchronizedConditionType, fleetv1beta1.ResourcesAppliedConditionType, fleetv1beta1.ResourcesAvailableConditionType, + fleetv1beta1.ResourcesDiffReportedConditionType, }[c] } @@ -293,6 +309,7 @@ func (c ResourceCondition) ResourceBindingConditionType() fleetv1beta1.ResourceB fleetv1beta1.ResourceBindingWorkSynchronized, fleetv1beta1.ResourceBindingApplied, fleetv1beta1.ResourceBindingAvailable, + fleetv1beta1.ResourceBindingDiffReported, }[c] } @@ -304,6 +321,7 @@ func (c ResourceCondition) ClusterResourcePlacementConditionType() fleetv1beta1. 
fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType, fleetv1beta1.ClusterResourcePlacementAppliedConditionType, fleetv1beta1.ClusterResourcePlacementAvailableConditionType, + fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType, }[c] } @@ -345,6 +363,13 @@ func (c ResourceCondition) UnknownResourceConditionPerCluster(generation int64) Message: "The availability of the selected resources is unknown yet ", ObservedGeneration: generation, }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: DiffReportedStatusUnknownReason, + Message: "Diff reporting has just started; its status is not yet known", + ObservedGeneration: generation, + }, }[c] } @@ -386,6 +411,13 @@ func (c ResourceCondition) UnknownClusterResourcePlacementCondition(generation i Message: fmt.Sprintf("There are still %d cluster(s) in the process of checking the availability of the selected resources", clusterCount), ObservedGeneration: generation, }, + { + Status: metav1.ConditionUnknown, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: DiffReportedStatusUnknownReason, + Message: fmt.Sprintf("There are still %d cluster(s) which just started checking for configuration differences", clusterCount), + ObservedGeneration: generation, + }, }[c] } @@ -427,6 +459,13 @@ func (c ResourceCondition) FalseClusterResourcePlacementCondition(generation int Message: fmt.Sprintf("The selected resources in %d cluster(s) are still not available yet", clusterCount), ObservedGeneration: generation, }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: DiffReportedStatusFalseReason, + Message: fmt.Sprintf("Diff reporting in %d cluster(s) is still in progress or has failed", clusterCount), + ObservedGeneration: generation, + }, }[c] } @@ -468,6 +507,13 @@ func (c ResourceCondition) 
TrueClusterResourcePlacementCondition(generation int6 Message: fmt.Sprintf("The selected resources in %d cluster(s) are available now", clusterCount), ObservedGeneration: generation, }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: DiffReportedStatusTrueReason, + Message: fmt.Sprintf("Diff reporting in %d cluster(s) has been completed", clusterCount), + ObservedGeneration: generation, + }, }[c] } From 5154f6128aad5ae39cab8a409debec61cdfbbe56 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Fri, 17 Jan 2025 11:02:18 +0800 Subject: [PATCH 2/9] Minor fixes --- .../clusterresourceplacement/controller.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go index 9fb44ce90..4c6cfccce 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -974,7 +974,18 @@ func isRolloutCompleted(crp *fleetv1beta1.ClusterResourcePlacement) bool { return false } + skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes + if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { + skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType + } for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { + // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip + // checking the DiffReported condition type; similarly, should the apply strategy be of + // the ReportDiff type, Fleet will skip checking the Applied and Available condition + // type. 
+ if _, ok := skippedCondTypes[i]; ok { + continue + } if !condition.IsConditionStatusTrue(crp.GetCondition(string(i.ClusterResourcePlacementConditionType())), crp.Generation) { return false } From 7fc17c0b350417cc5a67471464c9bc87c7efa5f1 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Sat, 18 Jan 2025 02:03:15 +0800 Subject: [PATCH 3/9] Minor fixes --- .../clusterresourceplacement/placement_status.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index da1b1cb5d..a962e6dbb 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -260,13 +260,13 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, crp *flee // The resource condition order (index) is defined as const: // const ( // -// RolloutStartedCondition resourceCondition = iota -// OverriddenCondition -// WorkSynchronizedCondition -// AppliedCondition -// AvailableCondition -// DiffReportedCondition -// TotalCondition +// RolloutStartedCondition resourceCondition = iota +// OverriddenCondition +// WorkSynchronizedCondition +// AppliedCondition +// AvailableCondition +// DiffReportedCondition +// TotalCondition // // ) func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.ClusterResourcePlacement, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, From 0d057e754400bb956e112b007c26814fe3eab243 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Sun, 19 Jan 2025 01:06:02 +0800 Subject: [PATCH 4/9] Refactored --- .../clusterresourceplacement/controller.go | 73 +++- .../placement_status.go | 367 +++++++++--------- .../placement_status_test.go | 17 +- pkg/utils/condition/condition.go | 29 +- 4 files changed, 288 insertions(+), 198 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/controller.go 
b/pkg/controllers/clusterresourceplacement/controller.go index 4c6cfccce..14d801abe 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -212,12 +212,12 @@ func (r *Reconciler) handleUpdate(ctx context.Context, crp *fleetv1beta1.Cluster } } - // There is no need to check if the CRP is available or not. - // If the available condition is true, it means the rollout is completed. + // Rollout is considered to be completed when all the expected condition types are set to the + // True status. if isRolloutCompleted(crp) { if !isRolloutCompleted(oldCRP) { - klog.V(2).InfoS("Placement rollout has finished and resources are available", "clusterResourcePlacement", crpKObj, "generation", crp.Generation) - r.Recorder.Event(crp, corev1.EventTypeNormal, "PlacementRolloutCompleted", "Resources are available in the selected clusters") + klog.V(2).InfoS("Placement has finished the rollout process and reached the desired status", "clusterResourcePlacement", crpKObj, "generation", crp.Generation) + r.Recorder.Event(crp, corev1.EventTypeNormal, "PlacementRolloutCompleted", "Placement has finished the rollout process and reached the desired status") } // We don't need to requeue any request now by watching the binding changes return ctrl.Result{}, nil @@ -888,8 +888,13 @@ func parseResourceGroupHashFromAnnotation(s *fleetv1beta1.ClusterResourceSnapsho } // setPlacementStatus returns if there is a cluster scheduled by the scheduler. 
-func (r *Reconciler) setPlacementStatus(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, selectedResourceIDs []fleetv1beta1.ResourceIdentifier, - latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (bool, error) { +func (r *Reconciler) setPlacementStatus( + ctx context.Context, + crp *fleetv1beta1.ClusterResourcePlacement, + selectedResourceIDs []fleetv1beta1.ResourceIdentifier, + latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, + latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, +) (bool, error) { crp.Status.SelectedResources = selectedResourceIDs scheduledCondition := buildScheduledCondition(crp, latestSchedulingPolicySnapshot) crp.SetConditions(scheduledCondition) @@ -913,7 +918,47 @@ func (r *Reconciler) setPlacementStatus(ctx context.Context, crp *fleetv1beta1.C return false, nil } - return r.setResourceConditions(ctx, crp, latestSchedulingPolicySnapshot, latestResourceSnapshot) + // Classify cluster decisions; find out clusters that have been selected and + // have not been selected. + selected, unselected := classifyClusterDecisions(latestSchedulingPolicySnapshot.Status.ClusterDecisions) + // Calculate the number of clusters that should have been selected yet cannot be, due to + // scheduling constraints. + failedToScheduleClusterCount := calculateFailedToScheduleClusterCount(crp, selected, unselected) + + // Prepare the resource placement status (status per cluster) in the CRP status. + allRPS := make([]fleetv1beta1.ResourcePlacementStatus, 0, len(latestSchedulingPolicySnapshot.Status.ClusterDecisions)) + + // For clusters that have been selected, set the resource placement status based on the + // respective resource binding status for each of them. 
+ expectedCondTypes := determineExpectedCRPAndResourcePlacementStatusCondType(crp) + allRPS, rpsSetCondTypeCounter, err := r.setScheduledResourcePlacementStatuses( + ctx, allRPS, selected, expectedCondTypes, crp, latestSchedulingPolicySnapshot, latestResourceSnapshot) + if err != nil { + return false, err + } + + // For clusters that failed to get scheduled, set a resource placement status with the + // failed to schedule condition for each of them. + allRPS = setFailedToScheduleResourcePlacementStatuses(allRPS, unselected, failedToScheduleClusterCount, klog.KObj(crp), crp.Generation) + + crp.Status.PlacementStatuses = allRPS + + // Prepare the conditions for the CRP object itself. + + if len(selected) == 0 { + // There is no selected cluster at all. It could be that there is no matching cluster + // given the current scheduling policy; there remains a corner case as well where a cluster + // has been selected before (with resources being possibly applied), but has now + // left the fleet. To address this corner case, Fleet here will remove all lingering + // conditions (any condition type other than CRPScheduled). + + // Note that the scheduled condition has been set earlier in this method. 
+ crp.Status.Conditions = []metav1.Condition{*crp.GetCondition(string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType))} + return false, nil + } + + setCRPConditions(crp, allRPS, rpsSetCondTypeCounter, expectedCondTypes) + return true, nil } func buildScheduledCondition(crp *fleetv1beta1.ClusterResourcePlacement, latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot) metav1.Condition { @@ -974,18 +1019,8 @@ func isRolloutCompleted(crp *fleetv1beta1.ClusterResourcePlacement) bool { return false } - skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes - if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { - skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType - } - for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { - // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip - // checking the DiffReported condition type; similarly, should the apply strategy be of - // the ReportDiff type, Fleet will skip checking the Applied and Available condition - // type. 
- if _, ok := skippedCondTypes[i]; ok { - continue - } + expectedCondTypes := determineExpectedCRPAndResourcePlacementStatusCondType(crp) + for _, i := range expectedCondTypes { if !condition.IsConditionStatusTrue(crp.GetCondition(string(i.ClusterResourcePlacementConditionType())), crp.Generation) { return false } diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index a962e6dbb..6a7a605a2 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -43,155 +43,193 @@ const ( ResourceScheduleFailedReason = "ScheduleFailed" ) -var ( - skippedCondTypesForCSASSAApplyStrategyTypes = map[condition.ResourceCondition]interface{}{ - condition.DiffReportedCondition: nil, +// calculateFailedToScheduleClusterCount calculates the count of failed to schedule clusters based on the scheduling policy. +func calculateFailedToScheduleClusterCount(crp *fleetv1beta1.ClusterResourcePlacement, selected, unselected []*fleetv1beta1.ClusterDecision) int { + failedToScheduleClusterCount := 0 + switch { + case crp.Spec.Policy == nil: + // No scheduling policy is set; Fleet assumes that a PickAll scheduling policy + // is specified and in this case there is no need to calculate the count of + // failed to schedule clusters as the scheduler will always set all eligible clusters. + case crp.Spec.Policy.PlacementType == fleetv1beta1.PickNPlacementType && crp.Spec.Policy.NumberOfClusters != nil: + // The PickN scheduling policy is used; in this case the count of failed to schedule + // clusters is equal to the difference between the specified N number and the actual + // number of selected clusters. 
+ failedToScheduleClusterCount = int(*crp.Spec.Policy.NumberOfClusters) - len(selected) + case crp.Spec.Policy.PlacementType == fleetv1beta1.PickFixedPlacementType: + // The PickFixed scheduling policy is used; in this case the count of failed to schedule + // clusters is equal to the difference between the number of specified cluster names and + // the actual number of selected clusters. + failedToScheduleClusterCount = len(crp.Spec.Policy.ClusterNames) - len(selected) + default: + // The PickAll scheduling policy is used; as explained earlier, in this case there is + // no need to calculate the count of failed to schedule clusters. } - skippedCondTypesForReportDiffApplyStrategyType = map[condition.ResourceCondition]interface{}{ - condition.AppliedCondition: nil, - condition.AvailableCondition: nil, + + if failedToScheduleClusterCount > len(unselected) { + // There exists a corner case where the failed to schedule cluster count exceeds the + // total number of unselected clusters; this can occur when there does not exist + // enough clusters in the fleet for the scheduler to pick. In this case, the count is + // set to the total number of unselected clusters. + failedToScheduleClusterCount = len(unselected) } -) -// Note (chenyu1): with the newly added DiffReported condition type, the status handling part -// probably needs some refactoring as it is triggering the code complexity linter (original -// cyclomatic complexity score 26, new score 31, threshold 30); the original -// assumption, where condition types can always be handled in sequential order, i.e., one failing -// condition type will exempt Fleet from processing all subsequent condition types, only partially -// stands now. - -// setResourceConditions sets the resource related conditions by looking at the bindings and work, excluding the scheduled condition. -// It returns whether there is a cluster scheduled or not. 
-// -// nolint: gocyclo -func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, - latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot) (bool, error) { - placementStatuses := make([]fleetv1beta1.ResourcePlacementStatus, 0, len(latestSchedulingPolicySnapshot.Status.ClusterDecisions)) - decisions := latestSchedulingPolicySnapshot.Status.ClusterDecisions - selected, unselected := classifyClusterDecisions(decisions) - - // In the pickN case, if the placement cannot be satisfied. For example, pickN deployment requires 5 clusters and - // scheduler schedules the resources on 3 clusters. We'll populate why the other two cannot be scheduled. - // Here it is calculating how many unscheduled resources there are. - unscheduledClusterCount := 0 - if crp.Spec.Policy != nil { - if crp.Spec.Policy.PlacementType == fleetv1beta1.PickNPlacementType && crp.Spec.Policy.NumberOfClusters != nil { - unscheduledClusterCount = int(*crp.Spec.Policy.NumberOfClusters) - len(selected) - } - if crp.Spec.Policy.PlacementType == fleetv1beta1.PickFixedPlacementType { - unscheduledClusterCount = len(crp.Spec.Policy.ClusterNames) - len(selected) + return failedToScheduleClusterCount +} + +// setFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters. +func setFailedToScheduleResourcePlacementStatuses( + allRPS []fleetv1beta1.ResourcePlacementStatus, + unselected []*fleetv1beta1.ClusterDecision, + failedToScheduleClusterCount int, + crpRef klog.ObjectRef, crpGeneration int64, +) []fleetv1beta1.ResourcePlacementStatus { + // In the earlier step it has been guaranteed that failedToScheduleClusterCount is less than or equal to the + // total number of unselected clusters; here Fleet still performs a sanity check. 
+ for i := 0; i < failedToScheduleClusterCount && i < len(unselected); i++ { + rps := &fleetv1beta1.ResourcePlacementStatus{} + + failedToScheduleCond := metav1.Condition{ + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: ResourceScheduleFailedReason, + Message: unselected[i].Reason, + ObservedGeneration: crpGeneration, } + meta.SetStatusCondition(&rps.Conditions, failedToScheduleCond) + // The allRPS slice has been pre-allocated, so the append call will never produce a new + // slice; here, however, Fleet will still return the old slice just in case. + allRPS = append(allRPS, *rps) + klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", crpRef, "cluster", unselected[i].ClusterName) + } + + return allRPS +} + +// determineExpectedCRPAndResourcePlacementStatusCondType determines the expected condition types for the CRP and resource placement statuses +// given the currently in-use apply strategy. +func determineExpectedCRPAndResourcePlacementStatusCondType(crp *fleetv1beta1.ClusterResourcePlacement) []condition.ResourceCondition { + switch { + case crp.Spec.Strategy.ApplyStrategy == nil: + return condition.CondTypesForCSAAndSSAApplyStrategies + case crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff: + return condition.CondTypesForReportDiffApplyStrategy + default: + return condition.CondTypesForCSAAndSSAApplyStrategies } +} - oldResourcePlacementStatusMap := buildResourcePlacementStatusMap(crp) - resourceBindingMap, err := r.buildClusterResourceBindings(ctx, crp, latestSchedulingPolicySnapshot) +// setScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters. 
+func (r *Reconciler) setScheduledResourcePlacementStatuses( + ctx context.Context, + allRPS []fleetv1beta1.ResourcePlacementStatus, + selected []*fleetv1beta1.ClusterDecision, + expectedCondTypes []condition.ResourceCondition, + crp *fleetv1beta1.ClusterResourcePlacement, + lastestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, + latestClusterResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, +) ( + []fleetv1beta1.ResourcePlacementStatus, + [condition.TotalCondition][condition.TotalConditionStatus]int, + error, +) { + // Use a counter to track the number of each condition type set and their respective status. + var rpsSetCondTypeCounter [condition.TotalCondition][condition.TotalConditionStatus]int + + oldRPSMap := buildResourcePlacementStatusMap(crp) + bindingMap, err := r.buildClusterResourceBindings(ctx, crp, lastestSchedulingPolicySnapshot) if err != nil { - return false, err + return allRPS, rpsSetCondTypeCounter, err } - // record the total count per status for each condition - var clusterConditionStatusRes [condition.TotalCondition][condition.TotalConditionStatus]int + for idx := range selected { + clusterDecision := selected[idx] + rps := &fleetv1beta1.ResourcePlacementStatus{} - for _, c := range selected { - var rps fleetv1beta1.ResourcePlacementStatus + // Set the scheduled condition. 
scheduledCondition := metav1.Condition{ Status: metav1.ConditionTrue, Type: string(fleetv1beta1.ResourceScheduledConditionType), Reason: condition.ScheduleSucceededReason, - Message: c.Reason, + Message: clusterDecision.Reason, ObservedGeneration: crp.Generation, } - rps.ClusterName = c.ClusterName - oldConditions, ok := oldResourcePlacementStatusMap[c.ClusterName] - if ok { - // update the lastTransitionTime considering the existing condition status instead of overwriting - rps.Conditions = oldConditions - } meta.SetStatusCondition(&rps.Conditions, scheduledCondition) - res, err := r.setResourcePlacementStatusPerCluster(crp, latestResourceSnapshot, resourceBindingMap[c.ClusterName], &rps) + + // Set the cluster name. + rps.ClusterName = clusterDecision.ClusterName + + // Port back the old conditions. + // This is necessary for Fleet to track the last transition times correctly. + if oldConds, ok := oldRPSMap[clusterDecision.ClusterName]; ok { + rps.Conditions = oldConds + } + + // Prepare the new conditions. + binding := bindingMap[clusterDecision.ClusterName] + setStatusByCondType, err := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes) if err != nil { - return false, err + return allRPS, rpsSetCondTypeCounter, err } - for condType, condStatus := range res { + + // Update the counter. 
+ for condType, condStatus := range setStatusByCondType { switch condStatus { case metav1.ConditionTrue: - clusterConditionStatusRes[condType][condition.TrueConditionStatus]++ + rpsSetCondTypeCounter[condType][condition.TrueConditionStatus]++ case metav1.ConditionFalse: - clusterConditionStatusRes[condType][condition.FalseConditionStatus]++ + rpsSetCondTypeCounter[condType][condition.FalseConditionStatus]++ case metav1.ConditionUnknown: - clusterConditionStatusRes[condType][condition.UnknownConditionStatus]++ + rpsSetCondTypeCounter[condType][condition.UnknownConditionStatus]++ } } - // The resources can be changed without updating the crp spec. - // To reflect the latest resource conditions, we reset conditions that are no longer relevant. + + // CRP status will refresh even if the spec has not changed. To avoid confusion, + // Fleet will reset unused conditions. for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { - if _, ok := res[i]; !ok { + if _, ok := setStatusByCondType[i]; !ok { meta.RemoveStatusCondition(&rps.Conditions, string(i.ResourcePlacementConditionType())) } } - placementStatuses = append(placementStatuses, rps) - klog.V(2).InfoS("Populated the resource placement status for the scheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", c.ClusterName, "resourcePlacementStatus", rps) + // The allRPS slice has been pre-allocated, so the append call will never produce a new + // slice; here, however, Fleet will still return the old slice just in case. 
+ allRPS = append(allRPS, *rps) + klog.V(2).InfoS("Populated the resource placement status for the scheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", clusterDecision.ClusterName, "resourcePlacementStatus", rps) } - isClusterScheduled := len(placementStatuses) > 0 - - for i := 0; i < unscheduledClusterCount && i < len(unselected); i++ { - // TODO: we could improve the message by summarizing the failure reasons from all of the unselected clusters. - // For now, it starts from adding some sample failures of unselected clusters. - var rp fleetv1beta1.ResourcePlacementStatus - scheduledCondition := metav1.Condition{ - Status: metav1.ConditionFalse, - Type: string(fleetv1beta1.ResourceScheduledConditionType), - Reason: ResourceScheduleFailedReason, - Message: unselected[i].Reason, - ObservedGeneration: crp.Generation, - } - meta.SetStatusCondition(&rp.Conditions, scheduledCondition) - placementStatuses = append(placementStatuses, rp) - klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", unselected[i].ClusterName) - } - crp.Status.PlacementStatuses = placementStatuses - - if !isClusterScheduled { - // It covers one special case: CRP selects a cluster which joins (resource are applied) and then leaves. - // In this case, CRP generation has not been changed. - // And we cannot rely on the generation to filter out the stale conditions. - // But the resource related conditions are set before. So that, we reset them. - crp.Status.Conditions = []metav1.Condition{*crp.GetCondition(string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType))} - return false, nil - } - - // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip - // any updates to the DiffReported condition type; similarly, should the apply strategy be of - // the ReportDiff type, Fleet will skip any updates to the Applied and Available condition - // type. 
- skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes - if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { - skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType - } + return allRPS, rpsSetCondTypeCounter, nil +} +// setCRPConditions sets the CRP conditions based on the resource placement statuses. +func setCRPConditions( + crp *fleetv1beta1.ClusterResourcePlacement, + allRPS []fleetv1beta1.ResourcePlacementStatus, + rpsSetCondTypeCounter [condition.TotalCondition][condition.TotalConditionStatus]int, + expectedCondTypes []condition.ResourceCondition, +) { // Track all the condition types that have been set. setCondTypes := make(map[condition.ResourceCondition]interface{}) - for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { - if _, ok := skippedCondTypes[i]; ok { - // If the Applied and Available condition types are skipped (as the CRP uses - // the ReportDiff apply strategy), proceed to evaluate the DiffReported condition - // type. - continue - } + for _, i := range expectedCondTypes { setCondTypes[i] = nil - if clusterConditionStatusRes[i][condition.UnknownConditionStatus] > 0 { - crp.SetConditions(i.UnknownClusterResourcePlacementCondition(crp.Generation, clusterConditionStatusRes[i][condition.UnknownConditionStatus])) - break - } else if clusterConditionStatusRes[i][condition.FalseConditionStatus] > 0 { - crp.SetConditions(i.FalseClusterResourcePlacementCondition(crp.Generation, clusterConditionStatusRes[i][condition.FalseConditionStatus])) - break - } else { - cond := i.TrueClusterResourcePlacementCondition(crp.Generation, clusterConditionStatusRes[i][condition.TrueConditionStatus]) + // If any given condition type is set to False or Unknown, Fleet will skip evaluation of the + // rest conditions. 
+ shouldSkipRestCondTypes := false + switch { + case rpsSetCondTypeCounter[i][condition.UnknownConditionStatus] > 0: + // There is at least one Unknown condition of the given type being set on the per cluster placement statuses. + crp.SetConditions(i.UnknownClusterResourcePlacementCondition(crp.Generation, rpsSetCondTypeCounter[i][condition.UnknownConditionStatus])) + shouldSkipRestCondTypes = true + case rpsSetCondTypeCounter[i][condition.FalseConditionStatus] > 0: + // There is at least one False condition of the given type being set on the per cluster placement statuses. + crp.SetConditions(i.FalseClusterResourcePlacementCondition(crp.Generation, rpsSetCondTypeCounter[i][condition.FalseConditionStatus])) + shouldSkipRestCondTypes = true + default: + // All the conditions of the given type are True. + cond := i.TrueClusterResourcePlacementCondition(crp.Generation, rpsSetCondTypeCounter[i][condition.TrueConditionStatus]) if i == condition.OverriddenCondition { hasOverride := false - for _, status := range placementStatuses { + for _, status := range allRPS { if len(status.ApplicableResourceOverrides) > 0 || len(status.ApplicableClusterResourceOverrides) > 0 { hasOverride = true break @@ -204,18 +242,21 @@ func (r *Reconciler) setResourceConditions(ctx context.Context, crp *fleetv1beta } crp.SetConditions(cond) } + + if shouldSkipRestCondTypes { + break + } } - // reset the remaining conditions, starting from the next one + + // As CRP status will refresh even if the spec has not changed, Fleet will reset any unused conditions + // to avoid confusion. for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { - // The resources can be changed without updating the crp spec. - // To reflect the latest resource conditions, we reset the renaming conditions. 
if _, ok := setCondTypes[i]; !ok { meta.RemoveStatusCondition(&crp.Status.Conditions, string(i.ClusterResourcePlacementConditionType())) } } - klog.V(2).InfoS("Populated the placement conditions", "clusterResourcePlacement", klog.KObj(crp)) - return true, nil + klog.V(2).InfoS("Populated the placement conditions", "clusterResourcePlacement", klog.KObj(crp)) } func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, crp *fleetv1beta1.ClusterResourcePlacement, latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot) (map[string]*fleetv1beta1.ClusterResourceBinding, error) { @@ -256,52 +297,48 @@ func (r *Reconciler) buildClusterResourceBindings(ctx context.Context, crp *flee } // setResourcePlacementStatusPerCluster sets the resource related fields for each cluster. -// It returns an array which records the status for each resource condition. -// The resource condition order (index) is defined as const: -// const ( -// -// RolloutStartedCondition resourceCondition = iota -// OverriddenCondition -// WorkSynchronizedCondition -// AppliedCondition -// AvailableCondition -// DiffReportedCondition -// TotalCondition -// -// ) -func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.ClusterResourcePlacement, latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, - binding *fleetv1beta1.ClusterResourceBinding, status *fleetv1beta1.ResourcePlacementStatus) (map[condition.ResourceCondition]metav1.ConditionStatus, error) { +// It returns a map which tracks the set status for each relevant condition type. 
+func (r *Reconciler) setResourcePlacementStatusPerCluster( + crp *fleetv1beta1.ClusterResourcePlacement, + latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, + binding *fleetv1beta1.ClusterResourceBinding, + status *fleetv1beta1.ResourcePlacementStatus, + expectedCondTypes []condition.ResourceCondition, +) (map[condition.ResourceCondition]metav1.ConditionStatus, error) { res := make(map[condition.ResourceCondition]metav1.ConditionStatus) if binding == nil { + // The binding cannot be found; Fleet might be observing an in-between state where + // the cluster has been picked but the binding has not been created yet. meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) res[condition.RolloutStartedCondition] = metav1.ConditionUnknown return res, nil } - // If the CRP has an apply strategy of the ClientSideApply or ServerSideApply type, skip - // any updates to the DiffReported condition type; similarly, should the apply strategy be of - // the ReportDiff type, Fleet will skip any updates to the Applied and Available condition - // type. - skippedCondTypes := skippedCondTypesForCSASSAApplyStrategyTypes - if crp.Spec.Strategy.ApplyStrategy != nil && crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff { - skippedCondTypes = skippedCondTypesForReportDiffApplyStrategyType - } - - // There are few cases: - // * if the resourceSnapshotName is not equal, - // 1. the status is false, it means the rollout is stuck. - // 2. otherwise, the rollout controller has not processed it yet. - // * if the resourceSnapshotName is equal, - // just return the corresponding status. 
- if binding.Spec.ResourceSnapshotName == latestResourceSnapshot.Name { - for i := condition.RolloutStartedCondition; i < condition.TotalCondition; i++ { - if _, ok := skippedCondTypes[i]; ok { - // If the Applied and Available condition types are skipped (as the CRP uses - // the ReportDiff apply strategy), proceed to evaluate the DiffReported condition - // type. - continue - } - + rolloutStartedCond := binding.GetCondition(string(condition.RolloutStartedCondition.ResourceBindingConditionType())) + switch { + case binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name && condition.IsConditionStatusFalse(rolloutStartedCond, binding.Generation): + // The binding uses an out of date resource snapshot and rollout controller has reported + // that the rollout is being blocked (the RolloutStarted condition is of the False status). + cond := metav1.Condition{ + Type: string(condition.RolloutStartedCondition.ResourcePlacementConditionType()), + Status: metav1.ConditionFalse, + ObservedGeneration: crp.Generation, + Reason: condition.RolloutNotStartedYetReason, + Message: "The rollout is being blocked by the rollout strategy", + } + meta.SetStatusCondition(&status.Conditions, cond) + res[condition.RolloutStartedCondition] = metav1.ConditionFalse + return res, nil + case binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name: + // The binding uses an out of date resource snapshot, and the RolloutStarted condition is + // set to True, Unknown, or has become stale. Fleet might be observing an in-between state. 
+ meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) + klog.V(5).InfoS("The cluster resource binding has a stale RolloutStarted condition, or it links to an out of date resource snapshot yet has the RolloutStarted condition set to True or Unknown status", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) + res[condition.RolloutStartedCondition] = metav1.ConditionUnknown + return res, nil + default: + // The binding uses the latest resource snapshot. + for _, i := range expectedCondTypes { bindingCond := binding.GetCondition(string(i.ResourceBindingConditionType())) if !condition.IsConditionStatusTrue(bindingCond, binding.Generation) && !condition.IsConditionStatusFalse(bindingCond, binding.Generation) { @@ -347,24 +384,4 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster(crp *fleetv1beta1.Clus } return res, nil } - // handling stale binding if binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name - rolloutStartedCond := binding.GetCondition(string(condition.RolloutStartedCondition.ResourceBindingConditionType())) - if condition.IsConditionStatusFalse(rolloutStartedCond, binding.Generation) { - cond := metav1.Condition{ - Type: string(condition.RolloutStartedCondition.ResourcePlacementConditionType()), - Status: metav1.ConditionFalse, - ObservedGeneration: crp.Generation, - Reason: condition.RolloutNotStartedYetReason, - Message: "The rollout is being blocked by the rollout strategy", - } - meta.SetStatusCondition(&status.Conditions, cond) - res[condition.RolloutStartedCondition] = metav1.ConditionFalse - return res, nil - } - // At this point, either the generation is not the one in the binding spec or the status is true/unknown. - // It means the rollout controller has not handled the binding yet. 
- meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) - klog.V(5).InfoS("The staled binding rollout status is unknown", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) - res[condition.RolloutStartedCondition] = metav1.ConditionUnknown - return res, nil } diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index fd633db1b..b806bad83 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -4423,6 +4423,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { binding *fleetv1beta1.ClusterResourceBinding wantConditionStatusMap map[condition.ResourceCondition]metav1.ConditionStatus wantResourcePlacementStatus fleetv1beta1.ResourcePlacementStatus + expectedCondTypes []condition.ResourceCondition }{ { name: "binding not found", @@ -4442,6 +4443,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "stale binding with false rollout started condition", @@ -4478,6 +4480,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "stale binding with true rollout started condition", @@ -4513,6 +4516,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "completed binding", @@ -4623,6 +4627,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "unknown rollout started condition", @@ -4659,6 +4664,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, 
}, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "false overridden condition", @@ -4729,6 +4735,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "unknown work created condition", @@ -4813,6 +4820,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "false applied condition", @@ -4941,6 +4949,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "false available condition", @@ -5082,6 +5091,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "drifts and configuration diffs", @@ -5284,6 +5294,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "always on drift detection", @@ -5421,6 +5432,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, }, { name: "ReportDiff apply strategy (diff reported)", @@ -5554,6 +5566,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, }, { name: "ReportDiff apply strategy (diff not yet reported)", @@ -5660,6 +5673,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, }, { name: "ReportDiff apply strategy (failed to report diff)", @@ -5766,6 +5780,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, + expectedCondTypes: condition.CondTypesForReportDiffApplyStrategy, }, } for _, tc := range tests { @@ 
-5779,7 +5794,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Recorder: record.NewFakeRecorder(10), } status := fleetv1beta1.ResourcePlacementStatus{ClusterName: cluster} - got, err := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status) + got, err := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status, tc.expectedCondTypes) if err != nil { t.Fatalf("setResourcePlacementStatusPerCluster() got err %v, want nil", err) } diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index a1b8dce69..5e59a6fcf 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -254,9 +254,14 @@ func IsConditionStatusFalse(cond *metav1.Condition, latestGeneration int64) bool // ResourceCondition is all the resource related condition, for example, scheduled condition is not included. type ResourceCondition int -// The following conditions are in ordered. -// Once the placement is scheduled, it will be divided into following stages. -// Used to populate the CRP conditions. +// The full set of condition types that Fleet will populate on CRPs (the CRP itself and the +// resource placement status per cluster) and cluster resource bindings. +// +// - RolloutStarted, Overridden and WorkSynchronized apply to all objects; +// - Applied and Available apply only when the apply strategy in use is of the ClientSideApply +// and ServerSideApply type; +// - DiffReported applies only the apply strategy in use is of the ReportDiff type. +// - Total is a end marker (not used). const ( RolloutStartedCondition ResourceCondition = iota OverriddenCondition @@ -267,6 +272,24 @@ const ( TotalCondition ) +var ( + // Different set of condition types that Fleet will populate. 
+ CondTypesForCSAAndSSAApplyStrategies = []ResourceCondition{ + RolloutStartedCondition, + OverriddenCondition, + WorkSynchronizedCondition, + AppliedCondition, + AvailableCondition, + } + + CondTypesForReportDiffApplyStrategy = []ResourceCondition{ + RolloutStartedCondition, + OverriddenCondition, + WorkSynchronizedCondition, + DiffReportedCondition, + } +) + func (c ResourceCondition) EventReasonForTrue() string { return []string{ "PlacementRolloutStarted", From 4540d2ffa88b2b42a1f2edf0f802af1e6b681f1e Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Sun, 19 Jan 2025 02:33:43 +0800 Subject: [PATCH 5/9] Minor fixes --- .../clusterresourceplacement/placement_status.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 6a7a605a2..1a864869f 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -146,6 +146,12 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses( clusterDecision := selected[idx] rps := &fleetv1beta1.ResourcePlacementStatus{} + // Port back the old conditions. + // This is necessary for Fleet to track the last transition times correctly. + if oldConds, ok := oldRPSMap[clusterDecision.ClusterName]; ok { + rps.Conditions = oldConds + } + // Set the scheduled condition. scheduledCondition := metav1.Condition{ Status: metav1.ConditionTrue, @@ -159,12 +165,6 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses( // Set the cluster name. rps.ClusterName = clusterDecision.ClusterName - // Port back the old conditions. - // This is necessary for Fleet to track the last transition times correctly. - if oldConds, ok := oldRPSMap[clusterDecision.ClusterName]; ok { - rps.Conditions = oldConds - } - // Prepare the new conditions. 
binding := bindingMap[clusterDecision.ClusterName] setStatusByCondType, err := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes) From 30699b1153dc10604573e22721595f6a94546a0e Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Tue, 21 Jan 2025 01:39:59 +0800 Subject: [PATCH 6/9] Minor fixes --- .../clusterresourceplacement/controller.go | 4 +- .../placement_status.go | 29 +- .../placement_status_test.go | 373 +++++++++++++++--- pkg/utils/condition/condition.go | 7 +- 4 files changed, 342 insertions(+), 71 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/controller.go b/pkg/controllers/clusterresourceplacement/controller.go index 14d801abe..0963e246a 100644 --- a/pkg/controllers/clusterresourceplacement/controller.go +++ b/pkg/controllers/clusterresourceplacement/controller.go @@ -931,7 +931,7 @@ func (r *Reconciler) setPlacementStatus( // For clusters that have been selected, set the resource placement status based on the // respective resource binding status for each of them. expectedCondTypes := determineExpectedCRPAndResourcePlacementStatusCondType(crp) - allRPS, rpsSetCondTypeCounter, err := r.setScheduledResourcePlacementStatuses( + allRPS, rpsSetCondTypeCounter, err := r.appendScheduledResourcePlacementStatuses( ctx, allRPS, selected, expectedCondTypes, crp, latestSchedulingPolicySnapshot, latestResourceSnapshot) if err != nil { return false, err @@ -939,7 +939,7 @@ func (r *Reconciler) setPlacementStatus( // For clusters that failed to get scheduled, set a resource placement status with the // failed to schedule condition for each of them. 
- allRPS = setFailedToScheduleResourcePlacementStatuses(allRPS, unselected, failedToScheduleClusterCount, klog.KObj(crp), crp.Generation) + allRPS = appendFailedToScheduleResourcePlacementStatuses(allRPS, unselected, failedToScheduleClusterCount, crp) crp.Status.PlacementStatuses = allRPS diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 1a864869f..56543800c 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -78,11 +78,11 @@ func calculateFailedToScheduleClusterCount(crp *fleetv1beta1.ClusterResourcePlac } // setFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters. -func setFailedToScheduleResourcePlacementStatuses( +func appendFailedToScheduleResourcePlacementStatuses( allRPS []fleetv1beta1.ResourcePlacementStatus, unselected []*fleetv1beta1.ClusterDecision, failedToScheduleClusterCount int, - crpRef klog.ObjectRef, crpGeneration int64, + crp *fleetv1beta1.ClusterResourcePlacement, ) []fleetv1beta1.ResourcePlacementStatus { // In the earlier step it has been guaranteed that failedToScheduleClusterCount is less than or equal to the // total number of unselected clusters; here Fleet still performs a sanity check. @@ -94,13 +94,13 @@ func setFailedToScheduleResourcePlacementStatuses( Type: string(fleetv1beta1.ResourceScheduledConditionType), Reason: ResourceScheduleFailedReason, Message: unselected[i].Reason, - ObservedGeneration: crpGeneration, + ObservedGeneration: crp.Generation, } meta.SetStatusCondition(&rps.Conditions, failedToScheduleCond) // The allRPS slice has been pre-allocated, so the append call will never produce a new // slice; here, however, Fleet will still return the old slice just in case. 
allRPS = append(allRPS, *rps) - klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", crpRef, "cluster", unselected[i].ClusterName) + klog.V(2).InfoS("Populated the resource placement status for the unscheduled cluster", "clusterResourcePlacement", klog.KObj(crp), "cluster", unselected[i].ClusterName) } return allRPS @@ -120,13 +120,13 @@ func determineExpectedCRPAndResourcePlacementStatusCondType(crp *fleetv1beta1.Cl } // setScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters. -func (r *Reconciler) setScheduledResourcePlacementStatuses( +func (r *Reconciler) appendScheduledResourcePlacementStatuses( ctx context.Context, allRPS []fleetv1beta1.ResourcePlacementStatus, selected []*fleetv1beta1.ClusterDecision, expectedCondTypes []condition.ResourceCondition, crp *fleetv1beta1.ClusterResourcePlacement, - lastestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, + latestSchedulingPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot, latestClusterResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot, ) ( []fleetv1beta1.ResourcePlacementStatus, @@ -137,7 +137,7 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses( var rpsSetCondTypeCounter [condition.TotalCondition][condition.TotalConditionStatus]int oldRPSMap := buildResourcePlacementStatusMap(crp) - bindingMap, err := r.buildClusterResourceBindings(ctx, crp, lastestSchedulingPolicySnapshot) + bindingMap, err := r.buildClusterResourceBindings(ctx, crp, latestSchedulingPolicySnapshot) if err != nil { return allRPS, rpsSetCondTypeCounter, err } @@ -167,10 +167,7 @@ func (r *Reconciler) setScheduledResourcePlacementStatuses( // Prepare the new conditions. 
binding := bindingMap[clusterDecision.ClusterName] - setStatusByCondType, err := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes) - if err != nil { - return allRPS, rpsSetCondTypeCounter, err - } + setStatusByCondType := r.setResourcePlacementStatusPerCluster(crp, latestClusterResourceSnapshot, binding, rps, expectedCondTypes) // Update the counter. for condType, condStatus := range setStatusByCondType { @@ -304,14 +301,14 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster( binding *fleetv1beta1.ClusterResourceBinding, status *fleetv1beta1.ResourcePlacementStatus, expectedCondTypes []condition.ResourceCondition, -) (map[condition.ResourceCondition]metav1.ConditionStatus, error) { +) map[condition.ResourceCondition]metav1.ConditionStatus { res := make(map[condition.ResourceCondition]metav1.ConditionStatus) if binding == nil { // The binding cannot be found; Fleet might be observing an in-between state where // the cluster has been picked but the binding has not been created yet. meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) res[condition.RolloutStartedCondition] = metav1.ConditionUnknown - return res, nil + return res } rolloutStartedCond := binding.GetCondition(string(condition.RolloutStartedCondition.ResourceBindingConditionType())) @@ -328,14 +325,14 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster( } meta.SetStatusCondition(&status.Conditions, cond) res[condition.RolloutStartedCondition] = metav1.ConditionFalse - return res, nil + return res case binding.Spec.ResourceSnapshotName != latestResourceSnapshot.Name: // The binding uses an out of date resource snapshot, and the RolloutStarted condition is // set to True, Unknown, or has become stale. Fleet might be observing an in-between state. 
meta.SetStatusCondition(&status.Conditions, condition.RolloutStartedCondition.UnknownResourceConditionPerCluster(crp.Generation)) klog.V(5).InfoS("The cluster resource binding has a stale RolloutStarted condition, or it links to an out of date resource snapshot yet has the RolloutStarted condition set to True or Unknown status", "clusterResourceBinding", klog.KObj(binding), "clusterResourcePlacement", klog.KObj(crp)) res[condition.RolloutStartedCondition] = metav1.ConditionUnknown - return res, nil + return res default: // The binding uses the latest resource snapshot. for _, i := range expectedCondTypes { @@ -382,6 +379,6 @@ func (r *Reconciler) setResourcePlacementStatusPerCluster( break // if the current condition is false, no need to populate the rest conditions } } - return res, nil + return res } } diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index b806bad83..3a264697a 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -60,27 +60,6 @@ func TestSetPlacementStatus(t *testing.T) { currentTime := time.Now() oldTransitionTime := metav1.NewTime(currentTime.Add(-1 * time.Hour)) - crp := &fleetv1beta1.ClusterResourcePlacement{ - ObjectMeta: metav1.ObjectMeta{ - Name: testName, - }, - Spec: fleetv1beta1.ClusterResourcePlacementSpec{ - ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ - { - Group: corev1.GroupName, - Version: "v1", - Kind: "Service", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"region": "east"}, - }, - }, - }, - }, - } - crpWithReportDiffApplyStrategy := crp.DeepCopy() - crpWithReportDiffApplyStrategy.Spec.Strategy.ApplyStrategy = &fleetv1beta1.ApplyStrategy{ - Type: fleetv1beta1.ApplyStrategyTypeReportDiff, - } crpGeneration := int64(25) selectedResources := []fleetv1beta1.ResourceIdentifier{ { @@ -107,9 +86,9 @@ 
func TestSetPlacementStatus(t *testing.T) { } tests := []struct { name string - crp *fleetv1beta1.ClusterResourcePlacement crpStatus fleetv1beta1.ClusterResourcePlacementStatus policy *fleetv1beta1.PlacementPolicy + strategy fleetv1beta1.RolloutStrategy latestPolicySnapshot *fleetv1beta1.ClusterSchedulingPolicySnapshot latestResourceSnapshot *fleetv1beta1.ClusterResourceSnapshot clusterResourceBindings []fleetv1beta1.ClusterResourceBinding @@ -119,7 +98,6 @@ func TestSetPlacementStatus(t *testing.T) { }{ { name: "empty policy and resource status", - crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -165,7 +143,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "unknown status of policy snapshot", - crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -225,7 +202,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "scheduler does not report the latest status for policy snapshot (annotation change)", - crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -286,7 +262,6 @@ func TestSetPlacementStatus(t *testing.T) { { // should not happen in the production as the policySnapshot is immutable name: "scheduler does not report the latest status for policy snapshot and snapshot observation does not match", - crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -345,7 +320,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement has been scheduled and no clusterResourcebindings and works", - 
crp: crp.DeepCopy(), policy: placementPolicyForTest(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ @@ -492,7 +466,6 @@ func TestSetPlacementStatus(t *testing.T) { { // TODO special handling no cluster is selected name: "the placement has been scheduled for pickAll; none of clusters are selected; no clusterResourceBindings and works", - crp: crp.DeepCopy(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -561,7 +534,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement scheduling failed", - crp: crp.DeepCopy(), policy: placementPolicyForTest(), latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ @@ -688,7 +660,6 @@ func TestSetPlacementStatus(t *testing.T) { // TODO special handling when selected cluster is 0 { name: "the placement scheduling succeeded when numberOfClusters is 0", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(0)), @@ -781,7 +752,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement is completed with clusterResourceBindings and works", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -1008,7 +978,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement is completed with clusterResourceBindings and works (no overrides)", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -1315,7 +1284,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "one of the placement condition is unknown with multiple bindings", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ 
PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(7)), @@ -1779,7 +1747,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement rollout started condition false", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -1902,7 +1869,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement apply condition false", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), @@ -2265,7 +2231,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "placement available condition false", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -2534,7 +2499,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "update the CRP and it rollout status becomes unknown (reset the existing conditions)", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(1)), @@ -2741,7 +2705,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for picFixed", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickFixedPlacementType, ClusterNames: []string{ @@ -2852,7 +2815,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for pickN", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(3)), @@ -2960,7 +2922,6 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "the placement cannot be fulfilled for pickN (reset existing status)", - crp: crp.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: 
ptr.To(int32(3)), @@ -3165,11 +3126,15 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "ReportDiff apply strategy, all diff reported", - crp: crpWithReportDiffApplyStrategy.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -3511,11 +3476,15 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "ReportDiff apply strategy, one cluster has not reported diff yet", - crp: crpWithReportDiffApplyStrategy.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -3807,11 +3776,15 @@ func TestSetPlacementStatus(t *testing.T) { }, { name: "ReportDiff apply strategy, one cluster has failed to report diff", - crp: crpWithReportDiffApplyStrategy.DeepCopy(), policy: &fleetv1beta1.PlacementPolicy{ PlacementType: fleetv1beta1.PickNPlacementType, NumberOfClusters: ptr.To(int32(2)), }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), @@ -4113,13 +4086,316 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, + { + 
name: "ReportDiff apply strategy, one cluster has failed to synchronized work", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(2)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + { + ClusterName: "member-2", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-1", + Labels: map[string]string{ + 
fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-2", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + ResourceOverrideSnapshots: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + ClusterResourceOverrideSnapshots: []string{"o-1", "o-2"}, + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: 
"member-2", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: true, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkNotSynchronizedYetReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: 
"member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + { + ClusterName: "member-2", + ApplicableClusterResourceOverrides: []string{"o-1", "o-2"}, + ApplicableResourceOverrides: []fleetv1beta1.NamespacedName{ + { + Name: "override-1", + Namespace: "override-ns", + }, + { + Name: "override-2", + }, + }, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverriddenSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: 
metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - crp := tc.crp - crp.Spec.Policy = tc.policy - crp.Status = tc.crpStatus + crp := &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testName, + }, + Spec: fleetv1beta1.ClusterResourcePlacementSpec{ + ResourceSelectors: []fleetv1beta1.ClusterResourceSelector{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: "Service", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"region": "east"}, + }, + }, + }, + Policy: tc.policy, + Strategy: tc.strategy, + }, + Status: tc.crpStatus, + } scheme := serviceScheme(t) var objects []client.Object for i := range tc.clusterResourceBindings { @@ -5794,10 +6070,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { Recorder: record.NewFakeRecorder(10), } status := fleetv1beta1.ResourcePlacementStatus{ClusterName: cluster} - got, err := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status, tc.expectedCondTypes) - if err != nil { - t.Fatalf("setResourcePlacementStatusPerCluster() got err %v, want nil", err) - } + got := r.setResourcePlacementStatusPerCluster(tc.crp, resourceSnapshot, tc.binding, &status, tc.expectedCondTypes) if diff := cmp.Diff(got, tc.wantConditionStatusMap); diff != "" { t.Errorf("setResourcePlacementStatusPerCluster() conditionStatus mismatch (-got, +want):\n%s", diff) } diff --git a/pkg/utils/condition/condition.go 
b/pkg/utils/condition/condition.go index 5e59a6fcf..43da62ee6 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -71,7 +71,7 @@ const ( // DiffReportedStatusUnknownReason is the reason string of the DiffReported condition when the // diff reporting has just started and its status is not yet to be known. - DiffReportedStatusUnknownReason = "DiffReportingJustStarted" + DiffReportedStatusUnknownReason = "DiffReportingPending" // DiffReportedStatusFalseReason is the reason string of the DiffReported condition when the // diff reporting has not been fully completed. @@ -273,7 +273,8 @@ const ( ) var ( - // Different set of condition types that Fleet will populate. + // Different set of condition types that Fleet will populate in sequential order based on the + // apply strategy in use. CondTypesForCSAAndSSAApplyStrategies = []ResourceCondition{ RolloutStartedCondition, OverriddenCondition, @@ -438,7 +439,7 @@ func (c ResourceCondition) UnknownClusterResourcePlacementCondition(generation i Status: metav1.ConditionUnknown, Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), Reason: DiffReportedStatusUnknownReason, - Message: fmt.Sprintf("There are still %d cluster(s) which just started checking for configuration differences", clusterCount), + Message: fmt.Sprintf("There are still %d cluster(s) in the process of checking for configuration differences", clusterCount), ObservedGeneration: generation, }, }[c] From b58787051bc3158032e6344a8aa2f9ef7baec2b3 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 23 Jan 2025 03:23:18 +0800 Subject: [PATCH 7/9] Minor fixes --- .../placement_status.go | 8 +- .../placement_status_test.go | 608 +++++++++++++++++- 2 files changed, 601 insertions(+), 15 deletions(-) diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 56543800c..477c03486 100644 --- 
a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -77,7 +77,7 @@ func calculateFailedToScheduleClusterCount(crp *fleetv1beta1.ClusterResourcePlac return failedToScheduleClusterCount } -// setFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters. +// appendFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters. func appendFailedToScheduleResourcePlacementStatuses( allRPS []fleetv1beta1.ResourcePlacementStatus, unselected []*fleetv1beta1.ClusterDecision, @@ -111,15 +111,15 @@ func appendFailedToScheduleResourcePlacementStatuses( func determineExpectedCRPAndResourcePlacementStatusCondType(crp *fleetv1beta1.ClusterResourcePlacement) []condition.ResourceCondition { switch { case crp.Spec.Strategy.ApplyStrategy == nil: - return condition.CondTypesForCSAAndSSAApplyStrategies + return condition.CondTypesForClientSideServerSideApplyStrategies case crp.Spec.Strategy.ApplyStrategy.Type == fleetv1beta1.ApplyStrategyTypeReportDiff: return condition.CondTypesForReportDiffApplyStrategy default: - return condition.CondTypesForCSAAndSSAApplyStrategies + return condition.CondTypesForClientSideServerSideApplyStrategies } } -// setScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters. +// appendScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters. 
func (r *Reconciler) appendScheduledResourcePlacementStatuses( ctx context.Context, allRPS []fleetv1beta1.ResourcePlacementStatus, diff --git a/pkg/controllers/clusterresourceplacement/placement_status_test.go b/pkg/controllers/clusterresourceplacement/placement_status_test.go index 3a264697a..1ec1ab2a7 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status_test.go +++ b/pkg/controllers/clusterresourceplacement/placement_status_test.go @@ -4372,6 +4372,592 @@ func TestSetPlacementStatus(t *testing.T) { }, }, }, + { + name: "Removing Applied/Available condition from status as apply strategy has changed", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: 
map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeReportDiff, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionFalse, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplyFailedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingDiffReported), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: true, + crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + 
Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: 
crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, + { + name: "Removing DiffReported condition from status as 
apply strategy has changed", + policy: &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(1)), + }, + strategy: fleetv1beta1.RolloutStrategy{ + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + latestPolicySnapshot: &fleetv1beta1.ClusterSchedulingPolicySnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.PolicyIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.CRPTrackingLabel: testName, + }, + Annotations: map[string]string{ + fleetv1beta1.NumberOfClustersAnnotation: strconv.Itoa(1), + }, + Generation: 1, + }, + Status: fleetv1beta1.SchedulingPolicySnapshotStatus{ + ObservedCRPGeneration: crpGeneration, + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.PolicySnapshotScheduled), + Reason: "Scheduled", + Message: "message", + ObservedGeneration: 1, + }, + }, + ClusterDecisions: []fleetv1beta1.ClusterDecision{ + { + ClusterName: "member-1", + Selected: true, + Reason: "success", + }, + }, + }, + }, + latestResourceSnapshot: &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.CRPTrackingLabel: testName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hash", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + clusterResourceBindings: []fleetv1beta1.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-diff-reported-1", + Labels: map[string]string{ + fleetv1beta1.CRPTrackingLabel: testName, + }, + Generation: 1, + }, + Spec: fleetv1beta1.ResourceBindingSpec{ + 
ResourceSnapshotName: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testName, 0), + SchedulingPolicySnapshotName: fmt.Sprintf(fleetv1beta1.PolicySnapshotNameFmt, testName, 0), + TargetCluster: "member-1", + ApplyStrategy: &fleetv1beta1.ApplyStrategy{ + Type: fleetv1beta1.ApplyStrategyTypeServerSideApply, + }, + }, + Status: fleetv1beta1.ResourceBindingStatus{ + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingRolloutStarted), + Reason: condition.RolloutStartedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingOverridden), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingWorkSynchronized), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingApplied), + Reason: condition.ApplySucceededReason, + ObservedGeneration: 1, + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceBindingAvailable), + Reason: condition.AvailableReason, + ObservedGeneration: 1, + }, + }, + }, + }, + }, + want: true, + crpStatus: fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: 
string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration - 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesDiffReportedConditionType), + Reason: condition.DiffReportedStatusTrueReason, + ObservedGeneration: crpGeneration 
- 1, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + wantStatus: &fleetv1beta1.ClusterResourcePlacementStatus{ + SelectedResources: selectedResources, + ObservedResourceIndex: "0", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementOverriddenConditionType), + Reason: condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementScheduledConditionType), + Reason: "Scheduled", + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ClusterResourcePlacementAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + PlacementStatuses: []fleetv1beta1.ResourcePlacementStatus{ + { + ClusterName: "member-1", + Conditions: []metav1.Condition{ + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceOverriddenConditionType), + Reason: 
condition.OverrideNotSpecifiedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceRolloutStartedConditionType), + Reason: condition.RolloutStartedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceScheduledConditionType), + Reason: condition.ScheduleSucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourceWorkSynchronizedConditionType), + Reason: condition.WorkSynchronizedReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAppliedConditionType), + Reason: condition.ApplySucceededReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + { + Status: metav1.ConditionTrue, + Type: string(fleetv1beta1.ResourcesAvailableConditionType), + Reason: condition.AvailableReason, + ObservedGeneration: crpGeneration, + LastTransitionTime: metav1.NewTime(currentTime), + }, + }, + }, + }, + }, + }, } for _, tc := range tests { @@ -4719,7 +5305,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "stale binding with false rollout started condition", @@ -4756,7 +5342,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "stale binding with true rollout started condition", @@ -4792,7 +5378,7 @@ func 
TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "completed binding", @@ -4903,7 +5489,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "unknown rollout started condition", @@ -4940,7 +5526,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "false overridden condition", @@ -5011,7 +5597,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "unknown work created condition", @@ -5096,7 +5682,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "false applied condition", @@ -5225,7 +5811,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "false available condition", @@ -5367,7 +5953,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "drifts and configuration diffs", @@ -5570,7 +6156,7 @@ func 
TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "always on drift detection", @@ -5708,7 +6294,7 @@ func TestSetResourcePlacementStatusPerCluster(t *testing.T) { }, }, }, - expectedCondTypes: condition.CondTypesForCSAAndSSAApplyStrategies, + expectedCondTypes: condition.CondTypesForClientSideServerSideApplyStrategies, }, { name: "ReportDiff apply strategy (diff reported)", From 88b2b714a13a8e9191ef521dcfe593781071d04d Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 23 Jan 2025 15:15:59 +0800 Subject: [PATCH 8/9] Adding a missing commit + some minor changes --- apis/placement/v1beta1/binding_types.go | 2 +- apis/placement/v1beta1/clusterresourceplacement_types.go | 4 ++-- .../clusterresourceplacement/placement_status.go | 6 ++++-- pkg/utils/condition/condition.go | 2 +- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/apis/placement/v1beta1/binding_types.go b/apis/placement/v1beta1/binding_types.go index 58f51e1b7..2fb12dbf4 100644 --- a/apis/placement/v1beta1/binding_types.go +++ b/apis/placement/v1beta1/binding_types.go @@ -182,7 +182,7 @@ const ( // * True: Fleet has successfully reported configuration differences for all resources. // * False: Fleet has not yet reported configuration differences for some resources, or an // error has occurred. - // * Unknown: The diff reporting has just started and its status is yet to be known. + // * Unknown: Fleet has not finished processing the diff reporting yet. 
ResourceBindingDiffReported ResourceBindingConditionType = "DiffReported" ) diff --git a/apis/placement/v1beta1/clusterresourceplacement_types.go b/apis/placement/v1beta1/clusterresourceplacement_types.go index e6cdb7054..dbc9e8e66 100644 --- a/apis/placement/v1beta1/clusterresourceplacement_types.go +++ b/apis/placement/v1beta1/clusterresourceplacement_types.go @@ -1151,7 +1151,7 @@ const ( // * True: Fleet has reported complete sets of configuration differences on all member clusters. // * False: Fleet has not yet reported complete sets of configuration differences on some member // clusters, or an error has occurred. - // * Unknown: The diff reporting has just started and its status is not yet to be known. + // * Unknown: Fleet has not finished processing the diff reporting yet. ClusterResourcePlacementDiffReportedConditionType ClusterResourcePlacementConditionType = "ClusterResourcePlacementDiffReported" ) @@ -1217,7 +1217,7 @@ const ( // * True: Fleet has reported the complete set of configuration differences on the member cluster. // * False: Fleet has not yet reported the complete set of configuration differences on the // member cluster, or an error has occurred. - // * Unknown: The diff reporting has just started and its status is yet to be known. + // * Unknown: Fleet has not finished processing the diff reporting yet. 
ResourcesDiffReportedConditionType ResourcePlacementConditionType = "DiffReported" ) diff --git a/pkg/controllers/clusterresourceplacement/placement_status.go b/pkg/controllers/clusterresourceplacement/placement_status.go index 477c03486..7b142665e 100644 --- a/pkg/controllers/clusterresourceplacement/placement_status.go +++ b/pkg/controllers/clusterresourceplacement/placement_status.go @@ -77,7 +77,8 @@ func calculateFailedToScheduleClusterCount(crp *fleetv1beta1.ClusterResourcePlac return failedToScheduleClusterCount } -// appendFailedToScheduleResourcePlacementStatuses sets the resource placement statuses for the failed to schedule clusters. +// appendFailedToScheduleResourcePlacementStatuses appends the resource placement statuses for +// the failed to schedule clusters to the list of all resource placement statuses. func appendFailedToScheduleResourcePlacementStatuses( allRPS []fleetv1beta1.ResourcePlacementStatus, unselected []*fleetv1beta1.ClusterDecision, @@ -119,7 +120,8 @@ func determineExpectedCRPAndResourcePlacementStatusCondType(crp *fleetv1beta1.Cl } } -// appendScheduledResourcePlacementStatuses sets the resource placement statuses for the scheduled clusters. +// appendScheduledResourcePlacementStatuses appends the resource placement statuses for the +// scheduled clusters to the list of all resource placement statuses. func (r *Reconciler) appendScheduledResourcePlacementStatuses( ctx context.Context, allRPS []fleetv1beta1.ResourcePlacementStatus, diff --git a/pkg/utils/condition/condition.go b/pkg/utils/condition/condition.go index 43da62ee6..6b7cb7369 100644 --- a/pkg/utils/condition/condition.go +++ b/pkg/utils/condition/condition.go @@ -275,7 +275,7 @@ const ( var ( // Different set of condition types that Fleet will populate in sequential order based on the // apply strategy in use. 
- CondTypesForCSAAndSSAApplyStrategies = []ResourceCondition{ + CondTypesForClientSideServerSideApplyStrategies = []ResourceCondition{ RolloutStartedCondition, OverriddenCondition, WorkSynchronizedCondition, From 5b18260616cf41c29f9432e46361762fb4f44fd9 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 23 Jan 2025 15:21:10 +0800 Subject: [PATCH 9/9] Use a diff word satisfy spelling check --- .../resourcechange/resourcechange_controller_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controllers/resourcechange/resourcechange_controller_test.go b/pkg/controllers/resourcechange/resourcechange_controller_test.go index 7c0528b14..da617566d 100644 --- a/pkg/controllers/resourcechange/resourcechange_controller_test.go +++ b/pkg/controllers/resourcechange/resourcechange_controller_test.go @@ -109,7 +109,7 @@ func TestFindPlacementsSelectedDeletedResV1Alpha1(t *testing.T) { Status: fleetv1alpha1.ClusterResourcePlacementStatus{ SelectedResources: []fleetv1alpha1.ResourceIdentifier{ { - Group: "abd", + Group: "xyz", Name: "not-deleted", Namespace: "bar", }, @@ -229,7 +229,7 @@ func TestFindPlacementsSelectedDeletedResV1Beta11(t *testing.T) { Status: placementv1beta1.ClusterResourcePlacementStatus{ SelectedResources: []placementv1beta1.ResourceIdentifier{ { - Group: "abd", + Group: "xyz", Name: "not-deleted", Namespace: "bar", },