From 08b66eeebf57f6f455717912eb972505c79235ae Mon Sep 17 00:00:00 2001 From: Wantong Date: Mon, 6 Jan 2025 09:46:17 -0800 Subject: [PATCH] upgrade clusterStagedUpdateRun APIs to v1beta1 (#1005) --- apis/placement/v1alpha1/common.go | 28 - apis/placement/v1beta1/commons.go | 25 + apis/placement/v1beta1/stageupdate_types.go | 469 +++++++ .../v1beta1/zz_generated.deepcopy.go | 438 ++++++ cmd/hubagent/workload/setup.go | 6 +- ...etes-fleet.io_clusterapprovalrequests.yaml | 131 ++ ...etes-fleet.io_clusterstagedupdateruns.yaml | 1194 +++++++++++++++++ ...leet.io_clusterstagedupdatestrategies.yaml | 139 ++ pkg/controllers/updaterun/controller.go | 37 +- .../updaterun/controller_integration_test.go | 96 +- pkg/controllers/updaterun/controller_test.go | 12 +- pkg/controllers/updaterun/execution.go | 93 +- .../updaterun/execution_integration_test.go | 96 +- pkg/controllers/updaterun/execution_test.go | 57 +- pkg/controllers/updaterun/initialization.go | 47 +- .../initialization_integration_test.go | 64 +- pkg/controllers/updaterun/suite_test.go | 4 +- pkg/controllers/updaterun/validation.go | 27 +- .../updaterun/validation_integration_test.go | 30 +- pkg/controllers/updaterun/validation_test.go | 109 +- 20 files changed, 2732 insertions(+), 370 deletions(-) create mode 100644 apis/placement/v1beta1/stageupdate_types.go diff --git a/apis/placement/v1alpha1/common.go b/apis/placement/v1alpha1/common.go index 0c69e50fd..91bb39652 100644 --- a/apis/placement/v1alpha1/common.go +++ b/apis/placement/v1alpha1/common.go @@ -15,34 +15,6 @@ const ( // ResourceOverrideSnapshotKind is the kind of the ResourceOverrideSnapshot. ResourceOverrideSnapshotKind = "ResourceOverrideSnapshot" - // ClusterStagedUpdateRunKind is the kind of the ClusterStagedUpdateRun. - ClusterStagedUpdateRunKind = "ClusterStagedUpdateRun" - - // ClusterStagedUpdateStrategyKind is the kind of the ClusterStagedUpdateStrategy. - ClusterStagedUpdateStrategyKind = "ClusterStagedUpdateStrategy" - - // ClusterApprovalRequestKind is the kind of the ClusterApprovalRequest. - ClusterApprovalRequestKind = "ClusterApprovalRequest" - - // ClusterStagedUpdateRunFinalizer is used by the ClusterStagedUpdateRun controller to make sure that the ClusterStagedUpdateRun - // object is not deleted until all its dependent resources are deleted. - ClusterStagedUpdateRunFinalizer = fleetPrefix + "stagedupdaterun-finalizer" - - // TargetUpdateRunLabel indicates the target update run on a staged run related object. - TargetUpdateRunLabel = fleetPrefix + "targetupdaterun" - - // UpdateRunDeleteStageName is the name of delete stage in the staged update run. - UpdateRunDeleteStageName = fleetPrefix + "deleteStage" - - // IsLatestUpdateRunApprovalLabel indicates if the approval is the latest approval on a staged run. - IsLatestUpdateRunApprovalLabel = fleetPrefix + "isLatestUpdateRunApproval" - - // TargetUpdatingStageNameLabel indicates the updating stage name on a staged run related object. - TargetUpdatingStageNameLabel = fleetPrefix + "targetUpdatingStage" - - // ApprovalTaskNameFmt is the format of the approval task name. - ApprovalTaskNameFmt = "%s-%s" - // OverrideClusterNameVariable is the reserved variable in the override value that will be replaced by the actual cluster name. 
OverrideClusterNameVariable = "${MEMBER-CLUSTER-NAME}" ) diff --git a/apis/placement/v1beta1/commons.go b/apis/placement/v1beta1/commons.go index a6c04b809..dcbbdee73 100644 --- a/apis/placement/v1beta1/commons.go +++ b/apis/placement/v1beta1/commons.go @@ -20,6 +20,12 @@ const ( WorkKind = "Work" // AppliedWorkKind represents the kind of AppliedWork. AppliedWorkKind = "AppliedWork" + // ClusterStagedUpdateRunKind is the kind of the ClusterStagedUpdateRun. + ClusterStagedUpdateRunKind = "ClusterStagedUpdateRun" + // ClusterStagedUpdateStrategyKind is the kind of the ClusterStagedUpdateStrategy. + ClusterStagedUpdateStrategyKind = "ClusterStagedUpdateStrategy" + // ClusterApprovalRequestKind is the kind of the ClusterApprovalRequest. + ClusterApprovalRequestKind = "ClusterApprovalRequest" ) const ( @@ -91,6 +97,25 @@ const ( // PreviousBindingStateAnnotation records the previous state of a binding. // This is used to remember if an "unscheduled" binding was moved from a "bound" state or a "scheduled" state. PreviousBindingStateAnnotation = fleetPrefix + "previous-binding-state" + + // ClusterStagedUpdateRunFinalizer is used by the ClusterStagedUpdateRun controller to make sure that the ClusterStagedUpdateRun + // object is not deleted until all its dependent resources are deleted. + ClusterStagedUpdateRunFinalizer = fleetPrefix + "stagedupdaterun-finalizer" + + // TargetUpdateRunLabel indicates the target update run on a staged run related object. + TargetUpdateRunLabel = fleetPrefix + "targetupdaterun" + + // UpdateRunDeleteStageName is the name of delete stage in the staged update run. + UpdateRunDeleteStageName = fleetPrefix + "deleteStage" + + // IsLatestUpdateRunApprovalLabel indicates if the approval is the latest approval on a staged run. + IsLatestUpdateRunApprovalLabel = fleetPrefix + "isLatestUpdateRunApproval" + + // TargetUpdatingStageNameLabel indicates the updating stage name on a staged run related object. + TargetUpdatingStageNameLabel = fleetPrefix + "targetUpdatingStage" + + // ApprovalTaskNameFmt is the format of the approval task name. + ApprovalTaskNameFmt = "%s-%s" ) // NamespacedName comprises a resource name, with a mandatory namespace. diff --git a/apis/placement/v1beta1/stageupdate_types.go b/apis/placement/v1beta1/stageupdate_types.go new file mode 100644 index 000000000..6b2362804 --- /dev/null +++ b/apis/placement/v1beta1/stageupdate_types.go @@ -0,0 +1,469 @@ +/* +Copyright (c) Microsoft Corporation. +Licensed under the MIT license. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:Cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=crsur +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// ClusterStagedUpdateRun represents a stage by stage update process that applies ClusterResourcePlacement +// selected resources to specified clusters. +// Resources from unselected clusters are removed after all stages in the update strategy are completed. +// Each ClusterStagedUpdateRun object corresponds to a single release of a specific resource version. +// The release is abandoned if the ClusterStagedUpdateRun object is deleted or the scheduling decision changes. +// The name of the ClusterStagedUpdateRun must conform to RFC 1123. 
+type ClusterStagedUpdateRun struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterStagedUpdateRun. The spec is immutable. + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The spec field is immutable" + Spec StagedUpdateRunSpec `json:"spec"` + + // The observed status of ClusterStagedUpdateRun. + // +kubebuilder:validation:Optional + Status StagedUpdateRunStatus `json:"status,omitempty"` +} + +// StagedUpdateRunSpec defines the desired rollout strategy and the snapshot indices of the resources to be updated. +// It specifies a stage-by-stage update process across selected clusters for the given ResourcePlacement object. +type StagedUpdateRunSpec struct { + // PlacementName is the name of placement that this update run is applied to. + // There can be multiple active update runs for each placement, but + // it's up to the DevOps team to ensure they don't conflict with each other. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=255 + PlacementName string `json:"placementName"` + + // The resource snapshot index of the selected resources to be updated across clusters. + // The index represents a group of resource snapshots that includes all the resources a ResourcePlacement selected. + // +kubebuilder:validation:Required + ResourceSnapshotIndex string `json:"resourceSnapshotIndex"` + + // The name of the update strategy that specifies the stages and the sequence + // in which the selected resources will be updated on the member clusters. The stages + // are computed according to the referenced strategy when the update run starts + // and recorded in the status field. + // +kubebuilder:validation:Required + StagedUpdateStrategyName string `json:"stagedRolloutStrategyName"` +} + +// +genclient +// +genclient:cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=sus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// ClusterStagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence +// in which the selected cluster resources will be updated on the member clusters. +type ClusterStagedUpdateStrategy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterStagedUpdateStrategy. + // +kubebuilder:validation:Required + Spec StagedUpdateStrategySpec `json:"spec"` +} + +// StagedUpdateStrategySpec defines the desired state of the StagedUpdateStrategy. +type StagedUpdateStrategySpec struct { + // Stage specifies the configuration for each update stage. + // +kubebuilder:validation:MaxItems=31 + // +kubebuilder:validation:Required + Stages []StageConfig `json:"stages"` +} + +// ClusterStagedUpdateStrategyList contains a list of StagedUpdateStrategy. +// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterStagedUpdateStrategyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterStagedUpdateStrategy `json:"items"` +} + +// StageConfig describes a single update stage. +// The clusters in each stage are updated sequentially. +// The update stops if any of the updates fail. +type StageConfig struct { + // The name of the stage. 
This MUST be unique within the same StagedUpdateStrategy. + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern="[A-Za-z0-9]+$" + // +kubebuilder:validation:Required + Name string `json:"name"` + + // LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + // for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + // If the label selector is absent, the stage includes all the selected clusters. + // +kubebuilder:validation:Optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // The label key used to sort the selected clusters. + // The clusters within the stage are updated sequentially following the rule below: + // - primary: Ascending order based on the value of the label key, interpreted as integers if present. + // - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. + // +kubebuilder:validation:Optional + SortingLabelKey *string `json:"sortingLabelKey,omitempty"` + + // The collection of tasks that each stage needs to complete successfully before moving to the next stage. + // Each task is executed in parallel and there cannot be more than one task of the same type. + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:Optional + AfterStageTasks []AfterStageTask `json:"afterStageTasks,omitempty"` +} + +// AfterStageTask is the collection of post-stage tasks that ALL need to be completed before moving to the next stage. +type AfterStageTask struct { + // The type of the after-stage task. + // +kubebuilder:validation:Enum=TimedWait;Approval + // +kubebuilder:validation:Required + Type AfterStageTaskType `json:"type"` + + // The time to wait after all the clusters in the current stage complete the update before moving to the next stage. + // +kubebuilder:default="1h" + // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(s|m|h))+$" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Optional + WaitTime metav1.Duration `json:"waitTime,omitempty"` +} + +// StagedUpdateRunStatus defines the observed state of the ClusterStagedUpdateRun. +type StagedUpdateRunStatus struct { + // PolicySnapShotIndexUsed records the policy snapshot index of the ClusterResourcePlacement (CRP) that + // the update run is based on. The index represents the latest policy snapshot at the start of the update run. + // If a newer policy snapshot is detected after the run starts, the staged update run is abandoned. + // The scheduler must identify all clusters that meet the current policy before the update run begins. + // All clusters involved in the update run are selected from the list of clusters scheduled by the CRP according + // to the current policy. + // +kubebuilder:validation:Optional + PolicySnapshotIndexUsed string `json:"policySnapshotIndexUsed,omitempty"` + + // PolicyObservedClusterCount records the number of observed clusters in the policy snapshot. + // It is recorded at the beginning of the update run from the policy snapshot object. + // If the `ObservedClusterCount` value is updated during the update run, the update run is abandoned. + // +kubebuilder:validation:Optional + PolicyObservedClusterCount int `json:"policyObservedClusterCount,omitempty"` + + // ApplyStrategy is the apply strategy that the stagedUpdateRun is using. + // It is the same as the apply strategy in the CRP when the staged update run starts. 
+ // The apply strategy is not updated during the update run even if it changes in the CRP. + // +kubebuilder:validation:Optional + ApplyStrategy *ApplyStrategy `json:"appliedStrategy,omitempty"` + + // StagedUpdateStrategySnapshot is the snapshot of the StagedUpdateStrategy used for the update run. + // The snapshot is immutable during the update run. + // The strategy is applied to the list of clusters scheduled by the CRP according to the current policy. + // The update run fails to initialize if the strategy fails to produce a valid list of stages where each selected + // cluster is included in exactly one stage. + // +kubebuilder:validation:Optional + StagedUpdateStrategySnapshot *StagedUpdateStrategySpec `json:"stagedUpdateStrategySnapshot,omitempty"` + + // StagesStatus lists the current updating status of each stage. + // The list is empty if the update run is not started or failed to initialize. + // +kubebuilder:validation:Optional + StagesStatus []StageUpdatingStatus `json:"stagesStatus,omitempty"` + + // DeletionStageStatus lists the current status of the deletion stage. The deletion stage + // removes all the resources from the clusters that are not selected by the + // current policy after all the update stages are completed. + // +kubebuilder:validation:Optional + DeletionStageStatus *StageUpdatingStatus `json:"deletionStageStatus,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for StagedUpdateRun. + // Known conditions are "Initialized", "Progressing", "Succeeded". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// StagedUpdateRunConditionType identifies a specific condition of the StagedUpdateRun. +// +enum +type StagedUpdateRunConditionType string + +const ( + // StagedUpdateRunConditionInitialized indicates whether the staged update run is initialized, meaning it + // has computed all the stages according to the referenced strategy and is ready to start the update. + // Its condition status can be one of the following: + // - "True": The staged update run is initialized successfully. + // - "False": The staged update run encountered an error during initialization and aborted. + // - "Unknown": The staged update run initialization has started. + StagedUpdateRunConditionInitialized StagedUpdateRunConditionType = "Initialized" + + // StagedUpdateRunConditionProgressing indicates whether the staged update run is making progress. + // Its condition status can be one of the following: + // - "True": The staged update run is making progress. + // - "False": The staged update run is waiting/paused. + // - "Unknown" means it is unknown. + StagedUpdateRunConditionProgressing StagedUpdateRunConditionType = "Progressing" + + // StagedUpdateRunConditionSucceeded indicates whether the staged update run is completed successfully. + // Its condition status can be one of the following: + // - "True": The staged update run is completed successfully. + // - "False": The staged update run encountered an error and stopped. + StagedUpdateRunConditionSucceeded StagedUpdateRunConditionType = "Succeeded" +) + +// StageUpdatingStatus defines the status of the update run in a stage. +type StageUpdatingStatus struct { + // The name of the stage. + // +kubebuilder:validation:Required + StageName string `json:"stageName"` + + // The list of each cluster's updating status in this stage. 
+ // +kubebuilder:validation:Required + Clusters []ClusterUpdatingStatus `json:"clusters"` + + // The status of the post-update tasks associated with the current stage. + // Empty if the stage has not finished updating all the clusters. + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:Optional + AfterStageTaskStatus []AfterStageTaskStatus `json:"afterStageTaskStatus,omitempty"` + + // The time when the update started on the stage. Empty if the stage has not started updating. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + StartTime *metav1.Time `json:"startTime,omitempty"` + + // The time when the update finished on the stage. Empty if the stage has not started updating. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + EndTime *metav1.Time `json:"endTime,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + // Known conditions are "Progressing", "Succeeded". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// StageUpdatingConditionType identifies a specific condition of the stage that is being updated. +// +enum +type StageUpdatingConditionType string + +const ( + // StageUpdatingConditionProgressing indicates whether the stage updating is making progress. + // Its condition status can be one of the following: + // - "True": The stage updating is making progress. + // - "False": The stage updating is waiting/pausing. + StageUpdatingConditionProgressing StageUpdatingConditionType = "Progressing" + + // StageUpdatingConditionSucceeded indicates whether the stage updating is completed successfully. + // Its condition status can be one of the following: + // - "True": The stage updating is completed successfully. + // - "False": The stage updating encountered an error and stopped. + StageUpdatingConditionSucceeded StageUpdatingConditionType = "Succeeded" +) + +// ClusterUpdatingStatus defines the status of the update run on a cluster. +type ClusterUpdatingStatus struct { + // The name of the cluster. + // +kubebuilder:validation:Required + ClusterName string `json:"clusterName"` + + // ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + // The list is computed at the beginning of the update run and not updated during the update run. + // The list is empty if there are no resource overrides associated with the cluster. + // +kubebuilder:validation:Optional + ResourceOverrideSnapshots []NamespacedName `json:"resourceOverrideSnapshots,omitempty"` + + // ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + // associated with the cluster. + // The list is computed at the beginning of the update run and not updated during the update run. + // The list is empty if there are no cluster overrides associated with the cluster. + // +kubebuilder:validation:Optional + ClusterResourceOverrideSnapshots []string `json:"clusterResourceOverrideSnapshots,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. 
+ // Known conditions are "Started", "Succeeded". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// ClusterUpdatingStatusConditionType identifies a specific condition of the UpdatingStatus of the cluster. +// +enum +type ClusterUpdatingStatusConditionType string + +const ( + // ClusterUpdatingConditionStarted indicates whether the cluster updating has started. + // Its condition status can be one of the following: + // - "True": The cluster updating has started. + ClusterUpdatingConditionStarted ClusterUpdatingStatusConditionType = "Started" + + // ClusterUpdatingConditionSucceeded indicates whether the cluster updating is completed successfully. + // Its condition status can be one of the following: + // - "True": The cluster updating is completed successfully. + // - "False": The cluster updating encountered an error and stopped. + ClusterUpdatingConditionSucceeded ClusterUpdatingStatusConditionType = "Succeeded" +) + +type AfterStageTaskStatus struct { + // The type of the post-update task. + // +kubebuilder:validation:Enum=TimedWait;Approval + // +kubebuilder:validation:Required + Type AfterStageTaskType `json:"type"` + + // The name of the approval request object that is created for this stage. + // Only valid if the AfterStageTaskType is Approval. + // +kubebuilder:validation:Optional + ApprovalRequestName string `json:"approvalRequestName,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for the specific type of post-update task. + // Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// AfterStageTaskType identifies a specific type of the AfterStageTask. +// +enum +type AfterStageTaskType string + +const ( + // AfterStageTaskTypeTimedWait indicates the post-stage task is a timed wait. + AfterStageTaskTypeTimedWait AfterStageTaskType = "TimedWait" + + // AfterStageTaskTypeApproval indicates the post-stage task is an approval. + AfterStageTaskTypeApproval AfterStageTaskType = "Approval" +) + +// AfterStageTaskConditionType identifies a specific condition of the AfterStageTask. +// +enum +type AfterStageTaskConditionType string + +const ( + // AfterStageTaskConditionApprovalRequestCreated indicates if the approval request has been created. + // Its condition status can be: + // - "True": The approval request has been created. + AfterStageTaskConditionApprovalRequestCreated AfterStageTaskConditionType = "ApprovalRequestCreated" + + // AfterStageTaskConditionApprovalRequestApproved indicates if the approval request has been approved. + // Its condition status can be: + // - "True": The approval request has been approved. + AfterStageTaskConditionApprovalRequestApproved AfterStageTaskConditionType = "ApprovalRequestApproved" + + // AfterStageTaskConditionWaitTimeElapsed indicates if the wait time after each stage has elapsed. + // If the status is "False", the condition message will include the remaining wait time. + // Its condition status can be: + // - "True": The wait time has elapsed. + // - "False": The wait time has not elapsed. + AfterStageTaskConditionWaitTimeElapsed AfterStageTaskConditionType = "WaitTimeElapsed" +) + +// ClusterStagedUpdateRunList contains a list of ClusterStagedUpdateRun. 
+// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterStagedUpdateRunList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterStagedUpdateRun `json:"items"` +} + +// +genclient +// +genclient:Cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=careq +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:storageversion + +// ClusterApprovalRequest defines a request for user approval for cluster staged update run. +// The request object MUST have the following labels: +// - `TargetUpdateRun`: Points to the cluster staged update run that this approval request is for. +// - `TargetStage`: The name of the stage that this approval request is for. +// - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. +type ClusterApprovalRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterApprovalRequest. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The spec field is immutable" + // +kubebuilder:validation:Required + Spec ApprovalRequestSpec `json:"spec"` + + // The observed state of ClusterApprovalRequest. + // +kubebuilder:validation:Optional + Status ApprovalRequestStatus `json:"status,omitempty"` +} + +// ApprovalRequestSpec defines the desired state of the update run approval request. +// The entire spec is immutable. +type ApprovalRequestSpec struct { + // The name of the staged update run that this approval request is for. + // +kubebuilder:validation:Required + TargetUpdateRun string `json:"parentStageRollout"` + + // The name of the update stage that this approval request is for. + // +kubebuilder:validation:Required + TargetStage string `json:"targetStage"` +} + +// ApprovalRequestStatus defines the observed state of the ClusterApprovalRequest. +type ApprovalRequestStatus struct { + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for the specific type of post-update task. + // Known conditions are "Approved" and "ApprovalAccepted". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// ApprovalRequestConditionType identifies a specific condition of the ClusterApprovalRequest. +type ApprovalRequestConditionType string + +const ( + // ApprovalRequestConditionApproved indicates if the approval request was approved. + // Its condition status can be: + // - "True": The request is approved. + ApprovalRequestConditionApproved ApprovalRequestConditionType = "Approved" +) + +// ClusterApprovalRequestList contains a list of ClusterApprovalRequest. 
+// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterApprovalRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterApprovalRequest `json:"items"` +} + +func init() { + SchemeBuilder.Register( + &ClusterStagedUpdateRun{}, &ClusterStagedUpdateRunList{}, &ClusterStagedUpdateStrategy{}, &ClusterStagedUpdateStrategyList{}, &ClusterApprovalRequest{}, &ClusterApprovalRequestList{}, + ) +} diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go index a3406caf3..392ab3571 100644 --- a/apis/placement/v1beta1/zz_generated.deepcopy.go +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -35,6 +35,44 @@ func (in *Affinity) DeepCopy() *Affinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AfterStageTask) DeepCopyInto(out *AfterStageTask) { + *out = *in + out.WaitTime = in.WaitTime +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterStageTask. +func (in *AfterStageTask) DeepCopy() *AfterStageTask { + if in == nil { + return nil + } + out := new(AfterStageTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AfterStageTaskStatus) DeepCopyInto(out *AfterStageTaskStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AfterStageTaskStatus. +func (in *AfterStageTaskStatus) DeepCopy() *AfterStageTaskStatus { + if in == nil { + return nil + } + out := new(AfterStageTaskStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AppliedResourceMeta) DeepCopyInto(out *AppliedResourceMeta) { *out = *in @@ -165,6 +203,43 @@ func (in *ApplyStrategy) DeepCopy() *ApplyStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApprovalRequestSpec) DeepCopyInto(out *ApprovalRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequestSpec. +func (in *ApprovalRequestSpec) DeepCopy() *ApprovalRequestSpec { + if in == nil { + return nil + } + out := new(ApprovalRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApprovalRequestStatus) DeepCopyInto(out *ApprovalRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequestStatus. 
+func (in *ApprovalRequestStatus) DeepCopy() *ApprovalRequestStatus { + if in == nil { + return nil + } + out := new(ApprovalRequestStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterAffinity) DeepCopyInto(out *ClusterAffinity) { *out = *in @@ -192,6 +267,65 @@ func (in *ClusterAffinity) DeepCopy() *ClusterAffinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterApprovalRequest) DeepCopyInto(out *ClusterApprovalRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterApprovalRequest. +func (in *ClusterApprovalRequest) DeepCopy() *ClusterApprovalRequest { + if in == nil { + return nil + } + out := new(ClusterApprovalRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterApprovalRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterApprovalRequestList) DeepCopyInto(out *ClusterApprovalRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterApprovalRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterApprovalRequestList. +func (in *ClusterApprovalRequestList) DeepCopy() *ClusterApprovalRequestList { + if in == nil { + return nil + } + out := new(ClusterApprovalRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterApprovalRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDecision) DeepCopyInto(out *ClusterDecision) { *out = *in @@ -614,6 +748,155 @@ func (in *ClusterSelectorTerm) DeepCopy() *ClusterSelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateRun) DeepCopyInto(out *ClusterStagedUpdateRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateRun. +func (in *ClusterStagedUpdateRun) DeepCopy() *ClusterStagedUpdateRun { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterStagedUpdateRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateRunList) DeepCopyInto(out *ClusterStagedUpdateRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterStagedUpdateRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateRunList. +func (in *ClusterStagedUpdateRunList) DeepCopy() *ClusterStagedUpdateRunList { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateStrategy) DeepCopyInto(out *ClusterStagedUpdateStrategy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateStrategy. +func (in *ClusterStagedUpdateStrategy) DeepCopy() *ClusterStagedUpdateStrategy { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateStrategy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateStrategyList) DeepCopyInto(out *ClusterStagedUpdateStrategyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterStagedUpdateStrategy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateStrategyList. +func (in *ClusterStagedUpdateStrategyList) DeepCopy() *ClusterStagedUpdateStrategyList { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateStrategyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateStrategyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterUpdatingStatus) DeepCopyInto(out *ClusterUpdatingStatus) { + *out = *in + if in.ResourceOverrideSnapshots != nil { + in, out := &in.ResourceOverrideSnapshots, &out.ResourceOverrideSnapshots + *out = make([]NamespacedName, len(*in)) + copy(*out, *in) + } + if in.ClusterResourceOverrideSnapshots != nil { + in, out := &in.ClusterResourceOverrideSnapshots, &out.ClusterResourceOverrideSnapshots + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpdatingStatus. +func (in *ClusterUpdatingStatus) DeepCopy() *ClusterUpdatingStatus { + if in == nil { + return nil + } + out := new(ClusterUpdatingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiffDetails) DeepCopyInto(out *DiffDetails) { *out = *in @@ -1271,6 +1554,161 @@ func (in *ServerSideApplyConfig) DeepCopy() *ServerSideApplyConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageConfig) DeepCopyInto(out *StageConfig) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SortingLabelKey != nil { + in, out := &in.SortingLabelKey, &out.SortingLabelKey + *out = new(string) + **out = **in + } + if in.AfterStageTasks != nil { + in, out := &in.AfterStageTasks, &out.AfterStageTasks + *out = make([]AfterStageTask, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageConfig. +func (in *StageConfig) DeepCopy() *StageConfig { + if in == nil { + return nil + } + out := new(StageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageUpdatingStatus) DeepCopyInto(out *StageUpdatingStatus) { + *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]ClusterUpdatingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AfterStageTaskStatus != nil { + in, out := &in.AfterStageTaskStatus, &out.AfterStageTaskStatus + *out = make([]AfterStageTaskStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageUpdatingStatus. +func (in *StageUpdatingStatus) DeepCopy() *StageUpdatingStatus { + if in == nil { + return nil + } + out := new(StageUpdatingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StagedUpdateRunSpec) DeepCopyInto(out *StagedUpdateRunSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateRunSpec. +func (in *StagedUpdateRunSpec) DeepCopy() *StagedUpdateRunSpec { + if in == nil { + return nil + } + out := new(StagedUpdateRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateRunStatus) DeepCopyInto(out *StagedUpdateRunStatus) { + *out = *in + if in.ApplyStrategy != nil { + in, out := &in.ApplyStrategy, &out.ApplyStrategy + *out = new(ApplyStrategy) + (*in).DeepCopyInto(*out) + } + if in.StagedUpdateStrategySnapshot != nil { + in, out := &in.StagedUpdateStrategySnapshot, &out.StagedUpdateStrategySnapshot + *out = new(StagedUpdateStrategySpec) + (*in).DeepCopyInto(*out) + } + if in.StagesStatus != nil { + in, out := &in.StagesStatus, &out.StagesStatus + *out = make([]StageUpdatingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionStageStatus != nil { + in, out := &in.DeletionStageStatus, &out.DeletionStageStatus + *out = new(StageUpdatingStatus) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateRunStatus. +func (in *StagedUpdateRunStatus) DeepCopy() *StagedUpdateRunStatus { + if in == nil { + return nil + } + out := new(StagedUpdateRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateStrategySpec) DeepCopyInto(out *StagedUpdateStrategySpec) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateStrategySpec. +func (in *StagedUpdateStrategySpec) DeepCopy() *StagedUpdateStrategySpec { + if in == nil { + return nil + } + out := new(StagedUpdateStrategySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Toleration) DeepCopyInto(out *Toleration) { *out = *in diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 4b6e7d2a9..73c65696d 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -87,9 +87,9 @@ var ( } clusterStagedUpdateRunGVKs = []schema.GroupVersionKind{ - placementv1alpha1.GroupVersion.WithKind(placementv1alpha1.ClusterStagedUpdateRunKind), - placementv1alpha1.GroupVersion.WithKind(placementv1alpha1.ClusterStagedUpdateStrategyKind), - placementv1alpha1.GroupVersion.WithKind(placementv1alpha1.ClusterApprovalRequestKind), + placementv1alpha1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateRunKind), + placementv1alpha1.GroupVersion.WithKind(placementv1beta1.ClusterStagedUpdateStrategyKind), + placementv1alpha1.GroupVersion.WithKind(placementv1beta1.ClusterApprovalRequestKind), } clusterInventoryGVKs = []schema.GroupVersionKind{ diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml index 03d208d0a..b98c8f2e5 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml @@ -147,6 +147,137 @@ spec: - spec type: object served: true + storage: false + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + description: |- + ClusterApprovalRequest defines a request for user approval for cluster staged update run. + The request object MUST have the following labels: + - `TargetUpdateRun`: Points to the cluster staged update run that this approval request is for. + - `TargetStage`: The name of the stage that this approval request is for. + - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterApprovalRequest. + properties: + parentStageRollout: + description: The name of the staged update run that this approval + request is for. + type: string + targetStage: + description: The name of the update stage that this approval request + is for. + type: string + required: + - parentStageRollout + - targetStage + type: object + x-kubernetes-validations: + - message: The spec field is immutable + rule: self == oldSelf + status: + description: The observed state of ClusterApprovalRequest. + properties: + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of post-update task. + Known conditions are "Approved" and "ApprovalAccepted". 
+ items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml index f52084ea5..bc6b6bb44 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml @@ -1210,6 +1210,1200 @@ spec: - spec type: object served: true + storage: false + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + description: |- + ClusterStagedUpdateRun represents a stage by stage update process that applies ClusterResourcePlacement + selected resources to specified clusters. + Resources from unselected clusters are removed after all stages in the update strategy are completed. + Each ClusterStagedUpdateRun object corresponds to a single release of a specific resource version. + The release is abandoned if the ClusterStagedUpdateRun object is deleted or the scheduling decision changes. + The name of the ClusterStagedUpdateRun must conform to RFC 1123. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterStagedUpdateRun. The spec is + immutable. + properties: + placementName: + description: |- + PlacementName is the name of placement that this update run is applied to. + There can be multiple active update runs for each placement, but + it's up to the DevOps team to ensure they don't conflict with each other. + maxLength: 255 + type: string + resourceSnapshotIndex: + description: |- + The resource snapshot index of the selected resources to be updated across clusters. + The index represents a group of resource snapshots that includes all the resources a ResourcePlacement selected. + type: string + stagedRolloutStrategyName: + description: |- + The name of the update strategy that specifies the stages and the sequence + in which the selected resources will be updated on the member clusters. The stages + are computed according to the referenced strategy when the update run starts + and recorded in the status field. 
+ type: string + required: + - placementName + - resourceSnapshotIndex + - stagedRolloutStrategyName + type: object + x-kubernetes-validations: + - message: The spec field is immutable + rule: self == oldSelf + status: + description: The observed status of ClusterStagedUpdateRun. + properties: + appliedStrategy: + description: |- + ApplyStrategy is the apply strategy that the stagedUpdateRun is using. + It is the same as the apply strategy in the CRP when the staged update run starts. + The apply strategy is not updated during the update run even if it changes in the CRP. + properties: + allowCoOwnership: + description: |- + AllowCoOwnership controls whether co-ownership between Fleet and other agents are allowed + on a Fleet-managed resource. If set to false, Fleet will refuse to apply manifests to + a resource that has been owned by one or more non-Fleet agents. + + + Note that Fleet does not support the case where one resource is being placed multiple + times by different CRPs on the same member cluster. An apply error will be returned if + Fleet finds that a resource has been owned by another placement attempt by Fleet, even + with the AllowCoOwnership setting set to true. + type: boolean + comparisonOption: + default: PartialComparison + description: |- + ComparisonOption controls how Fleet compares the desired state of a resource, as kept in + a hub cluster manifest, with the current state of the resource (if applicable) in the + member cluster. + + + Available options are: + + + * PartialComparison: with this option, Fleet will compare only fields that are managed by + Fleet, i.e., the fields that are specified explicitly in the hub cluster manifest. + Unmanaged fields are ignored. This is the default option. + + + * FullComparison: with this option, Fleet will compare all fields of the resource, + even if the fields are absent from the hub cluster manifest. + + + Consider using the PartialComparison option if you would like to: + + + * use the default values for certain fields; or + * let another agent, e.g., HPAs, VPAs, etc., on the member cluster side manage some fields; or + * allow ad-hoc or cluster-specific settings on the member cluster side. + + + To use the FullComparison option, it is recommended that you: + + + * specify all fields as appropriate in the hub cluster, even if you are OK with using default + values; + * make sure that no fields are managed by agents other than Fleet on the member cluster + side, such as HPAs, VPAs, or other controllers. + + + See the Fleet documentation for further explanations and usage examples. + enum: + - PartialComparison + - FullComparison + type: string + serverSideApplyConfig: + description: ServerSideApplyConfig defines the configuration for + server side apply. It is honored only when type is ServerSideApply. + properties: + force: + description: |- + Force represents to force apply to succeed when resolving the conflicts + For any conflicting fields, + - If true, use the values from the resource to be applied to overwrite the values of the existing resource in the + target cluster, as well as take over ownership of such fields. + - If false, apply will fail with the reason ApplyConflictWithOtherApplier. + + + For non-conflicting fields, values stay unchanged and ownership are shared between appliers. + type: boolean + type: object + type: + default: ClientSideApply + description: |- + Type is the apply strategy to use; it determines how Fleet applies manifests from the + hub cluster to a member cluster. 
+ + + Available options are: + + + * ClientSideApply: Fleet uses three-way merge to apply manifests, similar to how kubectl + performs a client-side apply. This is the default option. + + + Note that this strategy requires that Fleet keep the last applied configuration in the + annotation of an applied resource. If the object gets so large that apply ops can no longer + be executed, Fleet will switch to server-side apply. + + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + + * ServerSideApply: Fleet uses server-side apply to apply manifests; Fleet itself will + become the field manager for specified fields in the manifests. Specify + ServerSideApplyConfig as appropriate if you would like Fleet to take over field + ownership upon conflicts. This is the recommended option for most scenarios; it might + help reduce object size and safely resolve conflicts between field values. For more + information, please refer to the Kubernetes documentation + (https://kubernetes.io/docs/reference/using-api/server-side-apply/#comparison-with-client-side-apply). + + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + + * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster + with its current state (if applicable) on the member cluster side, and report any + differences. No actual apply ops would be executed, and resources will be left alone as they + are on the member clusters. + + + If configuration differences are found on a resource, Fleet will consider this as an apply + error, which might block rollout depending on the specified rollout strategy. + + + Use ComparisonOption setting to control how the difference is calculated. + + + ClientSideApply and ServerSideApply apply strategies only work when Fleet can assume + ownership of a resource (e.g., the resource is created by Fleet, or Fleet has taken over + the resource). See the comments on the WhenToTakeOver field for more information. + ReportDiff apply strategy, however, will function regardless of Fleet's ownership + status. One may set up a CRP with the ReportDiff strategy and the Never takeover option, + and this will turn Fleet into a detection tool that reports only configuration differences + but do not touch any resources on the member cluster side. + + + For a comparison between the different strategies and usage examples, refer to the + Fleet documentation. + enum: + - ClientSideApply + - ServerSideApply + - ReportDiff + type: string + whenToApply: + default: Always + description: |- + WhenToApply controls when Fleet would apply the manifests on the hub cluster to the member + clusters. + + + Available options are: + + + * Always: with this option, Fleet will periodically apply hub cluster manifests + on the member cluster side; this will effectively overwrite any change in the fields + managed by Fleet (i.e., specified in the hub cluster manifest). This is the default + option. + + + Note that this option would revert any ad-hoc changes made on the member cluster side in + the managed fields; if you would like to make temporary edits on the member cluster side + in the managed fields, switch to IfNotDrifted option. Note that changes in unmanaged + fields will be left alone; if you use the FullDiff compare option, such changes will + be reported as drifts. 
+ + + * IfNotDrifted: with this option, Fleet will stop applying hub cluster manifests on + clusters that have drifted from the desired state; apply ops would still continue on + the rest of the clusters. Drifts are calculated using the ComparisonOption, + as explained in the corresponding field. + + + Use this option if you would like Fleet to detect drifts in your multi-cluster setup. + A drift occurs when an agent makes an ad-hoc change on the member cluster side that + makes affected resources deviate from its desired state as kept in the hub cluster; + and this option grants you an opportunity to view the drift details and take actions + accordingly. The drift details will be reported in the CRP status. + + + To fix a drift, you may: + + + * revert the changes manually on the member cluster side + * update the hub cluster manifest; this will trigger Fleet to apply the latest revision + of the manifests, which will overwrite the drifted fields + (if they are managed by Fleet) + * switch to the Always option; this will trigger Fleet to apply the current revision + of the manifests, which will overwrite the drifted fields (if they are managed by Fleet). + * if applicable and necessary, delete the drifted resources on the member cluster side; Fleet + will attempt to re-create them using the hub cluster manifests + enum: + - Always + - IfNotDrifted + type: string + whenToTakeOver: + default: Always + description: |- + WhenToTakeOver determines the action to take when Fleet applies resources to a member + cluster for the first time and finds out that the resource already exists in the cluster. + + + This setting is most relevant in cases where you would like Fleet to manage pre-existing + resources on a member cluster. + + + Available options include: + + + * Always: with this action, Fleet will apply the hub cluster manifests to the member + clusters even if the affected resources already exist. This is the default action. + + + Note that this might lead to fields being overwritten on the member clusters, if they + are specified in the hub cluster manifests. + + + * IfNoDiff: with this action, Fleet will apply the hub cluster manifests to the member + clusters if (and only if) pre-existing resources look the same as the hub cluster manifests. + + + This is a safer option as pre-existing resources that are inconsistent with the hub cluster + manifests will not be overwritten; Fleet will ignore them until the inconsistencies + are resolved properly: any change you make to the hub cluster manifests would not be + applied, and if you delete the manifests or even the ClusterResourcePlacement itself + from the hub cluster, these pre-existing resources would not be taken away. + + + Fleet will check for inconsistencies in accordance with the ComparisonOption setting. See also + the comments on the ComparisonOption field for more information. + + + If a diff has been found in a field that is **managed** by Fleet (i.e., the field + **is specified ** in the hub cluster manifest), consider one of the following actions: + * set the field in the member cluster to be of the same value as that in the hub cluster + manifest. + * update the hub cluster manifest so that its field value matches with that in the member + cluster. + * switch to the Always action, which will allow Fleet to overwrite the field with the + value in the hub cluster manifest. 
+ + + If a diff has been found in a field that is **not managed** by Fleet (i.e., the field + **is not specified** in the hub cluster manifest), consider one of the following actions: + * remove the field from the member cluster. + * update the hub cluster manifest so that the field is included in the hub cluster manifest. + + + If appropriate, you may also delete the object from the member cluster; Fleet will recreate + it using the hub cluster manifest. + + + * Never: with this action, Fleet will not apply a hub cluster manifest to the member + clusters if there is a corresponding pre-existing resource. However, if a manifest + has never been applied yet; or it has a corresponding resource which Fleet has assumed + ownership, apply op will still be executed. + + + This is the safest option; one will have to remove the pre-existing resources (so that + Fleet can re-create them) or switch to a different + WhenToTakeOver option before Fleet starts processing the corresponding hub cluster + manifests. + + + If you prefer Fleet stop processing all manifests, use this option along with the + ReportDiff apply strategy type. This setup would instruct Fleet to touch nothing + on the member cluster side but still report configuration differences between the + hub cluster and member clusters. Fleet will not give up ownership + that it has already assumed though. + enum: + - Always + - IfNoDiff + - Never + type: string + type: object + conditions: + description: |- + Conditions is an array of current observed conditions for StagedUpdateRun. + Known conditions are "Initialized", "Progressing", "Succeeded". + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + deletionStageStatus: + description: |- + DeletionStageStatus lists the current status of the deletion stage. The deletion stage + removes all the resources from the clusters that are not selected by the + current policy after all the update stages are completed. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of post-update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the post-update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. + items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced scope + resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + Known conditions are "Progressing", "Succeeded". + items: + description: "Condition contains details for one aspect of the + current state of this API Resource.\n---\nThis struct is intended + for direct use as an array at the field path .status.conditions. + \ For example,\n\n\n\ttype FooStatus struct{\n\t // Represents + the observations of a foo's current state.\n\t // Known + .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\"\n\t // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t Conditions + []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" + patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. Empty + if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. Empty + if the stage has not started updating. + format: date-time + type: string + required: + - clusters + - stageName + type: object + policyObservedClusterCount: + description: |- + PolicyObservedClusterCount records the number of observed clusters in the policy snapshot. + It is recorded at the beginning of the update run from the policy snapshot object. + If the `ObservedClusterCount` value is updated during the update run, the update run is abandoned. + type: integer + policySnapshotIndexUsed: + description: |- + PolicySnapShotIndexUsed records the policy snapshot index of the ClusterResourcePlacement (CRP) that + the update run is based on. The index represents the latest policy snapshot at the start of the update run. + If a newer policy snapshot is detected after the run starts, the staged update run is abandoned. + The scheduler must identify all clusters that meet the current policy before the update run begins. + All clusters involved in the update run are selected from the list of clusters scheduled by the CRP according + to the current policy. + type: string + stagedUpdateStrategySnapshot: + description: |- + StagedUpdateStrategySnapshot is the snapshot of the StagedUpdateStrategy used for the update run. 
+ The snapshot is immutable during the update run. + The strategy is applied to the list of clusters scheduled by the CRP according to the current policy. + The update run fails to initialize if the strategy fails to produce a valid list of stages where each selected + cluster is included in exactly one stage. + properties: + stages: + description: Stage specifies the configuration for each update + stage. + items: + description: |- + StageConfig describes a single update stage. + The clusters in each stage are updated sequentially. + The update stops if any of the updates fail. + properties: + afterStageTasks: + description: |- + The collection of tasks that each stage needs to complete successfully before moving to the next stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: AfterStageTask is the collection of post-stage + tasks that ALL need to be completed before moving to + the next stage. + properties: + type: + description: The type of the after-stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + default: 1h + description: The time to wait after all the clusters + in the current stage complete the update before + moving to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 2 + type: array + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is absent, the stage includes all the selected clusters. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: The name of the stage. This MUST be unique + within the same StagedUpdateStrategy. + maxLength: 63 + pattern: '[A-Za-z0-9]+$' + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. 
+ - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. + type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + stagesStatus: + description: |- + StagesStatus lists the current updating status of each stage. + The list is empty if the update run is not started or failed to initialize. + items: + description: StageUpdatingStatus defines the status of the update + run in a stage. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of post-update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the post-update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. + items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: "Condition contains details for one aspect + of the current state of this API Resource.\n---\nThis + struct is intended for direct use as an array at the + field path .status.conditions. For example,\n\n\n\ttype + FooStatus struct{\n\t // Represents the observations + of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t + \ // +patchMergeKey=type\n\t // +patchStrategy=merge\n\t + \ // +listType=map\n\t // +listMapKey=type\n\t + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced + scope resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + Known conditions are "Progressing", "Succeeded". + items: + description: "Condition contains details for one aspect of + the current state of this API Resource.\n---\nThis struct + is intended for direct use as an array at the field path + .status.conditions. For example,\n\n\n\ttype FooStatus + struct{\n\t // Represents the observations of a foo's + current state.\n\t // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // + +listType=map\n\t // +listMapKey=type\n\t Conditions + []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" + patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. + Empty if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. + Empty if the stage has not started updating. + format: date-time + type: string + required: + - clusters + - stageName + type: object + type: array + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml index 673ce5ce5..a47543fa4 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml @@ -155,6 +155,145 @@ spec: - spec type: object served: true + storage: false + subresources: + status: {} + - name: v1beta1 + schema: + openAPIV3Schema: + description: |- + ClusterStagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence + in which the selected cluster resources will be updated on the member clusters. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterStagedUpdateStrategy. + properties: + stages: + description: Stage specifies the configuration for each update stage. 
+ items: + description: |- + StageConfig describes a single update stage. + The clusters in each stage are updated sequentially. + The update stops if any of the updates fail. + properties: + afterStageTasks: + description: |- + The collection of tasks that each stage needs to complete successfully before moving to the next stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: AfterStageTask is the collection of post-stage + tasks that ALL need to be completed before moving to the + next stage. + properties: + type: + description: The type of the after-stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + default: 1h + description: The time to wait after all the clusters in + the current stage complete the update before moving + to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 2 + type: array + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is absent, the stage includes all the selected clusters. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: The name of the stage. This MUST be unique within + the same StagedUpdateStrategy. + maxLength: 63 + pattern: '[A-Za-z0-9]+$' + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. + - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. 
+ type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + required: + - spec + type: object + served: true storage: true subresources: status: {} diff --git a/pkg/controllers/updaterun/controller.go b/pkg/controllers/updaterun/controller.go index 10a993bf4..1fdaab22e 100644 --- a/pkg/controllers/updaterun/controller.go +++ b/pkg/controllers/updaterun/controller.go @@ -27,7 +27,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/condition" @@ -59,7 +58,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim klog.V(2).InfoS("ClusterStagedUpdateRun reconciliation ends", "clusterStagedUpdateRun", req.NamespacedName, "latency", latency) }() - var updateRun placementv1alpha1.ClusterStagedUpdateRun + var updateRun placementv1beta1.ClusterStagedUpdateRun if err := r.Client.Get(ctx, req.NamespacedName, &updateRun); err != nil { klog.ErrorS(err, "Failed to get clusterStagedUpdateRun object", "clusterStagedUpdateRun", req.Name) return runtime.Result{}, client.IgnoreNotFound(err) @@ -88,7 +87,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim var updatingStageIndex int var toBeUpdatedBindings, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding var err error - initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1alpha1.StagedUpdateRunConditionInitialized)) + initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) if !condition.IsConditionStatusTrue(initCond, updateRun.Generation) { if condition.IsConditionStatusFalse(initCond, updateRun.Generation) { klog.V(2).InfoS("The clusterStagedUpdateRun has failed to initialize", "errorMsg", initCond.Message, "clusterStagedUpdateRun", runObjRef) @@ -107,7 +106,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim } else { klog.V(2).InfoS("The clusterStagedUpdateRun is initialized", "clusterStagedUpdateRun", runObjRef) // Check if the clusterStagedUpdateRun is finished. - finishedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1alpha1.StagedUpdateRunConditionSucceeded)) + finishedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionSucceeded)) if condition.IsConditionStatusTrue(finishedCond, updateRun.Generation) || condition.IsConditionStatusFalse(finishedCond, updateRun.Generation) { klog.V(2).InfoS("The clusterStagedUpdateRun is finished", "finishedSuccessfully", finishedCond.Status, "clusterStagedUpdateRun", runObjRef) return runtime.Result{}, nil @@ -157,16 +156,16 @@ func (r *Reconciler) Reconcile(ctx context.Context, req runtime.Request) (runtim // handleDelete handles the deletion of the clusterStagedUpdateRun object. // We delete all the dependent resources, including approvalRequest objects, of the clusterStagedUpdateRun object. 
-func (r *Reconciler) handleDelete(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) (bool, time.Duration, error) { +func (r *Reconciler) handleDelete(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) (bool, time.Duration, error) { runObjRef := klog.KObj(updateRun) // delete all the associated approvalRequests. - approvalRequest := &placementv1alpha1.ClusterApprovalRequest{} - if err := r.Client.DeleteAllOf(ctx, approvalRequest, client.MatchingLabels{placementv1alpha1.TargetUpdateRunLabel: updateRun.GetName()}); err != nil { + approvalRequest := &placementv1beta1.ClusterApprovalRequest{} + if err := r.Client.DeleteAllOf(ctx, approvalRequest, client.MatchingLabels{placementv1beta1.TargetUpdateRunLabel: updateRun.GetName()}); err != nil { klog.ErrorS(err, "Failed to delete all associated approvalRequests", "clusterStagedUpdateRun", runObjRef) return false, 0, controller.NewAPIServerError(false, err) } klog.V(2).InfoS("Deleted all approvalRequests associated with the clusterStagedUpdateRun", "clusterStagedUpdateRun", runObjRef) - controllerutil.RemoveFinalizer(updateRun, placementv1alpha1.ClusterStagedUpdateRunFinalizer) + controllerutil.RemoveFinalizer(updateRun, placementv1beta1.ClusterStagedUpdateRunFinalizer) if err := r.Client.Update(ctx, updateRun); err != nil { klog.ErrorS(err, "Failed to remove updateRun finalizer", "clusterStagedUpdateRun", runObjRef) return false, 0, controller.NewUpdateIgnoreConflictError(err) @@ -175,19 +174,19 @@ func (r *Reconciler) handleDelete(ctx context.Context, updateRun *placementv1alp } // ensureFinalizer makes sure that the ClusterStagedUpdateRun CR has a finalizer on it. -func (r *Reconciler) ensureFinalizer(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { - if controllerutil.ContainsFinalizer(updateRun, placementv1alpha1.ClusterStagedUpdateRunFinalizer) { +func (r *Reconciler) ensureFinalizer(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { + if controllerutil.ContainsFinalizer(updateRun, placementv1beta1.ClusterStagedUpdateRunFinalizer) { return nil } klog.InfoS("Added the staged update run finalizer", "stagedUpdateRun", klog.KObj(updateRun)) - controllerutil.AddFinalizer(updateRun, placementv1alpha1.ClusterStagedUpdateRunFinalizer) + controllerutil.AddFinalizer(updateRun, placementv1beta1.ClusterStagedUpdateRunFinalizer) return r.Update(ctx, updateRun, client.FieldOwner(utils.UpdateRunControllerFieldManagerName)) } // recordUpdateRunSucceeded records the succeeded condition in the ClusterStagedUpdateRun status. -func (r *Reconciler) recordUpdateRunSucceeded(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { +func (r *Reconciler) recordUpdateRunSucceeded(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { meta.SetStatusCondition(&updateRun.Status.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StagedUpdateRunConditionSucceeded), + Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), Status: metav1.ConditionTrue, ObservedGeneration: updateRun.Generation, Reason: condition.UpdateRunSucceededReason, @@ -201,9 +200,9 @@ func (r *Reconciler) recordUpdateRunSucceeded(ctx context.Context, updateRun *pl } // recordUpdateRunFailed records the failed condition in the ClusterStagedUpdateRun status. 
-func (r *Reconciler) recordUpdateRunFailed(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun, message string) error { +func (r *Reconciler) recordUpdateRunFailed(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun, message string) error { meta.SetStatusCondition(&updateRun.Status.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StagedUpdateRunConditionSucceeded), + Type: string(placementv1beta1.StagedUpdateRunConditionSucceeded), Status: metav1.ConditionFalse, ObservedGeneration: updateRun.Generation, Reason: condition.UpdateRunFailedReason, @@ -218,7 +217,7 @@ func (r *Reconciler) recordUpdateRunFailed(ctx context.Context, updateRun *place } // recordUpdateRunStatus records the ClusterStagedUpdateRun status. -func (r *Reconciler) recordUpdateRunStatus(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { +func (r *Reconciler) recordUpdateRunStatus(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { if updateErr := r.Client.Status().Update(ctx, updateRun); updateErr != nil { klog.ErrorS(updateErr, "Failed to update the ClusterStagedUpdateRun status", "clusterStagedUpdateRun", klog.KObj(updateRun)) return controller.NewUpdateIgnoreConflictError(updateErr) @@ -231,8 +230,8 @@ func (r *Reconciler) SetupWithManager(mgr runtime.Manager) error { r.recorder = mgr.GetEventRecorderFor("clusterresource-stagedupdaterun-controller") return runtime.NewControllerManagedBy(mgr). Named("clusterresource-stagedupdaterun-controller"). - For(&placementv1alpha1.ClusterStagedUpdateRun{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Watches(&placementv1alpha1.ClusterApprovalRequest{}, &handler.Funcs{ + For(&placementv1beta1.ClusterStagedUpdateRun{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&placementv1beta1.ClusterApprovalRequest{}, &handler.Funcs{ // We only care about when an approval request is approved. UpdateFunc: func(ctx context.Context, e event.UpdateEvent, q workqueue.RateLimitingInterface) { klog.V(2).InfoS("Handling a clusterApprovalRequest update event", "clusterApprovalRequest", klog.KObj(e.ObjectNew)) @@ -248,7 +247,7 @@ func (r *Reconciler) SetupWithManager(mgr runtime.Manager) error { // handleClusterApprovalRequest finds the ClusterStagedUpdateRun creating the ClusterApprovalRequest, // and enqueues it to the ClusterStagedUpdateRun controller queue. 
func handleClusterApprovalRequest(obj client.Object, q workqueue.RateLimitingInterface) { - approvalRequest, ok := obj.(*placementv1alpha1.ClusterApprovalRequest) + approvalRequest, ok := obj.(*placementv1beta1.ClusterApprovalRequest) if !ok { klog.V(2).ErrorS(controller.NewUnexpectedBehaviorError(fmt.Errorf("cannot cast runtime object to ClusterApprovalRequest")), "Invalid object type", "object", klog.KObj(obj)) diff --git a/pkg/controllers/updaterun/controller_integration_test.go b/pkg/controllers/updaterun/controller_integration_test.go index d22077d85..3b17d9b28 100644 --- a/pkg/controllers/updaterun/controller_integration_test.go +++ b/pkg/controllers/updaterun/controller_integration_test.go @@ -110,8 +110,8 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { validateUpdateRunHasFinalizer(ctx, updateRun) By("Updating the clusterStagedUpdateRun to failed") - startedcond := generateTrueCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionProgressing) - finishedcond := generateFalseCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionSucceeded) + startedcond := generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing) + finishedcond := generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded) meta.SetStatusCondition(&updateRun.Status.Conditions, startedcond) meta.SetStatusCondition(&updateRun.Status.Conditions, finishedcond) Expect(k8sClient.Status().Update(ctx, updateRun)).Should(Succeed(), "failed to update the clusterStagedUpdateRun") @@ -139,7 +139,7 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { validateUpdateRunHasFinalizer(ctx, updateRun) By("Updating the clusterStagedUpdateRun status to processing") - startedcond := generateTrueCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionProgressing) + startedcond := generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing) meta.SetStatusCondition(&updateRun.Status.Conditions, startedcond) Expect(k8sClient.Status().Update(ctx, updateRun)).Should(Succeed(), "failed to add condition to the clusterStagedUpdateRun") @@ -163,12 +163,12 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { Expect(k8sClient.Create(ctx, updateRun)).Should(Succeed()) By("Creating ClusterApprovalRequests") - approvalRequests := []*placementv1alpha1.ClusterApprovalRequest{ + approvalRequests := []*placementv1beta1.ClusterApprovalRequest{ { ObjectMeta: metav1.ObjectMeta{ Name: "req1", Labels: map[string]string{ - placementv1alpha1.TargetUpdateRunLabel: testUpdateRunName, + placementv1beta1.TargetUpdateRunLabel: testUpdateRunName, }, }, }, @@ -176,7 +176,7 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: "req2", Labels: map[string]string{ - placementv1alpha1.TargetUpdateRunLabel: testUpdateRunName, + placementv1beta1.TargetUpdateRunLabel: testUpdateRunName, }, }, }, @@ -184,7 +184,7 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { ObjectMeta: metav1.ObjectMeta{ Name: "req3", Labels: map[string]string{ - placementv1alpha1.TargetUpdateRunLabel: testUpdateRunName + "1", // different update run + placementv1beta1.TargetUpdateRunLabel: testUpdateRunName + "1", // different update run }, }, }, @@ -209,12 +209,12 @@ var _ = Describe("Test the clusterStagedUpdateRun controller", func() { }) }) -func generateTestClusterStagedUpdateRun() *placementv1alpha1.ClusterStagedUpdateRun { - return 
&placementv1alpha1.ClusterStagedUpdateRun{ +func generateTestClusterStagedUpdateRun() *placementv1beta1.ClusterStagedUpdateRun { + return &placementv1beta1.ClusterStagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Name: testUpdateRunName, }, - Spec: placementv1alpha1.StagedUpdateRunSpec{ + Spec: placementv1beta1.StagedUpdateRunSpec{ PlacementName: testCRPName, ResourceSnapshotIndex: testResourceSnapshotName, StagedUpdateStrategyName: testUpdateStrategyName, @@ -307,14 +307,14 @@ func generateTestMemberCluster(idx int, clusterName string, labels map[string]st } } -func generateTestClusterStagedUpdateStrategy() *placementv1alpha1.ClusterStagedUpdateStrategy { +func generateTestClusterStagedUpdateStrategy() *placementv1beta1.ClusterStagedUpdateStrategy { sortingKey := "index" - return &placementv1alpha1.ClusterStagedUpdateStrategy{ + return &placementv1beta1.ClusterStagedUpdateStrategy{ ObjectMeta: metav1.ObjectMeta{ Name: testUpdateStrategyName, }, - Spec: placementv1alpha1.StagedUpdateStrategySpec{ - Stages: []placementv1alpha1.StageConfig{ + Spec: placementv1beta1.StagedUpdateStrategySpec{ + Stages: []placementv1beta1.StageConfig{ { Name: "stage1", LabelSelector: &metav1.LabelSelector{ @@ -324,9 +324,9 @@ func generateTestClusterStagedUpdateStrategy() *placementv1alpha1.ClusterStagedU }, }, SortingLabelKey: &sortingKey, - AfterStageTasks: []placementv1alpha1.AfterStageTask{ + AfterStageTasks: []placementv1beta1.AfterStageTask{ { - Type: placementv1alpha1.AfterStageTaskTypeTimedWait, + Type: placementv1beta1.AfterStageTaskTypeTimedWait, WaitTime: metav1.Duration{ Duration: time.Second * 4, }, @@ -342,9 +342,9 @@ func generateTestClusterStagedUpdateStrategy() *placementv1alpha1.ClusterStagedU }, }, // no sortingLabelKey, should sort by cluster name - AfterStageTasks: []placementv1alpha1.AfterStageTask{ + AfterStageTasks: []placementv1beta1.AfterStageTask{ { - Type: placementv1alpha1.AfterStageTaskTypeApproval, + Type: placementv1beta1.AfterStageTaskTypeApproval, }, }, }, @@ -426,24 +426,24 @@ func generateTestClusterResourceOverride() *placementv1alpha1.ClusterResourceOve } } -func generateTestApprovalRequest(name string) *placementv1alpha1.ClusterApprovalRequest { - return &placementv1alpha1.ClusterApprovalRequest{ +func generateTestApprovalRequest(name string) *placementv1beta1.ClusterApprovalRequest { + return &placementv1beta1.ClusterApprovalRequest{ ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: map[string]string{ - placementv1alpha1.TargetUpdateRunLabel: testUpdateRunName, + placementv1beta1.TargetUpdateRunLabel: testUpdateRunName, }, }, } } -func validateUpdateRunHasFinalizer(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) { +func validateUpdateRunHasFinalizer(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) { namespacedName := types.NamespacedName{Name: updateRun.Name} Eventually(func() error { if err := k8sClient.Get(ctx, namespacedName, updateRun); err != nil { return fmt.Errorf("failed to get clusterStagedUpdateRun %s: %w", namespacedName, err) } - if !controllerutil.ContainsFinalizer(updateRun, placementv1alpha1.ClusterStagedUpdateRunFinalizer) { + if !controllerutil.ContainsFinalizer(updateRun, placementv1beta1.ClusterStagedUpdateRunFinalizer) { return fmt.Errorf("finalizer not added to clusterStagedUpdateRun %s", namespacedName) } return nil @@ -452,7 +452,7 @@ func validateUpdateRunHasFinalizer(ctx context.Context, updateRun *placementv1al func validateUpdateRunIsDeleted(ctx context.Context, name types.NamespacedName) { 
Eventually(func() error { - updateRun := &placementv1alpha1.ClusterStagedUpdateRun{} + updateRun := &placementv1beta1.ClusterStagedUpdateRun{} if err := k8sClient.Get(ctx, name, updateRun); !errors.IsNotFound(err) { return fmt.Errorf("clusterStagedUpdateRun %s still exists or an unexpected error occurred: %w", name, err) } @@ -462,7 +462,7 @@ func validateUpdateRunIsDeleted(ctx context.Context, name types.NamespacedName) func validateApprovalRequestCount(ctx context.Context, count int) { Eventually(func() (int, error) { - appReqList := &placementv1alpha1.ClusterApprovalRequestList{} + appReqList := &placementv1beta1.ClusterApprovalRequestList{} if err := k8sClient.List(ctx, appReqList); err != nil { return -1, err } @@ -473,45 +473,45 @@ func validateApprovalRequestCount(ctx context.Context, count int) { func generateTrueCondition(obj client.Object, condType any) metav1.Condition { reason, typeStr := "", "" switch cond := condType.(type) { - case placementv1alpha1.StagedUpdateRunConditionType: + case placementv1beta1.StagedUpdateRunConditionType: switch cond { - case placementv1alpha1.StagedUpdateRunConditionInitialized: + case placementv1beta1.StagedUpdateRunConditionInitialized: reason = condition.UpdateRunInitializeSucceededReason - case placementv1alpha1.StagedUpdateRunConditionProgressing: + case placementv1beta1.StagedUpdateRunConditionProgressing: reason = condition.UpdateRunStartedReason - case placementv1alpha1.StagedUpdateRunConditionSucceeded: + case placementv1beta1.StagedUpdateRunConditionSucceeded: reason = condition.UpdateRunSucceededReason } typeStr = string(cond) - case placementv1alpha1.StageUpdatingConditionType: + case placementv1beta1.StageUpdatingConditionType: switch cond { - case placementv1alpha1.StageUpdatingConditionProgressing: + case placementv1beta1.StageUpdatingConditionProgressing: reason = condition.StageUpdatingStartedReason - case placementv1alpha1.StageUpdatingConditionSucceeded: + case placementv1beta1.StageUpdatingConditionSucceeded: reason = condition.StageUpdatingSucceededReason } typeStr = string(cond) - case placementv1alpha1.ClusterUpdatingStatusConditionType: + case placementv1beta1.ClusterUpdatingStatusConditionType: switch cond { - case placementv1alpha1.ClusterUpdatingConditionStarted: + case placementv1beta1.ClusterUpdatingConditionStarted: reason = condition.ClusterUpdatingStartedReason - case placementv1alpha1.ClusterUpdatingConditionSucceeded: + case placementv1beta1.ClusterUpdatingConditionSucceeded: reason = condition.ClusterUpdatingSucceededReason } typeStr = string(cond) - case placementv1alpha1.AfterStageTaskConditionType: + case placementv1beta1.AfterStageTaskConditionType: switch cond { - case placementv1alpha1.AfterStageTaskConditionWaitTimeElapsed: + case placementv1beta1.AfterStageTaskConditionWaitTimeElapsed: reason = condition.AfterStageTaskWaitTimeElapsedReason - case placementv1alpha1.AfterStageTaskConditionApprovalRequestCreated: + case placementv1beta1.AfterStageTaskConditionApprovalRequestCreated: reason = condition.AfterStageTaskApprovalRequestCreatedReason - case placementv1alpha1.AfterStageTaskConditionApprovalRequestApproved: + case placementv1beta1.AfterStageTaskConditionApprovalRequestApproved: reason = condition.AfterStageTaskApprovalRequestApprovedReason } typeStr = string(cond) - case placementv1alpha1.ApprovalRequestConditionType: + case placementv1beta1.ApprovalRequestConditionType: switch cond { - case placementv1alpha1.ApprovalRequestConditionApproved: + case placementv1beta1.ApprovalRequestConditionApproved: 
reason = "LGTM" } typeStr = string(cond) @@ -533,23 +533,23 @@ func generateTrueCondition(obj client.Object, condType any) metav1.Condition { func generateFalseCondition(obj client.Object, condType any) metav1.Condition { reason, typeStr := "", "" switch cond := condType.(type) { - case placementv1alpha1.StagedUpdateRunConditionType: + case placementv1beta1.StagedUpdateRunConditionType: switch cond { - case placementv1alpha1.StagedUpdateRunConditionInitialized: + case placementv1beta1.StagedUpdateRunConditionInitialized: reason = condition.UpdateRunInitializeFailedReason - case placementv1alpha1.StagedUpdateRunConditionSucceeded: + case placementv1beta1.StagedUpdateRunConditionSucceeded: reason = condition.UpdateRunFailedReason } typeStr = string(cond) - case placementv1alpha1.StageUpdatingConditionType: + case placementv1beta1.StageUpdatingConditionType: switch cond { - case placementv1alpha1.StageUpdatingConditionSucceeded: + case placementv1beta1.StageUpdatingConditionSucceeded: reason = condition.StageUpdatingFailedReason } typeStr = string(cond) - case placementv1alpha1.ClusterUpdatingStatusConditionType: + case placementv1beta1.ClusterUpdatingStatusConditionType: switch cond { - case placementv1alpha1.ClusterUpdatingConditionSucceeded: + case placementv1beta1.ClusterUpdatingConditionSucceeded: reason = condition.ClusterUpdatingFailedReason } typeStr = string(cond) diff --git a/pkg/controllers/updaterun/controller_test.go b/pkg/controllers/updaterun/controller_test.go index 2fd1b3032..7effec96b 100644 --- a/pkg/controllers/updaterun/controller_test.go +++ b/pkg/controllers/updaterun/controller_test.go @@ -13,7 +13,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllertest" "sigs.k8s.io/controller-runtime/pkg/reconcile" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" ) func TestHandleClusterApprovalRequest(t *testing.T) { @@ -23,20 +23,20 @@ func TestHandleClusterApprovalRequest(t *testing.T) { queuedName string }{ "it should not enqueue anything if the obj is not a ClusterApprovalRequest": { - obj: &placementv1alpha1.ClusterStagedUpdateRun{}, + obj: &placementv1beta1.ClusterStagedUpdateRun{}, shouldEnqueue: false, }, "it should not enqueue anything if targetUpdateRun in spec is empty": { - obj: &placementv1alpha1.ClusterApprovalRequest{ - Spec: placementv1alpha1.ApprovalRequestSpec{ + obj: &placementv1beta1.ClusterApprovalRequest{ + Spec: placementv1beta1.ApprovalRequestSpec{ TargetUpdateRun: "", }, }, shouldEnqueue: false, }, "it should enqueue the targetUpdateRun if it is not empty": { - obj: &placementv1alpha1.ClusterApprovalRequest{ - Spec: placementv1alpha1.ApprovalRequestSpec{ + obj: &placementv1beta1.ClusterApprovalRequest{ + Spec: placementv1beta1.ApprovalRequestSpec{ TargetUpdateRun: "test", }, }, diff --git a/pkg/controllers/updaterun/execution.go b/pkg/controllers/updaterun/execution.go index 544bdf180..9261002da 100644 --- a/pkg/controllers/updaterun/execution.go +++ b/pkg/controllers/updaterun/execution.go @@ -18,7 +18,6 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" @@ -39,7 +38,7 @@ var ( // the time to wait before rechecking the cluster update status, and any error encountered. 
func (r *Reconciler) execute( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, updatingStageIndex int, toBeUpdatedBindings, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding, ) (bool, time.Duration, error) { @@ -68,7 +67,7 @@ func (r *Reconciler) execute( // executeUpdatingStage executes a single updating stage by updating the clusterResourceBindings. func (r *Reconciler) executeUpdatingStage( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, updatingStageIndex int, toBeUpdatedBindings []*placementv1beta1.ClusterResourceBinding, ) (time.Duration, error) { @@ -85,8 +84,8 @@ func (r *Reconciler) executeUpdatingStage( // Go through each cluster in the stage and check if it's updated. for i := range updatingStageStatus.Clusters { clusterStatus := &updatingStageStatus.Clusters[i] - clusterStartedCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1alpha1.ClusterUpdatingConditionStarted)) - clusterUpdateSucceededCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1alpha1.ClusterUpdatingConditionSucceeded)) + clusterStartedCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)) + clusterUpdateSucceededCond := meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) if condition.IsConditionStatusFalse(clusterUpdateSucceededCond, updateRun.Generation) { // The cluster is marked as failed to update. failedErr := fmt.Errorf("the cluster `%s` in the stage %s has failed", clusterStatus.ClusterName, updatingStageStatus.StageName) @@ -189,12 +188,12 @@ func (r *Reconciler) executeUpdatingStage( // executeDeleteStage executes the delete stage by deleting the clusterResourceBindings. func (r *Reconciler) executeDeleteStage( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding, ) (bool, error) { updateRunRef := klog.KObj(updateRun) existingDeleteStageStatus := updateRun.Status.DeletionStageStatus - existingDeleteStageClusterMap := make(map[string]*placementv1alpha1.ClusterUpdatingStatus, len(existingDeleteStageStatus.Clusters)) + existingDeleteStageClusterMap := make(map[string]*placementv1beta1.ClusterUpdatingStatus, len(existingDeleteStageStatus.Clusters)) for i := range existingDeleteStageStatus.Clusters { existingDeleteStageClusterMap[existingDeleteStageStatus.Clusters[i].ClusterName] = &existingDeleteStageStatus.Clusters[i] } @@ -210,12 +209,12 @@ func (r *Reconciler) executeDeleteStage( // In validation, we already check the binding must exist in the status. delete(existingDeleteStageClusterMap, binding.Spec.TargetCluster) // Make sure the cluster is not marked as deleted as the binding is still there. 
- if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1alpha1.ClusterUpdatingConditionSucceeded)), updateRun.Generation) { + if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)), updateRun.Generation) { unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("the deleted cluster `%s` in the deleting stage still has a clusterResourceBinding", binding.Spec.TargetCluster)) klog.ErrorS(unexpectedErr, "The cluster in the deleting stage is not removed yet but marked as deleted", "cluster", curCluster.ClusterName, "clusterStagedUpdateRun", updateRunRef) return false, fmt.Errorf("%w: %s", errStagedUpdatedAborted, unexpectedErr.Error()) } - if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1alpha1.ClusterUpdatingConditionStarted)), updateRun.Generation) { + if condition.IsConditionStatusTrue(meta.FindStatusCondition(curCluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)), updateRun.Generation) { // The cluster status is marked as being deleted. if binding.DeletionTimestamp.IsZero() { // The cluster is marked as deleting but the binding is not deleting. @@ -241,7 +240,7 @@ func (r *Reconciler) executeDeleteStage( // The rest of the clusters in the stage are not in the toBeDeletedBindings so it should be marked as delete succeeded. for _, clusterStatus := range existingDeleteStageClusterMap { // Make sure the cluster is marked as deleted. - if !condition.IsConditionStatusTrue(meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1alpha1.ClusterUpdatingConditionStarted)), updateRun.Generation) { + if !condition.IsConditionStatusTrue(meta.FindStatusCondition(clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)), updateRun.Generation) { markClusterUpdatingStarted(clusterStatus, updateRun.Generation) } markClusterUpdatingSucceeded(clusterStatus, updateRun.Generation) @@ -255,7 +254,7 @@ func (r *Reconciler) executeDeleteStage( // checkAfterStageTasksStatus checks if the after stage tasks have finished. // Tt returns if the after stage tasks have finished or error if the after stage tasks failed. -func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingStageIndex int, updateRun *placementv1alpha1.ClusterStagedUpdateRun) (bool, error) { +func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingStageIndex int, updateRun *placementv1beta1.ClusterStagedUpdateRun) (bool, error) { updateRunRef := klog.KObj(updateRun) updatingStageStatus := &updateRun.Status.StagesStatus[updatingStageIndex] updatingStage := &updateRun.Status.StagedUpdateStrategySnapshot.Stages[updatingStageIndex] @@ -265,8 +264,8 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta } for i, task := range updatingStage.AfterStageTasks { switch task.Type { - case placementv1alpha1.AfterStageTaskTypeTimedWait: - waitStartTime := meta.FindStatusCondition(updatingStageStatus.Conditions, string(placementv1alpha1.StageUpdatingConditionProgressing)).LastTransitionTime.Time + case placementv1beta1.AfterStageTaskTypeTimedWait: + waitStartTime := meta.FindStatusCondition(updatingStageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)).LastTransitionTime.Time // Check if the wait time has passed. 
if waitStartTime.Add(task.WaitTime.Duration).After(time.Now()) { klog.V(2).InfoS("The after stage task still need to wait", "waitStartTime", waitStartTime, "waitTime", task.WaitTime, "stage", updatingStage.Name, "clusterStagedUpdateRun", updateRunRef) @@ -274,18 +273,18 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta } markAfterStageWaitTimeElapsed(&updatingStageStatus.AfterStageTaskStatus[i], updateRun.Generation) klog.V(2).InfoS("The after stage wait task has completed", "stage", updatingStage.Name, "clusterStagedUpdateRun", updateRunRef) - case placementv1alpha1.AfterStageTaskTypeApproval: + case placementv1beta1.AfterStageTaskTypeApproval: // Check if the approval request has been created. - approvalRequest := placementv1alpha1.ClusterApprovalRequest{ + approvalRequest := placementv1beta1.ClusterApprovalRequest{ ObjectMeta: metav1.ObjectMeta{ Name: updatingStageStatus.AfterStageTaskStatus[i].ApprovalRequestName, Labels: map[string]string{ - placementv1alpha1.TargetUpdatingStageNameLabel: updatingStage.Name, - placementv1alpha1.TargetUpdateRunLabel: updateRun.Name, - placementv1alpha1.IsLatestUpdateRunApprovalLabel: "true", + placementv1beta1.TargetUpdatingStageNameLabel: updatingStage.Name, + placementv1beta1.TargetUpdateRunLabel: updateRun.Name, + placementv1beta1.IsLatestUpdateRunApprovalLabel: "true", }, }, - Spec: placementv1alpha1.ApprovalRequestSpec{ + Spec: placementv1beta1.ApprovalRequestSpec{ TargetUpdateRun: updateRun.Name, TargetStage: updatingStage.Name, }, @@ -304,7 +303,7 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta klog.ErrorS(unexpectedErr, "Found an approval request targeting wrong stage", "approvalRequestTask", requestRef, "stage", updatingStage.Name, "clusterStagedUpdateRun", updateRunRef) return false, fmt.Errorf("%w: %s", errStagedUpdatedAborted, unexpectedErr.Error()) } - if !condition.IsConditionStatusTrue(meta.FindStatusCondition(approvalRequest.Status.Conditions, string(placementv1alpha1.ApprovalRequestConditionApproved)), approvalRequest.Generation) { + if !condition.IsConditionStatusTrue(meta.FindStatusCondition(approvalRequest.Status.Conditions, string(placementv1beta1.ApprovalRequestConditionApproved)), approvalRequest.Generation) { klog.V(2).InfoS("The approval request has not been approved yet", "approvalRequestTask", requestRef, "stage", updatingStage.Name, "clusterStagedUpdateRun", updateRunRef) return false, nil } @@ -328,7 +327,7 @@ func (r *Reconciler) checkAfterStageTasksStatus(ctx context.Context, updatingSta } // updateBindingRolloutStarted updates the binding status to indicate the rollout has started. -func (r *Reconciler) updateBindingRolloutStarted(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { +func (r *Reconciler) updateBindingRolloutStarted(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { // first reset the condition to reflect the latest lastTransitionTime binding.RemoveCondition(string(placementv1beta1.ResourceBindingRolloutStarted)) cond := metav1.Condition{ @@ -348,7 +347,7 @@ func (r *Reconciler) updateBindingRolloutStarted(ctx context.Context, binding *p } // isBindingSyncedWithClusterStatus checks if the binding is up-to-date with the cluster status. 
-func isBindingSyncedWithClusterStatus(updateRun *placementv1alpha1.ClusterStagedUpdateRun, binding *placementv1beta1.ClusterResourceBinding, cluster *placementv1alpha1.ClusterUpdatingStatus) bool { +func isBindingSyncedWithClusterStatus(updateRun *placementv1beta1.ClusterStagedUpdateRun, binding *placementv1beta1.ClusterResourceBinding, cluster *placementv1beta1.ClusterUpdatingStatus) bool { if binding.Spec.ResourceSnapshotName != updateRun.Spec.ResourceSnapshotIndex { klog.ErrorS(fmt.Errorf("binding has different resourceSnapshotName, want: %s, got: %s", updateRun.Spec.ResourceSnapshotIndex, binding.Spec.ResourceSnapshotName), "ClusterResourceBinding is not up-to-date", "clusterResourceBinding", klog.KObj(binding), "clusterStagedUpdateRun", klog.KObj(updateRun)) return false @@ -372,9 +371,9 @@ func isBindingSyncedWithClusterStatus(updateRun *placementv1alpha1.ClusterStaged // It returns true if the resources have been updated successfully or any error if the update failed. func checkClusterUpdateResult( binding *placementv1beta1.ClusterResourceBinding, - clusterStatus *placementv1alpha1.ClusterUpdatingStatus, - updatingStage *placementv1alpha1.StageUpdatingStatus, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + clusterStatus *placementv1beta1.ClusterUpdatingStatus, + updatingStage *placementv1beta1.StageUpdatingStatus, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) (bool, error) { availCond := binding.GetCondition(string(placementv1beta1.ResourceBindingAvailable)) if condition.IsConditionStatusTrue(availCond, binding.Generation) { @@ -396,9 +395,9 @@ func checkClusterUpdateResult( } // markUpdateRunStarted marks the update run as started in memory. -func markUpdateRunStarted(updateRun *placementv1alpha1.ClusterStagedUpdateRun) { +func markUpdateRunStarted(updateRun *placementv1beta1.ClusterStagedUpdateRun) { meta.SetStatusCondition(&updateRun.Status.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StagedUpdateRunConditionProgressing), + Type: string(placementv1beta1.StagedUpdateRunConditionProgressing), Status: metav1.ConditionTrue, ObservedGeneration: updateRun.Generation, Reason: condition.UpdateRunStartedReason, @@ -406,12 +405,12 @@ func markUpdateRunStarted(updateRun *placementv1alpha1.ClusterStagedUpdateRun) { } // markStageUpdatingStarted marks the stage updating status as started in memory. -func markStageUpdatingStarted(stageUpdatingStatus *placementv1alpha1.StageUpdatingStatus, generation int64) { +func markStageUpdatingStarted(stageUpdatingStatus *placementv1beta1.StageUpdatingStatus, generation int64) { if stageUpdatingStatus.StartTime == nil { stageUpdatingStatus.StartTime = &metav1.Time{Time: time.Now()} } meta.SetStatusCondition(&stageUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StageUpdatingConditionProgressing), + Type: string(placementv1beta1.StageUpdatingConditionProgressing), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.StageUpdatingStartedReason, @@ -419,9 +418,9 @@ func markStageUpdatingStarted(stageUpdatingStatus *placementv1alpha1.StageUpdati } // markStageUpdatingWaiting marks the stage updating status as waiting in memory. 
-func markStageUpdatingWaiting(stageUpdatingStatus *placementv1alpha1.StageUpdatingStatus, generation int64) { +func markStageUpdatingWaiting(stageUpdatingStatus *placementv1beta1.StageUpdatingStatus, generation int64) { meta.SetStatusCondition(&stageUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StageUpdatingConditionProgressing), + Type: string(placementv1beta1.StageUpdatingConditionProgressing), Status: metav1.ConditionFalse, ObservedGeneration: generation, Reason: condition.StageUpdatingWaitingReason, @@ -429,12 +428,12 @@ func markStageUpdatingWaiting(stageUpdatingStatus *placementv1alpha1.StageUpdati } // markStageUpdatingSucceeded marks the stage updating status as succeeded in memory. -func markStageUpdatingSucceeded(stageUpdatingStatus *placementv1alpha1.StageUpdatingStatus, generation int64) { +func markStageUpdatingSucceeded(stageUpdatingStatus *placementv1beta1.StageUpdatingStatus, generation int64) { if stageUpdatingStatus.EndTime == nil { stageUpdatingStatus.EndTime = &metav1.Time{Time: time.Now()} } meta.SetStatusCondition(&stageUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StageUpdatingConditionSucceeded), + Type: string(placementv1beta1.StageUpdatingConditionSucceeded), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.StageUpdatingSucceededReason, @@ -442,12 +441,12 @@ func markStageUpdatingSucceeded(stageUpdatingStatus *placementv1alpha1.StageUpda } // markStageUpdatingFailed marks the stage updating status as failed in memory. -func markStageUpdatingFailed(stageUpdatingStatus *placementv1alpha1.StageUpdatingStatus, generation int64, message string) { +func markStageUpdatingFailed(stageUpdatingStatus *placementv1beta1.StageUpdatingStatus, generation int64, message string) { if stageUpdatingStatus.EndTime == nil { stageUpdatingStatus.EndTime = &metav1.Time{Time: time.Now()} } meta.SetStatusCondition(&stageUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StageUpdatingConditionSucceeded), + Type: string(placementv1beta1.StageUpdatingConditionSucceeded), Status: metav1.ConditionFalse, ObservedGeneration: generation, Reason: condition.StageUpdatingFailedReason, @@ -456,9 +455,9 @@ func markStageUpdatingFailed(stageUpdatingStatus *placementv1alpha1.StageUpdatin } // markClusterUpdatingStarted marks the cluster updating status as started in memory. -func markClusterUpdatingStarted(clusterUpdatingStatus *placementv1alpha1.ClusterUpdatingStatus, generation int64) { +func markClusterUpdatingStarted(clusterUpdatingStatus *placementv1beta1.ClusterUpdatingStatus, generation int64) { meta.SetStatusCondition(&clusterUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.ClusterUpdatingConditionStarted), + Type: string(placementv1beta1.ClusterUpdatingConditionStarted), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.ClusterUpdatingStartedReason, @@ -466,9 +465,9 @@ func markClusterUpdatingStarted(clusterUpdatingStatus *placementv1alpha1.Cluster } // markClusterUpdatingSucceeded marks the cluster updating status as succeeded in memory. 
-func markClusterUpdatingSucceeded(clusterUpdatingStatus *placementv1alpha1.ClusterUpdatingStatus, generation int64) { +func markClusterUpdatingSucceeded(clusterUpdatingStatus *placementv1beta1.ClusterUpdatingStatus, generation int64) { meta.SetStatusCondition(&clusterUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.ClusterUpdatingConditionSucceeded), + Type: string(placementv1beta1.ClusterUpdatingConditionSucceeded), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.ClusterUpdatingSucceededReason, @@ -476,9 +475,9 @@ func markClusterUpdatingSucceeded(clusterUpdatingStatus *placementv1alpha1.Clust } // markClusterUpdatingFailed marks the cluster updating status as failed in memory. -func markClusterUpdatingFailed(clusterUpdatingStatus *placementv1alpha1.ClusterUpdatingStatus, generation int64, message string) { +func markClusterUpdatingFailed(clusterUpdatingStatus *placementv1beta1.ClusterUpdatingStatus, generation int64, message string) { meta.SetStatusCondition(&clusterUpdatingStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.ClusterUpdatingConditionSucceeded), + Type: string(placementv1beta1.ClusterUpdatingConditionSucceeded), Status: metav1.ConditionFalse, ObservedGeneration: generation, Reason: condition.ClusterUpdatingFailedReason, @@ -487,9 +486,9 @@ func markClusterUpdatingFailed(clusterUpdatingStatus *placementv1alpha1.ClusterU } // markAfterStageRequestCreated marks the Approval after stage task as ApprovalRequestCreated in memory. -func markAfterStageRequestCreated(afterStageTaskStatus *placementv1alpha1.AfterStageTaskStatus, generation int64) { +func markAfterStageRequestCreated(afterStageTaskStatus *placementv1beta1.AfterStageTaskStatus, generation int64) { meta.SetStatusCondition(&afterStageTaskStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.AfterStageTaskConditionApprovalRequestCreated), + Type: string(placementv1beta1.AfterStageTaskConditionApprovalRequestCreated), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.AfterStageTaskApprovalRequestCreatedReason, @@ -497,9 +496,9 @@ func markAfterStageRequestCreated(afterStageTaskStatus *placementv1alpha1.AfterS } // markAfterStageRequestApproved marks the Approval after stage task as Approved in memory. -func markAfterStageRequestApproved(afterStageTaskStatus *placementv1alpha1.AfterStageTaskStatus, generation int64) { +func markAfterStageRequestApproved(afterStageTaskStatus *placementv1beta1.AfterStageTaskStatus, generation int64) { meta.SetStatusCondition(&afterStageTaskStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.AfterStageTaskConditionApprovalRequestApproved), + Type: string(placementv1beta1.AfterStageTaskConditionApprovalRequestApproved), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.AfterStageTaskApprovalRequestApprovedReason, @@ -507,9 +506,9 @@ func markAfterStageRequestApproved(afterStageTaskStatus *placementv1alpha1.After } // markAfterStageWaitTimeElapsed marks the TimeWait after stage task as TimeElapsed in memory. 
-func markAfterStageWaitTimeElapsed(afterStageTaskStatus *placementv1alpha1.AfterStageTaskStatus, generation int64) { +func markAfterStageWaitTimeElapsed(afterStageTaskStatus *placementv1beta1.AfterStageTaskStatus, generation int64) { meta.SetStatusCondition(&afterStageTaskStatus.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.AfterStageTaskConditionWaitTimeElapsed), + Type: string(placementv1beta1.AfterStageTaskConditionWaitTimeElapsed), Status: metav1.ConditionTrue, ObservedGeneration: generation, Reason: condition.AfterStageTaskWaitTimeElapsedReason, diff --git a/pkg/controllers/updaterun/execution_integration_test.go b/pkg/controllers/updaterun/execution_integration_test.go index 2eef72130..aa09a5aed 100644 --- a/pkg/controllers/updaterun/execution_integration_test.go +++ b/pkg/controllers/updaterun/execution_integration_test.go @@ -29,16 +29,16 @@ import ( ) var _ = Describe("UpdateRun execution tests", func() { - var updateRun *placementv1alpha1.ClusterStagedUpdateRun + var updateRun *placementv1beta1.ClusterStagedUpdateRun var crp *placementv1beta1.ClusterResourcePlacement var policySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot - var updateStrategy *placementv1alpha1.ClusterStagedUpdateStrategy + var updateStrategy *placementv1beta1.ClusterStagedUpdateStrategy var resourceBindings []*placementv1beta1.ClusterResourceBinding var targetClusters []*clusterv1beta1.MemberCluster var unscheduledCluster []*clusterv1beta1.MemberCluster var resourceSnapshot *placementv1beta1.ClusterResourceSnapshot var clusterResourceOverride *placementv1alpha1.ClusterResourceOverrideSnapshot - var wantStatus *placementv1alpha1.StagedUpdateRunStatus + var wantStatus *placementv1beta1.StagedUpdateRunStatus BeforeEach(OncePerOrdered, func() { testUpdateRunName = "updaterun-" + utils.RandStr() @@ -192,8 +192,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 1st cluster has succeeded and 2nd cluster has started") - wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") By("Validating the 1st stage has startTime set") @@ -210,8 +210,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 2nd cluster has succeeded and 3rd cluster has started") - wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[2].Conditions = 
append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[0].Clusters[1].Conditions = append(wantStatus.StagesStatus[0].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -225,8 +225,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 3rd cluster has succeeded and 4th cluster has started") - wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[3].Conditions = append(wantStatus.StagesStatus[0].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[0].Clusters[2].Conditions = append(wantStatus.StagesStatus[0].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[3].Conditions = append(wantStatus.StagesStatus[0].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -240,8 +240,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 4th cluster has succeeded and 5th cluster has started") - wantStatus.StagesStatus[0].Clusters[3].Conditions = append(wantStatus.StagesStatus[0].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Clusters[4].Conditions = append(wantStatus.StagesStatus[0].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[0].Clusters[3].Conditions = append(wantStatus.StagesStatus[0].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[4].Conditions = append(wantStatus.StagesStatus[0].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -255,9 +255,9 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 5th cluster has succeeded and stage waiting for AfterStageTask") - stageWaitingCondition := generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing) + stageWaitingCondition := generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing) stageWaitingCondition.Reason = condition.StageUpdatingWaitingReason - wantStatus.StagesStatus[0].Clusters[4].Conditions = append(wantStatus.StagesStatus[0].Clusters[4].Conditions, generateTrueCondition(updateRun, 
placementv1alpha1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[4].Conditions = append(wantStatus.StagesStatus[0].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) wantStatus.StagesStatus[0].Conditions[0] = stageWaitingCondition // The progressing condition now becomes false with waiting reason. validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -266,21 +266,21 @@ var _ = Describe("UpdateRun execution tests", func() { By("Validating the waitTime after stage task has completed and 2nd stage has started") // AfterStageTask completed. wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[0].AfterStageTaskStatus[0].Conditions, - generateTrueCondition(updateRun, placementv1alpha1.AfterStageTaskConditionWaitTimeElapsed)) + generateTrueCondition(updateRun, placementv1beta1.AfterStageTaskConditionWaitTimeElapsed)) // 1st stage completed. - wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) // 2nd stage started. - wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)) + wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) // 1st cluster in 2nd stage started. - wantStatus.StagesStatus[1].Clusters[0].Conditions = append(wantStatus.StagesStatus[1].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[1].Clusters[0].Conditions = append(wantStatus.StagesStatus[1].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") By("Validating the 1st stage has endTime set") Expect(updateRun.Status.StagesStatus[0].EndTime).ShouldNot(BeNil()) By("Validating the waitTime after stage task only completes after the wait time") - waitStartTime := meta.FindStatusCondition(updateRun.Status.StagesStatus[0].Conditions, string(placementv1alpha1.StageUpdatingConditionProgressing)).LastTransitionTime.Time - waitEndTime := meta.FindStatusCondition(updateRun.Status.StagesStatus[0].AfterStageTaskStatus[0].Conditions, string(placementv1alpha1.AfterStageTaskConditionWaitTimeElapsed)).LastTransitionTime.Time + waitStartTime := meta.FindStatusCondition(updateRun.Status.StagesStatus[0].Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)).LastTransitionTime.Time + waitEndTime := meta.FindStatusCondition(updateRun.Status.StagesStatus[0].AfterStageTaskStatus[0].Conditions, string(placementv1beta1.AfterStageTaskConditionWaitTimeElapsed)).LastTransitionTime.Time // In this test, I set wait time to be 4 seconds, while stageClusterUpdatingWaitTime is 3 seconds. // So it needs 2 rounds of reconcile to wait for the waitTime to elapse, waitEndTime - waitStartTime should be around 6 seconds. 
Expect(waitStartTime.Add(updateStrategy.Spec.Stages[0].AfterStageTasks[0].WaitTime.Duration).Before(waitEndTime)).Should(BeTrue()) @@ -296,8 +296,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 1st cluster has succeeded and 2nd cluster has started") - wantStatus.StagesStatus[1].Clusters[0].Conditions = append(wantStatus.StagesStatus[1].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[1].Clusters[1].Conditions = append(wantStatus.StagesStatus[1].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[1].Clusters[0].Conditions = append(wantStatus.StagesStatus[1].Clusters[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[1].Clusters[1].Conditions = append(wantStatus.StagesStatus[1].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") By("Validating the 2nd stage has startTime set") @@ -314,8 +314,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 2nd cluster has succeeded and 3rd cluster has started") - wantStatus.StagesStatus[1].Clusters[1].Conditions = append(wantStatus.StagesStatus[1].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[1].Clusters[2].Conditions = append(wantStatus.StagesStatus[1].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[1].Clusters[1].Conditions = append(wantStatus.StagesStatus[1].Clusters[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[1].Clusters[2].Conditions = append(wantStatus.StagesStatus[1].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -329,8 +329,8 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 3rd cluster has succeeded and 4th cluster has started") - wantStatus.StagesStatus[1].Clusters[2].Conditions = append(wantStatus.StagesStatus[1].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[1].Clusters[3].Conditions = append(wantStatus.StagesStatus[1].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[1].Clusters[2].Conditions = append(wantStatus.StagesStatus[1].Clusters[2].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[1].Clusters[3].Conditions = append(wantStatus.StagesStatus[1].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -344,8 +344,8 @@ var _ 
= Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 4th cluster has succeeded and 5th cluster has started") - wantStatus.StagesStatus[1].Clusters[3].Conditions = append(wantStatus.StagesStatus[1].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[1].Clusters[4].Conditions = append(wantStatus.StagesStatus[1].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.StagesStatus[1].Clusters[3].Conditions = append(wantStatus.StagesStatus[1].Clusters[3].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[1].Clusters[4].Conditions = append(wantStatus.StagesStatus[1].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -359,28 +359,28 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Status().Update(ctx, binding)).Should(Succeed(), "failed to update the binding status") By("Validating the 5th cluster has succeeded and the stage waiting for AfterStageTask") - wantStatus.StagesStatus[1].Clusters[4].Conditions = append(wantStatus.StagesStatus[1].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - stageWaitingCondition := generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing) + wantStatus.StagesStatus[1].Clusters[4].Conditions = append(wantStatus.StagesStatus[1].Clusters[4].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + stageWaitingCondition := generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing) stageWaitingCondition.Reason = condition.StageUpdatingWaitingReason wantStatus.StagesStatus[1].Conditions[0] = stageWaitingCondition // The progressing condition now becomes false with waiting reason. 
wantStatus.StagesStatus[1].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[1].AfterStageTaskStatus[0].Conditions, - generateTrueCondition(updateRun, placementv1alpha1.AfterStageTaskConditionApprovalRequestCreated)) + generateTrueCondition(updateRun, placementv1beta1.AfterStageTaskConditionApprovalRequestCreated)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) It("Should complete the 2nd stage after the ApprovalRequest is approved and move on to the delete stage", func() { By("Validating the approvalRequest has been created") - approvalRequest := &placementv1alpha1.ClusterApprovalRequest{} - wantApprovalRequest := &placementv1alpha1.ClusterApprovalRequest{ + approvalRequest := &placementv1beta1.ClusterApprovalRequest{} + wantApprovalRequest := &placementv1beta1.ClusterApprovalRequest{ ObjectMeta: metav1.ObjectMeta{ Name: updateRun.Status.StagesStatus[1].AfterStageTaskStatus[0].ApprovalRequestName, Labels: map[string]string{ - placementv1alpha1.TargetUpdatingStageNameLabel: updateRun.Status.StagesStatus[1].StageName, - placementv1alpha1.TargetUpdateRunLabel: updateRun.Name, - placementv1alpha1.IsLatestUpdateRunApprovalLabel: "true", + placementv1beta1.TargetUpdatingStageNameLabel: updateRun.Status.StagesStatus[1].StageName, + placementv1beta1.TargetUpdateRunLabel: updateRun.Name, + placementv1beta1.IsLatestUpdateRunApprovalLabel: "true", }, }, - Spec: placementv1alpha1.ApprovalRequestSpec{ + Spec: placementv1beta1.ApprovalRequestSpec{ TargetUpdateRun: updateRun.Name, TargetStage: updateRun.Status.StagesStatus[1].StageName, }, @@ -399,17 +399,17 @@ var _ = Describe("UpdateRun execution tests", func() { }, timeout, interval).Should(Succeed(), "failed to validate the approvalRequest") By("Approving the approvalRequest") - meta.SetStatusCondition(&approvalRequest.Status.Conditions, generateTrueCondition(approvalRequest, placementv1alpha1.ApprovalRequestConditionApproved)) + meta.SetStatusCondition(&approvalRequest.Status.Conditions, generateTrueCondition(approvalRequest, placementv1beta1.ApprovalRequestConditionApproved)) Expect(k8sClient.Status().Update(ctx, approvalRequest)).Should(Succeed(), "failed to update the approvalRequest status") By("Validating the 2nd stage has completed and the delete stage has started") wantStatus.StagesStatus[1].AfterStageTaskStatus[0].Conditions = append(wantStatus.StagesStatus[1].AfterStageTaskStatus[0].Conditions, - generateTrueCondition(updateRun, placementv1alpha1.AfterStageTaskConditionApprovalRequestApproved)) - wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)) + generateTrueCondition(updateRun, placementv1beta1.AfterStageTaskConditionApprovalRequestApproved)) + wantStatus.StagesStatus[1].Conditions = append(wantStatus.StagesStatus[1].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) for i := range wantStatus.DeletionStageStatus.Clusters { - wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, generateTrueCondition(updateRun, 
placementv1alpha1.ClusterUpdatingConditionStarted)) + wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)) } validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) @@ -432,10 +432,10 @@ var _ = Describe("UpdateRun execution tests", func() { By("Validating the delete stage and the clusterStagedUpdateRun has completed") for i := range wantStatus.DeletionStageStatus.Clusters { - wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) + wantStatus.DeletionStageStatus.Clusters[i].Conditions = append(wantStatus.DeletionStageStatus.Clusters[i].Conditions, generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) } - wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)) - wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionSucceeded)) + wantStatus.DeletionStageStatus.Conditions = append(wantStatus.DeletionStageStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) + wantStatus.Conditions = append(wantStatus.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) }) @@ -465,15 +465,15 @@ var _ = Describe("UpdateRun execution tests", func() { Expect(k8sClient.Update(ctx, binding)).Should(Succeed(), "failed to update the binding state") By("Validating the updateRun has failed") - wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateFalseCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)) - wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)) - wantStatus.Conditions = append(wantStatus.Conditions, generateFalseCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionSucceeded)) + wantStatus.StagesStatus[0].Clusters[0].Conditions = append(wantStatus.StagesStatus[0].Clusters[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)) + wantStatus.StagesStatus[0].Conditions = append(wantStatus.StagesStatus[0].Conditions, generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)) + wantStatus.Conditions = append(wantStatus.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) validateClusterStagedUpdateRunStatus(ctx, updateRun, wantStatus, "") }) }) }) -func validateBindingState(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, resourceSnapshotName string, updateRun *placementv1alpha1.ClusterStagedUpdateRun, stage int) { +func validateBindingState(ctx context.Context, binding *placementv1beta1.ClusterResourceBinding, resourceSnapshotName string, updateRun *placementv1beta1.ClusterStagedUpdateRun, stage int) { Eventually(func() error { if err := k8sClient.Get(ctx, types.NamespacedName{Name: binding.Name}, binding); err != nil { return err diff --git 
a/pkg/controllers/updaterun/execution_test.go b/pkg/controllers/updaterun/execution_test.go index 6023ab52f..3b81c7ff9 100644 --- a/pkg/controllers/updaterun/execution_test.go +++ b/pkg/controllers/updaterun/execution_test.go @@ -11,7 +11,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/condition" ) @@ -19,15 +18,15 @@ import ( func TestIsBindingSyncedWithClusterStatus(t *testing.T) { tests := []struct { name string - updateRun *placementv1alpha1.ClusterStagedUpdateRun + updateRun *placementv1beta1.ClusterStagedUpdateRun binding *placementv1beta1.ClusterResourceBinding - cluster *placementv1alpha1.ClusterUpdatingStatus + cluster *placementv1beta1.ClusterUpdatingStatus wantEqual bool }{ { name: "isBindingSyncedWithClusterStatus should return false if binding and updateRun have different resourceSnapshot", - updateRun: &placementv1alpha1.ClusterStagedUpdateRun{ - Spec: placementv1alpha1.StagedUpdateRunSpec{ + updateRun: &placementv1beta1.ClusterStagedUpdateRun{ + Spec: placementv1beta1.StagedUpdateRunSpec{ ResourceSnapshotIndex: "test-snapshot", }, }, @@ -40,8 +39,8 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, { name: "isBindingSyncedWithClusterStatus should return false if binding and cluster status have different resourceOverrideSnapshot list", - updateRun: &placementv1alpha1.ClusterStagedUpdateRun{ - Spec: placementv1alpha1.StagedUpdateRunSpec{ + updateRun: &placementv1beta1.ClusterStagedUpdateRun{ + Spec: placementv1beta1.StagedUpdateRunSpec{ ResourceSnapshotIndex: "test-snapshot", }, }, @@ -60,7 +59,7 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, }, }, - cluster: &placementv1alpha1.ClusterUpdatingStatus{ + cluster: &placementv1beta1.ClusterUpdatingStatus{ ResourceOverrideSnapshots: []placementv1beta1.NamespacedName{ { Name: "ro1", @@ -76,8 +75,8 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, { name: "isBindingSyncedWithClusterStatus should return false if binding and cluster status have different clusterResourceOverrideSnapshot list", - updateRun: &placementv1alpha1.ClusterStagedUpdateRun{ - Spec: placementv1alpha1.StagedUpdateRunSpec{ + updateRun: &placementv1beta1.ClusterStagedUpdateRun{ + Spec: placementv1beta1.StagedUpdateRunSpec{ ResourceSnapshotIndex: "test-snapshot", }, }, @@ -91,7 +90,7 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { ClusterResourceOverrideSnapshots: []string{"cr1", "cr2"}, }, }, - cluster: &placementv1alpha1.ClusterUpdatingStatus{ + cluster: &placementv1beta1.ClusterUpdatingStatus{ ResourceOverrideSnapshots: []placementv1beta1.NamespacedName{ {Name: "ro1", Namespace: "ns1"}, {Name: "ro2", Namespace: "ns2"}, @@ -102,11 +101,11 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, { name: "isBindingSyncedWithClusterStatus should return false if binding and updateRun have different applyStrategy", - updateRun: &placementv1alpha1.ClusterStagedUpdateRun{ - Spec: placementv1alpha1.StagedUpdateRunSpec{ + updateRun: &placementv1beta1.ClusterStagedUpdateRun{ + Spec: placementv1beta1.StagedUpdateRunSpec{ ResourceSnapshotIndex: "test-snapshot", }, - Status: placementv1alpha1.StagedUpdateRunStatus{ + Status: placementv1beta1.StagedUpdateRunStatus{ ApplyStrategy: &placementv1beta1.ApplyStrategy{ Type: placementv1beta1.ApplyStrategyTypeClientSideApply, }, @@ -125,7 +124,7 @@ func 
TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, }, }, - cluster: &placementv1alpha1.ClusterUpdatingStatus{ + cluster: &placementv1beta1.ClusterUpdatingStatus{ ResourceOverrideSnapshots: []placementv1beta1.NamespacedName{ {Name: "ro1", Namespace: "ns1"}, {Name: "ro2", Namespace: "ns2"}, @@ -136,11 +135,11 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, { name: "isBindingSyncedWithClusterStatus should return true if resourceSnapshot, applyStrategy, and override lists are all deep equal", - updateRun: &placementv1alpha1.ClusterStagedUpdateRun{ - Spec: placementv1alpha1.StagedUpdateRunSpec{ + updateRun: &placementv1beta1.ClusterStagedUpdateRun{ + Spec: placementv1beta1.StagedUpdateRunSpec{ ResourceSnapshotIndex: "test-snapshot", }, - Status: placementv1alpha1.StagedUpdateRunStatus{ + Status: placementv1beta1.StagedUpdateRunStatus{ ApplyStrategy: &placementv1beta1.ApplyStrategy{ Type: placementv1beta1.ApplyStrategyTypeReportDiff, }, @@ -159,7 +158,7 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { }, }, }, - cluster: &placementv1alpha1.ClusterUpdatingStatus{ + cluster: &placementv1beta1.ClusterUpdatingStatus{ ResourceOverrideSnapshots: []placementv1beta1.NamespacedName{ {Name: "ro1", Namespace: "ns1"}, {Name: "ro2", Namespace: "ns2"}, @@ -180,10 +179,10 @@ func TestIsBindingSyncedWithClusterStatus(t *testing.T) { } func TestCheckClusterUpdateResult(t *testing.T) { - updatingStage := &placementv1alpha1.StageUpdatingStatus{ + updatingStage := &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", } - updateRun := &placementv1alpha1.ClusterStagedUpdateRun{ + updateRun := &placementv1beta1.ClusterStagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Generation: 1, }, @@ -191,7 +190,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { tests := []struct { name string binding *placementv1beta1.ClusterResourceBinding - clusterStatus *placementv1alpha1.ClusterUpdatingStatus + clusterStatus *placementv1beta1.ClusterUpdatingStatus wantSucceeded bool wantErr bool }{ @@ -210,7 +209,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { }, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: true, wantErr: false, }, @@ -229,7 +228,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { }, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: false, wantErr: true, }, @@ -248,7 +247,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { }, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: false, wantErr: true, }, @@ -267,7 +266,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { }, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: false, wantErr: true, }, @@ -298,7 +297,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { }, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: false, wantErr: false, }, @@ -310,7 +309,7 @@ func TestCheckClusterUpdateResult(t 
*testing.T) { Conditions: []metav1.Condition{}, }, }, - clusterStatus: &placementv1alpha1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, + clusterStatus: &placementv1beta1.ClusterUpdatingStatus{ClusterName: "test-cluster"}, wantSucceeded: false, wantErr: false, }, @@ -325,7 +324,7 @@ func TestCheckClusterUpdateResult(t *testing.T) { t.Fatalf("checkClusterUpdateResult() got error %v; want error %v", gotErr, test.wantErr) } if test.wantSucceeded { - if !condition.IsConditionStatusTrue(meta.FindStatusCondition(test.clusterStatus.Conditions, string(placementv1alpha1.ClusterUpdatingConditionSucceeded)), updateRun.Generation) { + if !condition.IsConditionStatusTrue(meta.FindStatusCondition(test.clusterStatus.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)), updateRun.Generation) { t.Fatalf("checkClusterUpdateResult() failed to set ClusterUpdatingConditionSucceeded condition") } } diff --git a/pkg/controllers/updaterun/initialization.go b/pkg/controllers/updaterun/initialization.go index 5db90a40f..7c60d1c89 100644 --- a/pkg/controllers/updaterun/initialization.go +++ b/pkg/controllers/updaterun/initialization.go @@ -18,7 +18,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/annotations" "go.goms.io/fleet/pkg/utils/condition" @@ -30,7 +29,7 @@ import ( // This function is called only once during the initialization of the ClusterStagedUpdateRun. func (r *Reconciler) initialize( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) ([]*placementv1beta1.ClusterResourceBinding, []*placementv1beta1.ClusterResourceBinding, error) { // Validate the ClusterResourcePlace object referenced by the ClusterStagedUpdateRun. placementName, err := r.validateCRP(ctx, updateRun) @@ -60,7 +59,7 @@ func (r *Reconciler) initialize( } // validateCRP validates the ClusterResourcePlacement object referenced by the ClusterStagedUpdateRun. -func (r *Reconciler) validateCRP(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) (string, error) { +func (r *Reconciler) validateCRP(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) (string, error) { updateRunRef := klog.KObj(updateRun) // Fetch the ClusterResourcePlacement object. placementName := updateRun.Spec.PlacementName @@ -89,7 +88,7 @@ func (r *Reconciler) validateCRP(ctx context.Context, updateRun *placementv1alph func (r *Reconciler) determinePolicySnapshot( ctx context.Context, placementName string, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) (*placementv1beta1.ClusterSchedulingPolicySnapshot, int, error) { updateRunRef := klog.KObj(updateRun) // Get the latest policy snapshot. @@ -158,7 +157,7 @@ func (r *Reconciler) collectScheduledClusters( ctx context.Context, placementName string, latestPolicySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) ([]*placementv1beta1.ClusterResourceBinding, []*placementv1beta1.ClusterResourceBinding, error) { updateRunRef := klog.KObj(updateRun) // List all the bindings according to the ClusterResourcePlacement. 
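[Editor's note, not part of the patch: a minimal sketch of a ClusterStagedUpdateRun built against the v1beta1 API that the initialization code above now takes, mirroring the spec fields used by the generateTestClusterStagedUpdateRun fixture earlier in this diff. The package name and all object, placement, snapshot, and strategy names below are hypothetical placeholders.]

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

// newExampleUpdateRun returns a minimal v1beta1 ClusterStagedUpdateRun; only the
// spec fields exercised by the fixtures in this patch are set.
func newExampleUpdateRun() *placementv1beta1.ClusterStagedUpdateRun {
	return &placementv1beta1.ClusterStagedUpdateRun{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-run", // hypothetical name
		},
		Spec: placementv1beta1.StagedUpdateRunSpec{
			PlacementName:            "example-crp",      // hypothetical ClusterResourcePlacement name
			ResourceSnapshotIndex:    "example-snapshot", // hypothetical resource snapshot index
			StagedUpdateStrategyName: "example-strategy", // hypothetical ClusterStagedUpdateStrategy name
		},
	}
}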
@@ -208,11 +207,11 @@ func (r *Reconciler) generateStagesByStrategy( ctx context.Context, scheduledBindings []*placementv1beta1.ClusterResourceBinding, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) error { updateRunRef := klog.KObj(updateRun) // Fetch the StagedUpdateStrategy referenced by StagedUpdateStrategyName. - var updateStrategy placementv1alpha1.ClusterStagedUpdateStrategy + var updateStrategy placementv1beta1.ClusterStagedUpdateStrategy if err := r.Client.Get(ctx, client.ObjectKey{Name: updateRun.Spec.StagedUpdateStrategyName}, &updateStrategy); err != nil { klog.ErrorS(err, "Failed to get StagedUpdateStrategy", "stagedUpdateStrategy", updateRun.Spec.StagedUpdateStrategyName, "clusterStagedUpdateRun", updateRunRef) if apierrors.IsNotFound(err) { @@ -230,7 +229,7 @@ func (r *Reconciler) generateStagesByStrategy( if err := r.computeRunStageStatus(ctx, scheduledBindings, updateRun); err != nil { return err } - toBeDeletedClusters := make([]placementv1alpha1.ClusterUpdatingStatus, len(toBeDeletedBindings)) + toBeDeletedClusters := make([]placementv1beta1.ClusterUpdatingStatus, len(toBeDeletedBindings)) for i, binding := range toBeDeletedBindings { klog.V(2).InfoS("Adding a cluster to the delete stage", "cluster", binding.Spec.TargetCluster, "clusterStagedUpdateStrategy", updateStrategy.Name, "clusterStagedUpdateRun", updateRunRef) toBeDeletedClusters[i].ClusterName = binding.Spec.TargetCluster @@ -239,8 +238,8 @@ func (r *Reconciler) generateStagesByStrategy( sort.Slice(toBeDeletedClusters, func(i, j int) bool { return toBeDeletedClusters[i].ClusterName < toBeDeletedClusters[j].ClusterName }) - updateRun.Status.DeletionStageStatus = &placementv1alpha1.StageUpdatingStatus{ - StageName: placementv1alpha1.UpdateRunDeleteStageName, + updateRun.Status.DeletionStageStatus = &placementv1beta1.StageUpdatingStatus{ + StageName: placementv1beta1.UpdateRunDeleteStageName, Clusters: toBeDeletedClusters, } return nil @@ -250,7 +249,7 @@ func (r *Reconciler) generateStagesByStrategy( func (r *Reconciler) computeRunStageStatus( ctx context.Context, scheduledBindings []*placementv1beta1.ClusterResourceBinding, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) error { updateRunRef := klog.KObj(updateRun) updateStrategyName := updateRun.Spec.StagedUpdateStrategyName @@ -261,7 +260,7 @@ func (r *Reconciler) computeRunStageStatus( for _, binding := range scheduledBindings { allSelectedClusters[binding.Spec.TargetCluster] = struct{}{} } - stagesStatus := make([]placementv1alpha1.StageUpdatingStatus, 0, len(updateRun.Status.StagedUpdateStrategySnapshot.Stages)) + stagesStatus := make([]placementv1beta1.StageUpdatingStatus, 0, len(updateRun.Status.StagedUpdateStrategySnapshot.Stages)) // Apply the label selectors from the ClusterStagedUpdateStrategy to filter the clusters. 
for _, stage := range updateRun.Status.StagedUpdateStrategySnapshot.Stages { @@ -272,7 +271,7 @@ func (r *Reconciler) computeRunStageStatus( return fmt.Errorf("%w: %s", errInitializedFailed, invalidAfterStageErr.Error()) } - curStageUpdatingStatus := placementv1alpha1.StageUpdatingStatus{StageName: stage.Name} + curStageUpdatingStatus := placementv1beta1.StageUpdatingStatus{StageName: stage.Name} var curStageClusters []clusterv1beta1.MemberCluster labelSelector, err := metav1.LabelSelectorAsSelector(stage.LabelSelector) if err != nil { @@ -333,18 +332,18 @@ func (r *Reconciler) computeRunStageStatus( } // Record the clusters in the stage. - curStageUpdatingStatus.Clusters = make([]placementv1alpha1.ClusterUpdatingStatus, len(curStageClusters)) + curStageUpdatingStatus.Clusters = make([]placementv1beta1.ClusterUpdatingStatus, len(curStageClusters)) for i, cluster := range curStageClusters { klog.V(2).InfoS("Adding a cluster to the stage", "cluster", cluster.Name, "clusterStagedUpdateStrategy", updateStrategyName, "stage name", stage.Name, "clusterStagedUpdateRun", updateRunRef) curStageUpdatingStatus.Clusters[i].ClusterName = cluster.Name } // Create the after stage tasks. - curStageUpdatingStatus.AfterStageTaskStatus = make([]placementv1alpha1.AfterStageTaskStatus, len(stage.AfterStageTasks)) + curStageUpdatingStatus.AfterStageTaskStatus = make([]placementv1beta1.AfterStageTaskStatus, len(stage.AfterStageTasks)) for i, task := range stage.AfterStageTasks { curStageUpdatingStatus.AfterStageTaskStatus[i].Type = task.Type - if task.Type == placementv1alpha1.AfterStageTaskTypeApproval { - curStageUpdatingStatus.AfterStageTaskStatus[i].ApprovalRequestName = fmt.Sprintf(placementv1alpha1.ApprovalTaskNameFmt, updateRun.Name, stage.Name) + if task.Type == placementv1beta1.AfterStageTaskTypeApproval { + curStageUpdatingStatus.AfterStageTaskStatus[i].ApprovalRequestName = fmt.Sprintf(placementv1beta1.ApprovalTaskNameFmt, updateRun.Name, stage.Name) } } stagesStatus = append(stagesStatus, curStageUpdatingStatus) @@ -367,12 +366,12 @@ func (r *Reconciler) computeRunStageStatus( // validateAfterStageTask valides the afterStageTasks in the stage defined in the clusterStagedUpdateStrategy. // The error returned from this function is not retryable. -func validateAfterStageTask(tasks []placementv1alpha1.AfterStageTask) error { +func validateAfterStageTask(tasks []placementv1beta1.AfterStageTask) error { if len(tasks) == 2 && tasks[0].Type == tasks[1].Type { return fmt.Errorf("afterStageTasks cannot have two tasks of the same type: %s", tasks[0].Type) } for i, task := range tasks { - if task.Type == placementv1alpha1.AfterStageTaskTypeTimedWait { + if task.Type == placementv1beta1.AfterStageTaskTypeTimedWait { if task.WaitTime.Duration <= 0 { return fmt.Errorf("task %d has wait duration <= 0", i) } @@ -382,7 +381,7 @@ func validateAfterStageTask(tasks []placementv1alpha1.AfterStageTask) error { } // recordOverrideSnapshots finds all the override snapshots that are associated with each cluster and record them in the ClusterStagedUpdateRun status. 
-func (r *Reconciler) recordOverrideSnapshots(ctx context.Context, placementName string, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { +func (r *Reconciler) recordOverrideSnapshots(ctx context.Context, placementName string, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { updateRunRef := klog.KObj(updateRun) var masterResourceSnapshot placementv1beta1.ClusterResourceSnapshot if err := r.Client.Get(ctx, client.ObjectKey{Name: updateRun.Spec.ResourceSnapshotIndex}, &masterResourceSnapshot); err != nil { @@ -434,9 +433,9 @@ func (r *Reconciler) recordOverrideSnapshots(ctx context.Context, placementName } // recordInitializationSucceeded records the successful initialization condition in the ClusterStagedUpdateRun status. -func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun) error { +func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun) error { meta.SetStatusCondition(&updateRun.Status.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StagedUpdateRunConditionInitialized), + Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), Status: metav1.ConditionTrue, ObservedGeneration: updateRun.Generation, Reason: condition.UpdateRunInitializeSucceededReason, @@ -451,9 +450,9 @@ func (r *Reconciler) recordInitializationSucceeded(ctx context.Context, updateRu } // recordInitializationFailed records the failed initialization condition in the ClusterStagedUpdateRun status. -func (r *Reconciler) recordInitializationFailed(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun, message string) error { +func (r *Reconciler) recordInitializationFailed(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun, message string) error { meta.SetStatusCondition(&updateRun.Status.Conditions, metav1.Condition{ - Type: string(placementv1alpha1.StagedUpdateRunConditionInitialized), + Type: string(placementv1beta1.StagedUpdateRunConditionInitialized), Status: metav1.ConditionFalse, ObservedGeneration: updateRun.Generation, Reason: condition.UpdateRunInitializeFailedReason, diff --git a/pkg/controllers/updaterun/initialization_integration_test.go b/pkg/controllers/updaterun/initialization_integration_test.go index d849c4ec1..3befa65e0 100644 --- a/pkg/controllers/updaterun/initialization_integration_test.go +++ b/pkg/controllers/updaterun/initialization_integration_test.go @@ -32,7 +32,7 @@ var ( cmpOptions = []cmp.Option{ cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"), cmpopts.IgnoreFields(metav1.Condition{}, "Message"), - cmpopts.IgnoreFields(placementv1alpha1.StageUpdatingStatus{}, "StartTime", "EndTime"), + cmpopts.IgnoreFields(placementv1beta1.StageUpdatingStatus{}, "StartTime", "EndTime"), } ) @@ -42,10 +42,10 @@ const ( ) var _ = Describe("Updaterun initialization tests", func() { - var updateRun *placementv1alpha1.ClusterStagedUpdateRun + var updateRun *placementv1beta1.ClusterStagedUpdateRun var crp *placementv1beta1.ClusterResourcePlacement var policySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot - var updateStrategy *placementv1alpha1.ClusterStagedUpdateStrategy + var updateStrategy *placementv1beta1.ClusterStagedUpdateStrategy var resourceBindings []*placementv1beta1.ClusterResourceBinding var targetClusters []*clusterv1beta1.MemberCluster var unscheduledCluster []*clusterv1beta1.MemberCluster @@ -465,9 +465,9 @@ var _ = Describe("Updaterun initialization 
tests", func() { Context("Test validateAfterStageTask", func() { It("Should fail to initialize if any after stage task has 2 same tasks", func() { By("Creating a clusterStagedUpdateStrategy with 2 same after stage tasks") - updateStrategy.Spec.Stages[0].AfterStageTasks = []placementv1alpha1.AfterStageTask{ - {Type: placementv1alpha1.AfterStageTaskTypeTimedWait}, - {Type: placementv1alpha1.AfterStageTaskTypeTimedWait}, + updateStrategy.Spec.Stages[0].AfterStageTasks = []placementv1beta1.AfterStageTask{ + {Type: placementv1beta1.AfterStageTaskTypeTimedWait}, + {Type: placementv1beta1.AfterStageTaskTypeTimedWait}, } Expect(k8sClient.Create(ctx, updateStrategy)).To(Succeed()) @@ -480,8 +480,8 @@ var _ = Describe("Updaterun initialization tests", func() { It("Should fail to initialize if the wait time is not valid", func() { By("Creating a clusterStagedUpdateStrategy with invalid wait time duration") - updateStrategy.Spec.Stages[0].AfterStageTasks = []placementv1alpha1.AfterStageTask{ - {Type: placementv1alpha1.AfterStageTaskTypeTimedWait, WaitTime: metav1.Duration{Duration: time.Second * 0}}, + updateStrategy.Spec.Stages[0].AfterStageTasks = []placementv1beta1.AfterStageTask{ + {Type: placementv1beta1.AfterStageTaskTypeTimedWait, WaitTime: metav1.Duration{Duration: time.Second * 0}}, } Expect(k8sClient.Create(ctx, updateStrategy)).To(Succeed()) @@ -553,7 +553,7 @@ var _ = Describe("Updaterun initialization tests", func() { } // initialization should fail. want.Conditions = []metav1.Condition{ - generateFalseCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionInitialized), + generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized), } if diff := cmp.Diff(*want, updateRun.Status, cmpOptions...); diff != "" { @@ -651,16 +651,16 @@ var _ = Describe("Updaterun initialization tests", func() { }) }) -func validateFailedInitCondition(ctx context.Context, updateRun *placementv1alpha1.ClusterStagedUpdateRun, message string) { +func validateFailedInitCondition(ctx context.Context, updateRun *placementv1beta1.ClusterStagedUpdateRun, message string) { Eventually(func() error { if err := k8sClient.Get(ctx, updateRunNamespacedName, updateRun); err != nil { return err } - wantConditions := []metav1.Condition{generateFalseCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionInitialized)} + wantConditions := []metav1.Condition{generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized)} if diff := cmp.Diff(wantConditions, updateRun.Status.Conditions, cmpOptions...); diff != "" { return fmt.Errorf("condition mismatch: (-want +got):\n%s", diff) } - initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1alpha1.StagedUpdateRunConditionInitialized)) + initCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionInitialized)) if !strings.Contains(initCond.Message, message) { return fmt.Errorf("condition message mismatch: got %s, want %s", initCond.Message, message) } @@ -670,50 +670,50 @@ func validateFailedInitCondition(ctx context.Context, updateRun *placementv1alph func generateSucceededInitializationStatus( crp *placementv1beta1.ClusterResourcePlacement, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, policySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot, - updateStrategy *placementv1alpha1.ClusterStagedUpdateStrategy, + updateStrategy *placementv1beta1.ClusterStagedUpdateStrategy, 
clusterResourceOverride *placementv1alpha1.ClusterResourceOverrideSnapshot, -) *placementv1alpha1.StagedUpdateRunStatus { - return &placementv1alpha1.StagedUpdateRunStatus{ +) *placementv1beta1.StagedUpdateRunStatus { + return &placementv1beta1.StagedUpdateRunStatus{ PolicySnapshotIndexUsed: policySnapshot.Name, PolicyObservedClusterCount: numberOfClustersAnnotation, ApplyStrategy: crp.Spec.Strategy.ApplyStrategy.DeepCopy(), StagedUpdateStrategySnapshot: &updateStrategy.Spec, - StagesStatus: []placementv1alpha1.StageUpdatingStatus{ + StagesStatus: []placementv1beta1.StageUpdatingStatus{ { StageName: "stage1", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-9", ClusterResourceOverrideSnapshots: []string{clusterResourceOverride.Name}}, {ClusterName: "cluster-7", ClusterResourceOverrideSnapshots: []string{clusterResourceOverride.Name}}, {ClusterName: "cluster-5", ClusterResourceOverrideSnapshots: []string{clusterResourceOverride.Name}}, {ClusterName: "cluster-3", ClusterResourceOverrideSnapshots: []string{clusterResourceOverride.Name}}, {ClusterName: "cluster-1", ClusterResourceOverrideSnapshots: []string{clusterResourceOverride.Name}}, }, - AfterStageTaskStatus: []placementv1alpha1.AfterStageTaskStatus{ - {Type: placementv1alpha1.AfterStageTaskTypeTimedWait}, + AfterStageTaskStatus: []placementv1beta1.AfterStageTaskStatus{ + {Type: placementv1beta1.AfterStageTaskTypeTimedWait}, }, }, { StageName: "stage2", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-0"}, {ClusterName: "cluster-2"}, {ClusterName: "cluster-4"}, {ClusterName: "cluster-6"}, {ClusterName: "cluster-8"}, }, - AfterStageTaskStatus: []placementv1alpha1.AfterStageTaskStatus{ + AfterStageTaskStatus: []placementv1beta1.AfterStageTaskStatus{ { - Type: placementv1alpha1.AfterStageTaskTypeApproval, + Type: placementv1beta1.AfterStageTaskTypeApproval, ApprovalRequestName: updateRun.Name + "-stage2", }, }, }, }, - DeletionStageStatus: &placementv1alpha1.StageUpdatingStatus{ + DeletionStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "kubernetes-fleet.io/deleteStage", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "unscheduled-cluster-0"}, {ClusterName: "unscheduled-cluster-1"}, {ClusterName: "unscheduled-cluster-2"}, @@ -721,20 +721,20 @@ func generateSucceededInitializationStatus( }, Conditions: []metav1.Condition{ // initialization should succeed! - generateTrueCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionInitialized), + generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionInitialized), }, } } func generateExecutionStartedStatus( - updateRun *placementv1alpha1.ClusterStagedUpdateRun, - initialized *placementv1alpha1.StagedUpdateRunStatus, -) *placementv1alpha1.StagedUpdateRunStatus { + updateRun *placementv1beta1.ClusterStagedUpdateRun, + initialized *placementv1beta1.StagedUpdateRunStatus, +) *placementv1beta1.StagedUpdateRunStatus { // Mark updateRun execution has started. - initialized.Conditions = append(initialized.Conditions, generateTrueCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionProgressing)) + initialized.Conditions = append(initialized.Conditions, generateTrueCondition(updateRun, placementv1beta1.StagedUpdateRunConditionProgressing)) // Mark updateRun 1st stage has started. 
- initialized.StagesStatus[0].Conditions = append(initialized.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)) + initialized.StagesStatus[0].Conditions = append(initialized.StagesStatus[0].Conditions, generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)) // Mark updateRun 1st cluster in the 1st stage has started. - initialized.StagesStatus[0].Clusters[0].Conditions = []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)} + initialized.StagesStatus[0].Clusters[0].Conditions = []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)} return initialized } diff --git a/pkg/controllers/updaterun/suite_test.go b/pkg/controllers/updaterun/suite_test.go index 9cbc93256..987e1ace8 100644 --- a/pkg/controllers/updaterun/suite_test.go +++ b/pkg/controllers/updaterun/suite_test.go @@ -28,7 +28,7 @@ import ( clusterv1beta1 "go.goms.io/fleet/apis/cluster/v1beta1" placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" - fleetv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" + placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils" "go.goms.io/fleet/pkg/utils/informer" ) @@ -68,8 +68,8 @@ var _ = BeforeSuite(func() { Expect(cfg).NotTo(BeNil()) Expect(clusterv1beta1.AddToScheme(scheme.Scheme)).Should(Succeed()) - Expect(fleetv1beta1.AddToScheme(scheme.Scheme)).Should(Succeed()) Expect(placementv1alpha1.AddToScheme(scheme.Scheme)).Should(Succeed()) + Expect(placementv1beta1.AddToScheme(scheme.Scheme)).Should(Succeed()) Expect(err).NotTo(HaveOccurred()) By("starting the controller manager") diff --git a/pkg/controllers/updaterun/validation.go b/pkg/controllers/updaterun/validation.go index bff1086f9..919618240 100644 --- a/pkg/controllers/updaterun/validation.go +++ b/pkg/controllers/updaterun/validation.go @@ -13,7 +13,6 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/klog/v2" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/condition" "go.goms.io/fleet/pkg/utils/controller" @@ -26,7 +25,7 @@ import ( // If the updating stage index is len(updateRun.Status.StagesStatus), the next stage to be updated will be the delete stage. func (r *Reconciler) validate( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) (int, []*placementv1beta1.ClusterResourceBinding, []*placementv1beta1.ClusterResourceBinding, error) { // Some of the validating function changes the object, so we need to make a copy of the object. updateRunRef := klog.KObj(updateRun) @@ -85,7 +84,7 @@ func (r *Reconciler) validate( func (r *Reconciler) validateStagesStatus( ctx context.Context, scheduledBindings, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding, - updateRun, updateRunCopy *placementv1alpha1.ClusterStagedUpdateRun, + updateRun, updateRunCopy *placementv1beta1.ClusterStagedUpdateRun, ) (int, error) { updateRunRef := klog.KObj(updateRun) @@ -119,7 +118,7 @@ func (r *Reconciler) validateStagesStatus( // validateUpdateStagesStatus is a helper function to validate the updating stages in the clusterStagedUpdateRun. // It compares the existing stage status with the latest list of clusters to be updated. // It returns the index of the updating stage, the index of the last finished stage and any error encountered. 
-func validateUpdateStagesStatus(existingStageStatus []placementv1alpha1.StageUpdatingStatus, updateRun *placementv1alpha1.ClusterStagedUpdateRun) (int, int, error) { +func validateUpdateStagesStatus(existingStageStatus []placementv1beta1.StageUpdatingStatus, updateRun *placementv1beta1.ClusterStagedUpdateRun) (int, int, error) { updatingStageIndex := -1 lastFinishedStageIndex := -1 // Remember the newly computed stage status. @@ -166,11 +165,11 @@ func validateUpdateStagesStatus(existingStageStatus []placementv1alpha1.StageUpd // It returns `curStage` as updatingStageIndex if the stage is updating or advances `lastFinishedStageIndex` if the stage has finished. func validateClusterUpdatingStatus( curStage, updatingStageIndex, lastFinishedStageIndex int, - stageStatus *placementv1alpha1.StageUpdatingStatus, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + stageStatus *placementv1beta1.StageUpdatingStatus, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) (int, int, error) { - stageSucceedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1alpha1.StageUpdatingConditionSucceeded)) - stageStartedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1alpha1.StageUpdatingConditionProgressing)) + stageSucceedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionSucceeded)) + stageStartedCond := meta.FindStatusCondition(stageStatus.Conditions, string(placementv1beta1.StageUpdatingConditionProgressing)) if condition.IsConditionStatusTrue(stageSucceedCond, updateRun.Generation) { // The stage has finished. if updatingStageIndex != -1 && curStage > updatingStageIndex { @@ -184,7 +183,7 @@ func validateClusterUpdatingStatus( // Check if the cluster is still updating. if !condition.IsConditionStatusTrue(meta.FindStatusCondition( stageStatus.Clusters[curCluster].Conditions, - string(placementv1alpha1.ClusterUpdatingConditionSucceeded)), + string(placementv1beta1.ClusterUpdatingConditionSucceeded)), updateRun.Generation) { // The clusters in the finished stage should all have finished too. unexpectedErr := controller.NewUnexpectedBehaviorError(fmt.Errorf("cluster `%s` in the finished stage `%s` has not succeeded", stageStatus.Clusters[curCluster].ClusterName, stageStatus.StageName)) @@ -223,8 +222,8 @@ func validateClusterUpdatingStatus( // Collect the updating clusters. 
var updatingClusters []string for j := range stageStatus.Clusters { - clusterStartedCond := meta.FindStatusCondition(stageStatus.Clusters[j].Conditions, string(placementv1alpha1.ClusterUpdatingConditionStarted)) - clusterFinishedCond := meta.FindStatusCondition(stageStatus.Clusters[j].Conditions, string(placementv1alpha1.ClusterUpdatingConditionSucceeded)) + clusterStartedCond := meta.FindStatusCondition(stageStatus.Clusters[j].Conditions, string(placementv1beta1.ClusterUpdatingConditionStarted)) + clusterFinishedCond := meta.FindStatusCondition(stageStatus.Clusters[j].Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded)) if condition.IsConditionStatusTrue(clusterStartedCond, updateRun.Generation) && !(condition.IsConditionStatusTrue(clusterFinishedCond, updateRun.Generation) || condition.IsConditionStatusFalse(clusterFinishedCond, updateRun.Generation)) { updatingClusters = append(updatingClusters, stageStatus.Clusters[j].ClusterName) @@ -246,7 +245,7 @@ func validateClusterUpdatingStatus( func validateDeleteStageStatus( updatingStageIndex, lastFinishedStageIndex, totalStages int, toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, + updateRun *placementv1beta1.ClusterStagedUpdateRun, ) (int, error) { updateRunRef := klog.KObj(updateRun) existingDeleteStageStatus := updateRun.Status.DeletionStageStatus @@ -271,8 +270,8 @@ func validateDeleteStageStatus( } } - deleteStageFinishedCond := meta.FindStatusCondition(existingDeleteStageStatus.Conditions, string(placementv1alpha1.StagedUpdateRunConditionSucceeded)) - deleteStageProgressingCond := meta.FindStatusCondition(existingDeleteStageStatus.Conditions, string(placementv1alpha1.StagedUpdateRunConditionProgressing)) + deleteStageFinishedCond := meta.FindStatusCondition(existingDeleteStageStatus.Conditions, string(placementv1beta1.StagedUpdateRunConditionSucceeded)) + deleteStageProgressingCond := meta.FindStatusCondition(existingDeleteStageStatus.Conditions, string(placementv1beta1.StagedUpdateRunConditionProgressing)) // Check if there is any active updating stage if updatingStageIndex != -1 || lastFinishedStageIndex < totalStages-1 { // There are still stages updating before the delete stage, make sure the delete stage is not active/finished. 
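For downstream callers, the practical effect of the validation.go changes above is that per-cluster progress is now read purely through the v1beta1 condition types. A minimal, self-contained sketch of such a check (the helper name clusterSucceeded and the package main wrapper are illustrative and not part of this patch; only the fleet types and the condition/meta calls shown in the diff are assumed):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"

	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
	"go.goms.io/fleet/pkg/utils/condition"
)

// clusterSucceeded reports whether a cluster entry in a stage status carries a
// ClusterUpdatingConditionSucceeded condition that is True for the update run's
// current generation, mirroring the lookups performed in validation.go above.
func clusterSucceeded(updateRun *placementv1beta1.ClusterStagedUpdateRun, cluster *placementv1beta1.ClusterUpdatingStatus) bool {
	succeededCond := meta.FindStatusCondition(cluster.Conditions, string(placementv1beta1.ClusterUpdatingConditionSucceeded))
	return condition.IsConditionStatusTrue(succeededCond, updateRun.Generation)
}

func main() {
	updateRun := &placementv1beta1.ClusterStagedUpdateRun{}
	cluster := &placementv1beta1.ClusterUpdatingStatus{ClusterName: "member-1"}
	// Prints false: no conditions have been set on this cluster status yet.
	fmt.Println(clusterSucceeded(updateRun, cluster))
}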
diff --git a/pkg/controllers/updaterun/validation_integration_test.go b/pkg/controllers/updaterun/validation_integration_test.go index b1f960c07..161eeaf76 100644 --- a/pkg/controllers/updaterun/validation_integration_test.go +++ b/pkg/controllers/updaterun/validation_integration_test.go @@ -28,16 +28,16 @@ import ( ) var _ = Describe("UpdateRun validation tests", func() { - var updateRun *placementv1alpha1.ClusterStagedUpdateRun + var updateRun *placementv1beta1.ClusterStagedUpdateRun var crp *placementv1beta1.ClusterResourcePlacement var policySnapshot *placementv1beta1.ClusterSchedulingPolicySnapshot - var updateStrategy *placementv1alpha1.ClusterStagedUpdateStrategy + var updateStrategy *placementv1beta1.ClusterStagedUpdateStrategy var resourceBindings []*placementv1beta1.ClusterResourceBinding var targetClusters []*clusterv1beta1.MemberCluster var unscheduledCluster []*clusterv1beta1.MemberCluster var resourceSnapshot *placementv1beta1.ClusterResourceSnapshot var clusterResourceOverride *placementv1alpha1.ClusterResourceOverrideSnapshot - var wantStatus *placementv1alpha1.StagedUpdateRunStatus + var wantStatus *placementv1beta1.StagedUpdateRunStatus BeforeEach(func() { testUpdateRunName = "updaterun-" + utils.RandStr() @@ -296,7 +296,7 @@ var _ = Describe("UpdateRun validation tests", func() { It("Should fail to validate if the number of stages has changed", func() { By("Adding a stage to the updateRun status") - updateRun.Status.StagedUpdateStrategySnapshot.Stages = append(updateRun.Status.StagedUpdateStrategySnapshot.Stages, placementv1alpha1.StageConfig{ + updateRun.Status.StagedUpdateStrategySnapshot.Stages = append(updateRun.Status.StagedUpdateStrategySnapshot.Stages, placementv1beta1.StageConfig{ Name: "stage3", LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -309,7 +309,7 @@ var _ = Describe("UpdateRun validation tests", func() { By("Validating the validation failed") wantStatus = generateFailedValidationStatus(updateRun, wantStatus) - wantStatus.StagedUpdateStrategySnapshot.Stages = append(wantStatus.StagedUpdateStrategySnapshot.Stages, placementv1alpha1.StageConfig{ + wantStatus.StagedUpdateStrategySnapshot.Stages = append(wantStatus.StagedUpdateStrategySnapshot.Stages, placementv1beta1.StageConfig{ Name: "stage3", LabelSelector: &metav1.LabelSelector{ MatchLabels: map[string]string{ @@ -358,8 +358,8 @@ var _ = Describe("UpdateRun validation tests", func() { func validateClusterStagedUpdateRunStatus( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, - want *placementv1alpha1.StagedUpdateRunStatus, + updateRun *placementv1beta1.ClusterStagedUpdateRun, + want *placementv1beta1.StagedUpdateRunStatus, message string, ) { Eventually(func() error { @@ -371,7 +371,7 @@ func validateClusterStagedUpdateRunStatus( return fmt.Errorf("status mismatch: (-want +got):\n%s", diff) } if message != "" { - succeedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1alpha1.StagedUpdateRunConditionSucceeded)) + succeedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionSucceeded)) if !strings.Contains(succeedCond.Message, message) { return fmt.Errorf("condition message mismatch: got %s, want %s", succeedCond.Message, message) } @@ -382,8 +382,8 @@ func validateClusterStagedUpdateRunStatus( func validateClusterStagedUpdateRunStatusConsistently( ctx context.Context, - updateRun *placementv1alpha1.ClusterStagedUpdateRun, - want 
*placementv1alpha1.StagedUpdateRunStatus, + updateRun *placementv1beta1.ClusterStagedUpdateRun, + want *placementv1beta1.StagedUpdateRunStatus, message string, ) { Consistently(func() error { @@ -395,7 +395,7 @@ func validateClusterStagedUpdateRunStatusConsistently( return fmt.Errorf("status mismatch: (-want +got):\n%s", diff) } if message != "" { - succeedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1alpha1.StagedUpdateRunConditionSucceeded)) + succeedCond := meta.FindStatusCondition(updateRun.Status.Conditions, string(placementv1beta1.StagedUpdateRunConditionSucceeded)) if !strings.Contains(succeedCond.Message, message) { return fmt.Errorf("condition message mismatch: got %s, want %s", succeedCond.Message, message) } @@ -405,9 +405,9 @@ func validateClusterStagedUpdateRunStatusConsistently( } func generateFailedValidationStatus( - updateRun *placementv1alpha1.ClusterStagedUpdateRun, - initialized *placementv1alpha1.StagedUpdateRunStatus, -) *placementv1alpha1.StagedUpdateRunStatus { - initialized.Conditions = append(initialized.Conditions, generateFalseCondition(updateRun, placementv1alpha1.StagedUpdateRunConditionSucceeded)) + updateRun *placementv1beta1.ClusterStagedUpdateRun, + initialized *placementv1beta1.StagedUpdateRunStatus, +) *placementv1beta1.StagedUpdateRunStatus { + initialized.Conditions = append(initialized.Conditions, generateFalseCondition(updateRun, placementv1beta1.StagedUpdateRunConditionSucceeded)) return initialized } diff --git a/pkg/controllers/updaterun/validation_test.go b/pkg/controllers/updaterun/validation_test.go index ffe05cb1a..8538f0e82 100644 --- a/pkg/controllers/updaterun/validation_test.go +++ b/pkg/controllers/updaterun/validation_test.go @@ -11,13 +11,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - placementv1alpha1 "go.goms.io/fleet/apis/placement/v1alpha1" placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1" "go.goms.io/fleet/pkg/utils/controller" ) func TestValidateClusterUpdatingStatus(t *testing.T) { - updateRun := &placementv1alpha1.ClusterStagedUpdateRun{ + updateRun := &placementv1beta1.ClusterStagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Name: "test-run", Generation: 1, @@ -29,7 +28,7 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage int updatingStageIndex int lastFinishedStageIndex int - stageStatus *placementv1alpha1.StageUpdatingStatus + stageStatus *placementv1beta1.StageUpdatingStatus wantErr error wantUpdatingStageIndex int wantLastFinishedStageIndex int @@ -39,9 +38,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 2, updatingStageIndex: 1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(true, fmt.Errorf("the finished stage `2` is after the updating stage `1`")), wantUpdatingStageIndex: -1, @@ -52,13 +51,13 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 0, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, - 
Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, + Clusters: []placementv1beta1.ClusterUpdatingStatus{ { ClusterName: "cluster-1", - Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1beta1.ClusterUpdatingConditionSucceeded)}, }, }, }, @@ -71,10 +70,10 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 0, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-1"}, }, }, @@ -87,9 +86,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 2, updatingStageIndex: -1, lastFinishedStageIndex: 0, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(true, fmt.Errorf("the finished stage `test-stage` is not right after the last finished stage with index `0`")), wantUpdatingStageIndex: -1, @@ -100,9 +99,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 0, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(false, fmt.Errorf("the stage `test-stage` has failed, err: ")), wantUpdatingStageIndex: -1, @@ -113,9 +112,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 1, updatingStageIndex: 0, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, }, wantErr: wrapErr(true, fmt.Errorf("the stage `test-stage` is updating, but there is already a stage with index `0` updating")), wantUpdatingStageIndex: -1, @@ -126,9 +125,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 1, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)}, + Conditions: 
[]metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, }, wantErr: wrapErr(true, fmt.Errorf("the updating stage `test-stage` is not right after the last finished stage with index `-1`")), wantUpdatingStageIndex: -1, @@ -139,17 +138,17 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 0, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)}, - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, + Clusters: []placementv1beta1.ClusterUpdatingStatus{ { ClusterName: "cluster-1", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)}, }, { ClusterName: "cluster-2", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.ClusterUpdatingConditionStarted)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.ClusterUpdatingConditionStarted)}, }, }, }, @@ -162,7 +161,7 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 0, updatingStageIndex: -1, lastFinishedStageIndex: -1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", }, wantErr: nil, @@ -174,9 +173,9 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 2, updatingStageIndex: -1, lastFinishedStageIndex: 1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, }, wantErr: nil, wantUpdatingStageIndex: 2, @@ -187,11 +186,11 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { curStage: 2, updatingStageIndex: -1, lastFinishedStageIndex: 1, - stageStatus: &placementv1alpha1.StageUpdatingStatus{ + stageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "test-stage", Conditions: []metav1.Condition{ - generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing), - generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded), + generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing), + generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded), }, }, wantErr: nil, @@ -223,7 +222,7 @@ func TestValidateClusterUpdatingStatus(t *testing.T) { func TestValidateDeleteStageStatus(t *testing.T) { totalStages := 3 - updateRun := &placementv1alpha1.ClusterStagedUpdateRun{ + updateRun := &placementv1beta1.ClusterStagedUpdateRun{ ObjectMeta: metav1.ObjectMeta{ Name: "test-run", Generation: 1, @@ -235,7 +234,7 @@ func TestValidateDeleteStageStatus(t *testing.T) { updatingStageIndex int lastFinishedStageIndex int toBeDeletedBindings []*placementv1beta1.ClusterResourceBinding - deleteStageStatus *placementv1alpha1.StageUpdatingStatus + deleteStageStatus 
*placementv1beta1.StageUpdatingStatus wantErr error wantUpdatingStageIndex int }{ @@ -257,9 +256,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { Spec: placementv1beta1.ResourceBindingSpec{TargetCluster: "cluster-2"}, }, }, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-1"}, }, }, @@ -276,9 +275,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { Spec: placementv1beta1.ResourceBindingSpec{TargetCluster: "cluster-1"}, }, }, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-1"}, {ClusterName: "cluster-2"}, }, @@ -296,9 +295,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { Spec: placementv1beta1.ResourceBindingSpec{TargetCluster: "cluster-1"}, }, }, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Clusters: []placementv1alpha1.ClusterUpdatingStatus{ + Clusters: []placementv1beta1.ClusterUpdatingStatus{ {ClusterName: "cluster-1"}, }, }, @@ -309,7 +308,7 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return 0 when both updatingStageIndex and lastFinishedStageIndex are -1", updatingStageIndex: -1, lastFinishedStageIndex: -1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{StageName: "delete-stage"}, + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{StageName: "delete-stage"}, wantErr: nil, wantUpdatingStageIndex: 0, }, @@ -317,9 +316,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return error if there's stage updating but the delete stage has started", updatingStageIndex: 2, lastFinishedStageIndex: 1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionProgressing)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, }, wantErr: wrapErr(true, fmt.Errorf("the delete stage is active, but there are still stages updating, updatingStageIndex: 2, lastFinishedStageIndex: 1")), wantUpdatingStageIndex: -1, @@ -328,9 +327,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return error if there's stage not started yet but the delete stage has finished", updatingStageIndex: -1, lastFinishedStageIndex: 1, // < totalStages - 1 - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(true, fmt.Errorf("the delete stage is active, but there are still stages updating, updatingStageIndex: -1, lastFinishedStageIndex: 1")), wantUpdatingStageIndex: -1, @@ -339,9 +338,9 @@ func 
TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return error if there's stage not started yet but the delete stage has failed", updatingStageIndex: -1, lastFinishedStageIndex: 1, // < totalStages - 1 - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(true, fmt.Errorf("the delete stage is active, but there are still stages updating, updatingStageIndex: -1, lastFinishedStageIndex: 1")), wantUpdatingStageIndex: -1, @@ -349,7 +348,7 @@ func TestValidateDeleteStageStatus(t *testing.T) { { name: "validateDeleteStageStatus should return the updatingStageIndex if there's still stage updating", updatingStageIndex: 2, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{StageName: "delete-stage"}, + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{StageName: "delete-stage"}, wantErr: nil, wantUpdatingStageIndex: 2, }, @@ -357,7 +356,7 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return the next stage after lastUpdatingStageIndex if there's no stage updating but stage not started yet", updatingStageIndex: -1, lastFinishedStageIndex: 0, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{StageName: "delete-stage"}, + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{StageName: "delete-stage"}, wantErr: nil, wantUpdatingStageIndex: 1, }, @@ -365,9 +364,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return -1 if all stages have finished", updatingStageIndex: -1, lastFinishedStageIndex: totalStages - 1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: nil, wantUpdatingStageIndex: -1, @@ -376,9 +375,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return error if the delete stage has failed", updatingStageIndex: -1, lastFinishedStageIndex: totalStages - 1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1alpha1.StageUpdatingConditionSucceeded)}, + Conditions: []metav1.Condition{generateFalseCondition(updateRun, placementv1beta1.StageUpdatingConditionSucceeded)}, }, wantErr: wrapErr(false, fmt.Errorf("the delete stage has failed, err: ")), wantUpdatingStageIndex: -1, @@ -387,9 +386,9 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return totalStages if the delete stage is still running", updatingStageIndex: -1, lastFinishedStageIndex: totalStages - 1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{ + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{ StageName: "delete-stage", - Conditions: []metav1.Condition{generateTrueCondition(updateRun, 
placementv1alpha1.StageUpdatingConditionProgressing)}, + Conditions: []metav1.Condition{generateTrueCondition(updateRun, placementv1beta1.StageUpdatingConditionProgressing)}, }, wantErr: nil, wantUpdatingStageIndex: totalStages, @@ -398,7 +397,7 @@ func TestValidateDeleteStageStatus(t *testing.T) { name: "validateDeleteStageStatus should return totalStages if all updating stages have finished but the delete stage is not active or finished", updatingStageIndex: -1, lastFinishedStageIndex: totalStages - 1, - deleteStageStatus: &placementv1alpha1.StageUpdatingStatus{StageName: "delete-stage"}, + deleteStageStatus: &placementv1beta1.StageUpdatingStatus{StageName: "delete-stage"}, wantErr: nil, wantUpdatingStageIndex: totalStages, },
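For readers migrating their own fixtures alongside this change, a minimal sketch of a test helper built entirely on the v1beta1 API follows; the helper name newTestUpdateRun and all field values are illustrative and not part of the patch, while the types, fields, and the UpdateRunDeleteStageName constant are those introduced above:

package updaterun_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	placementv1beta1 "go.goms.io/fleet/apis/placement/v1beta1"
)

// newTestUpdateRun builds a ClusterStagedUpdateRun using the v1beta1 API that
// this patch introduces; the object is cluster-scoped, so no namespace is set.
func newTestUpdateRun(name string) *placementv1beta1.ClusterStagedUpdateRun {
	return &placementv1beta1.ClusterStagedUpdateRun{
		ObjectMeta: metav1.ObjectMeta{Name: name, Generation: 1},
		Spec: placementv1beta1.StagedUpdateRunSpec{
			ResourceSnapshotIndex: "test-snapshot",
		},
		Status: placementv1beta1.StagedUpdateRunStatus{
			DeletionStageStatus: &placementv1beta1.StageUpdatingStatus{
				StageName: placementv1beta1.UpdateRunDeleteStageName,
			},
		},
	}
}

func TestNewTestUpdateRun(t *testing.T) {
	run := newTestUpdateRun("example-run")
	if run.Spec.ResourceSnapshotIndex != "test-snapshot" {
		t.Fatalf("unexpected resource snapshot index: %s", run.Spec.ResourceSnapshotIndex)
	}
}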