
Commit

Merge pull request #4759 from muraee/rosa-controlplane-status
✨ ROSA: Reconcile ROSAControlPlane status
k8s-ci-robot authored Jan 25, 2024
2 parents e439ae2 + 3f434bb commit aa0da32
Showing 5 changed files with 61 additions and 25 deletions.
@@ -270,7 +270,7 @@ spec:
supportRoleARN:
type: string
version:
description: Openshift version, for example "openshift-v4.12.15".
description: Openshift version, for example "openshift-v4.14.5".
type: string
workerRoleARN:
type: string
@@ -337,6 +337,9 @@ spec:
- type
type: object
type: array
consoleURL:
description: ConsoleURL is the url for the openshift console.
type: string
externalManagedControlPlane:
default: true
description: ExternalManagedControlPlane indicates to cluster-api
@@ -354,10 +357,14 @@
description: Initialized denotes whether or not the control plane
has the uploaded kubernetes config-map.
type: boolean
oidcEndpointURL:
description: OIDCEndpointURL is the endpoint url for the managed OIDC
provider.
type: string
ready:
default: false
description: Ready denotes that the AWSManagedControlPlane API Server
is ready to receive requests and that the VPC infra is ready.
description: Ready denotes that the ROSAControlPlane API Server is
ready to receive requests.
type: boolean
required:
- ready
20 changes: 10 additions & 10 deletions controllers/rosacluster_controller.go
@@ -155,25 +155,25 @@ func (r *ROSAClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M

func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Logger) handler.MapFunc {
return func(ctx context.Context, o client.Object) []ctrl.Request {
ROSAControlPlane, ok := o.(*rosacontrolplanev1.ROSAControlPlane)
rosaControlPlane, ok := o.(*rosacontrolplanev1.ROSAControlPlane)
if !ok {
log.Error(errors.Errorf("expected an ROSAControlPlane, got %T instead", o), "failed to map ROSAControlPlane")
log.Error(errors.Errorf("expected a ROSAControlPlane, got %T instead", o), "failed to map ROSAControlPlane")
return nil
}

log := log.WithValues("objectMapper", "awsmcpTomc", "ROSAcontrolplane", klog.KRef(ROSAControlPlane.Namespace, ROSAControlPlane.Name))
log := log.WithValues("objectMapper", "rosacpTorosac", "ROSAcontrolplane", klog.KRef(rosaControlPlane.Namespace, rosaControlPlane.Name))

if !ROSAControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
if !rosaControlPlane.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("ROSAControlPlane has a deletion timestamp, skipping mapping")
return nil
}

if ROSAControlPlane.Spec.ControlPlaneEndpoint.IsZero() {
if rosaControlPlane.Spec.ControlPlaneEndpoint.IsZero() {
log.Debug("ROSAControlPlane has no control plane endpoint, skipping mapping")
return nil
}

cluster, err := util.GetOwnerCluster(ctx, r.Client, ROSAControlPlane.ObjectMeta)
cluster, err := util.GetOwnerCluster(ctx, r.Client, rosaControlPlane.ObjectMeta)
if err != nil {
log.Error(err, "failed to get owning cluster")
return nil
@@ -183,17 +183,17 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log
return nil
}

managedClusterRef := cluster.Spec.InfrastructureRef
if managedClusterRef == nil || managedClusterRef.Kind != "ROSACluster" {
rosaClusterRef := cluster.Spec.InfrastructureRef
if rosaClusterRef == nil || rosaClusterRef.Kind != "ROSACluster" {
log.Info("InfrastructureRef is nil or not ROSACluster, skipping mapping")
return nil
}

return []ctrl.Request{
{
NamespacedName: types.NamespacedName{
Name: managedClusterRef.Name,
Namespace: managedClusterRef.Namespace,
Name: rosaClusterRef.Name,
Namespace: rosaClusterRef.Namespace,
},
},
}
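The map function above is only half of the wiring: it has to be registered as a watch on ROSAControlPlane objects so that control-plane updates re-enqueue the owning ROSACluster. A minimal sketch of that registration with controller-runtime's builder follows; the helper name and the import paths are assumptions based on this repository's layout, not code from this PR.

// Sketch: assumed wiring for a MapFunc like rosaControlPlaneToManagedCluster.
package controllers

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/handler"

	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" // assumed import path
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"                       // assumed import path
)

func setupROSAClusterWatches(mgr ctrl.Manager, r *ROSAClusterReconciler, mapFn handler.MapFunc) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&expinfrav1.ROSACluster{}).
		// Re-enqueue the owning ROSACluster whenever its ROSAControlPlane changes,
		// e.g. once the control plane endpoint or the new status fields are set.
		Watches(&rosacontrolplanev1.ROSAControlPlane{}, handler.EnqueueRequestsFromMapFunc(mapFn)).
		Complete(r)
}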
9 changes: 6 additions & 3 deletions controlplane/rosa/api/v1beta2/rosacontrolplane_types.go
@@ -48,7 +48,7 @@ type RosaControlPlaneSpec struct { //nolint: maligned
// The AWS Region the cluster lives in.
Region *string `json:"region"`

// Openshift version, for example "openshift-v4.12.15".
// Openshift version, for example "openshift-v4.14.5".
Version *string `json:"version"`

// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
@@ -463,8 +463,7 @@ type RosaControlPlaneStatus struct {
// uploaded kubernetes config-map.
// +optional
Initialized bool `json:"initialized"`
// Ready denotes that the AWSManagedControlPlane API Server is ready to
// receive requests and that the VPC infra is ready.
// Ready denotes that the ROSAControlPlane API Server is ready to receive requests.
// +kubebuilder:default=false
Ready bool `json:"ready"`
// ErrorMessage indicates that there is a terminal problem reconciling the
@@ -476,6 +475,10 @@ type RosaControlPlaneStatus struct {

// ID is the cluster ID given by ROSA.
ID *string `json:"id,omitempty"`
// ConsoleURL is the url for the openshift console.
ConsoleURL string `json:"consoleURL,omitempty"`
// OIDCEndpointURL is the endpoint url for the managed OIDC provider.
OIDCEndpointURL string `json:"oidcEndpointURL,omitempty"`
}

// +kubebuilder:object:root=true
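The two new status fields surface information that previously had to be looked up in OCM directly. Below is a hypothetical consumer-side snippet (not part of this PR) that reads them once the control plane reports Ready; the import path is assumed from the repository layout.

// Hypothetical helper illustrating the new ROSAControlPlane status fields.
package example

import (
	"fmt"

	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" // assumed import path
)

func printROSAStatus(cp *rosacontrolplanev1.ROSAControlPlane) {
	if !cp.Status.Ready {
		fmt.Println("control plane is not ready yet")
		return
	}
	// ConsoleURL and OIDCEndpointURL are copied from OCM once the cluster ID is known.
	fmt.Println("console URL:", cp.Status.ConsoleURL)
	fmt.Println("OIDC endpoint URL:", cp.Status.OIDCEndpointURL)
	if cp.Status.ID != nil {
		fmt.Println("cluster ID:", *cp.Status.ID)
	}
}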
43 changes: 34 additions & 9 deletions controlplane/rosa/controllers/rosacontrolplane_controller.go
@@ -33,6 +33,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
@@ -134,6 +135,8 @@ func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Req
return ctrl.Result{}, nil
}

log = log.WithValues("cluster", klog.KObj(cluster))

if capiannotations.IsPaused(cluster, rosaControlPlane) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
@@ -144,6 +147,7 @@ func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Req
Cluster: cluster,
ControlPlane: rosaControlPlane,
ControllerName: strings.ToLower(rosaControlPlaneKind),
Logger: log,
})
if err != nil {
return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err)
@@ -191,7 +195,12 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc

if clusterID := cluster.ID(); clusterID != "" {
rosaScope.ControlPlane.Status.ID = &clusterID
if cluster.Status().State() == cmv1.ClusterStateReady {
rosaScope.ControlPlane.Status.ConsoleURL = cluster.Console().URL()
rosaScope.ControlPlane.Status.OIDCEndpointURL = cluster.AWS().STS().OIDCEndpointURL()
rosaScope.ControlPlane.Status.Ready = false

switch cluster.Status().State() {
case cmv1.ClusterStateReady:
conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition)
rosaScope.ControlPlane.Status.Ready = true

@@ -204,15 +213,25 @@
if err := r.reconcileKubeconfig(ctx, rosaScope, rosaClient, cluster); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to reconcile kubeconfig: %w", err)
}

return ctrl.Result{}, nil
case cmv1.ClusterStateError:
errorMessage := cluster.Status().ProvisionErrorMessage()
rosaScope.ControlPlane.Status.FailureMessage = &errorMessage

conditions.MarkFalse(rosaScope.ControlPlane,
rosacontrolplanev1.ROSAControlPlaneReadyCondition,
string(cluster.Status().State()),
clusterv1.ConditionSeverityError,
cluster.Status().ProvisionErrorCode())
// Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued.
return ctrl.Result{}, nil
}

conditions.MarkFalse(rosaScope.ControlPlane,
rosacontrolplanev1.ROSAControlPlaneReadyCondition,
string(cluster.Status().State()),
clusterv1.ConditionSeverityInfo,
"")
cluster.Status().Description())

rosaScope.Info("waiting for cluster to become ready", "state", cluster.Status().State())
// Requeue so that status.ready is set to true when the cluster is fully created.
@@ -333,8 +352,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc

newCluster, err := rosaClient.CreateCluster(clusterSpec)
if err != nil {
rosaScope.Info("error", "error", err)
return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
return ctrl.Result{}, fmt.Errorf("failed to create ROSA cluster: %w", err)
}

rosaScope.Info("cluster created", "state", newCluster.Status().State())
@@ -358,15 +376,22 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc
return ctrl.Result{}, err
}

if cluster != nil {
if cluster == nil {
// cluster is fully deleted, remove finalizer.
controllerutil.RemoveFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer)
return ctrl.Result{}, nil
}

if cluster.Status().State() != cmv1.ClusterStateUninstalling {
if err := rosaClient.DeleteCluster(cluster.ID()); err != nil {
return ctrl.Result{}, err
}
}

controllerutil.RemoveFinalizer(rosaScope.ControlPlane, ROSAControlPlaneFinalizer)

return ctrl.Result{}, nil
rosaScope.ControlPlane.Status.Ready = false
rosaScope.Info("waiting for cluster to be deleted")
// Requeue to remove the finalizer when the cluster is fully deleted.
return ctrl.Result{RequeueAfter: time.Second * 60}, nil
}

func (r *ROSAControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope, rosaClient *rosa.RosaClient, cluster *cmv1.Cluster) error {
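Taken together, reconcileNormal now resets Ready on every pass, copies ConsoleURL and OIDCEndpointURL from OCM, and switches on the cluster state: Ready marks the condition true and finishes, Error records a terminal FailureMessage without requeueing, and every other state marks the condition false with the OCM status description and requeues. On the deletion path the finalizer is only removed after OCM reports the cluster gone, with a 60-second requeue while the cluster is uninstalling. A reduced, illustrative sketch of that state mapping follows; the function name and the requeue interval are illustrative, not the exact code from this PR.

// Sketch: the state-driven status handling introduced above, reduced to a standalone function.
package example

import (
	"time"

	cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// clusterStateToResult mirrors the reconcileNormal flow: Ready ends reconciliation,
// Error is terminal and is not requeued, every other state keeps polling.
func clusterStateToResult(state cmv1.ClusterState) (ready bool, terminal bool, res ctrl.Result) {
	switch state {
	case cmv1.ClusterStateReady:
		return true, false, ctrl.Result{}
	case cmv1.ClusterStateError:
		return false, true, ctrl.Result{}
	default:
		// Installing, validating, pending, ... requeue until the cluster settles.
		return false, false, ctrl.Result{RequeueAfter: time.Second * 60}
	}
}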
1 change: 1 addition & 0 deletions exp/controllers/rosamachinepool_controller.go
@@ -123,6 +123,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ
ControlPlane: controlPlane,
MachinePool: machinePool,
RosaMachinePool: rosaMachinePool,
Logger: log,
})
if err != nil {
return ctrl.Result{}, errors.Wrap(err, "failed to create scope")
