From bb0cf1073a0643fabe237a42fb677e56e8460709 Mon Sep 17 00:00:00 2001 From: r Date: Sun, 3 Sep 2023 12:05:33 +0300 Subject: [PATCH] switch to structured logging update logging update tests logging update logging update logging --- controllers/openstackdataplane_controller.go | 405 ++++++++++++++++++ .../openstackdataplanenode_controller.go | 335 +++++++++++++++ .../openstackdataplanenodeset_controller.go | 31 +- main.go | 4 +- tests/functional/suite_test.go | 4 +- 5 files changed, 761 insertions(+), 18 deletions(-) create mode 100644 controllers/openstackdataplane_controller.go create mode 100644 controllers/openstackdataplanenode_controller.go diff --git a/controllers/openstackdataplane_controller.go b/controllers/openstackdataplane_controller.go new file mode 100644 index 000000000..53d958925 --- /dev/null +++ b/controllers/openstackdataplane_controller.go @@ -0,0 +1,405 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/go-logr/logr" + dataplanev1 "github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" + "github.com/openstack-k8s-operators/dataplane-operator/pkg/deployment" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + baremetalv1 "github.com/openstack-k8s-operators/openstack-baremetal-operator/api/v1beta1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// OpenStackDataPlaneReconciler reconciles a OpenStackDataPlane object +type OpenStackDataPlaneReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// Getlogger returns a logger object with a prefix of "conroller.name" and aditional controller context fields +func (r *OpenStackDataPlaneReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("OpenStackDataPlane") +} + +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanes,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodes;openstackdataplaneroles,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanes/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanes/finalizers,verbs=update 
+//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplaneservices,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete; + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the OpenStackDataPlane object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile +func (r *OpenStackDataPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + + // Fetch the OpenStackDataPlane instance + instance := &dataplanev1.OpenStackDataPlane{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + // helper might be nil, so can't use util.LogErrorForObject since it requires helper as first arg + Log.Error(err, fmt.Sprintf("unable to acquire helper for OpenStackDataPlane %s", instance.Name)) + return ctrl.Result{}, err + } + + // Always patch the instance status when exiting this function so we can persist any changes. 
+ defer func() { + // update the Ready condition based on the sub conditions + if instance.Status.Conditions.AllSubConditionIsTrue() { + instance.Status.Conditions.MarkTrue( + condition.ReadyCondition, dataplanev1.DataPlaneReadyMessage) + } else { + // something is not ready so reset the Ready condition + instance.Status.Conditions.MarkUnknown( + condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage) + // and recalculate it based on the state of the rest of the conditions + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + if instance.Status.Conditions == nil { + instance.InitConditions() + // Register overall status immediately to have an early feedback e.g. in the cli + return ctrl.Result{}, nil + } + + if instance.Status.Conditions.IsUnknown(dataplanev1.SetupReadyCondition) { + instance.Status.Conditions.MarkFalse(dataplanev1.SetupReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.ReadyInitMessage) + } + + ctrlResult, err := createOrPatchDataPlaneResources(ctx, instance, r, helper) + if err != nil { + return ctrl.Result{}, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // all setup tasks complete, mark SetupReadyCondition True + instance.Status.Conditions.MarkTrue(dataplanev1.SetupReadyCondition, condition.ReadyMessage) + + var deployErrors []string + shouldRequeue := false + if instance.Spec.DeployStrategy.Deploy { + Log.Info("Starting DataPlane deploy") + Log.Info("Set DeploymentReadyCondition false", "instance", instance) + instance.Status.Conditions.Set(condition.FalseCondition(condition.DeploymentReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.DeploymentReadyRunningMessage)) + roles := &dataplanev1.OpenStackDataPlaneRoleList{} + + listOpts := []client.ListOption{ + 
client.InNamespace(instance.GetNamespace()), + } + labelSelector := map[string]string{ + "openstackdataplane": instance.Name, + } + if len(labelSelector) > 0 { + labels := client.MatchingLabels(labelSelector) + listOpts = append(listOpts, labels) + } + err = r.Client.List(ctx, roles, listOpts...) + if err != nil { + return ctrl.Result{}, err + } + Log.Info("found roles", "total", len(roles.Items)) + if len(instance.Spec.Roles) > len(roles.Items) { + shouldRequeue = true + } + + for _, role := range roles.Items { + Log.Info("DataPlane deploy", "role.Name", role.Name) + if role.Spec.DataPlane != instance.Name { + err = fmt.Errorf("role %s: role.DataPlane does not match with role.Label", role.Name) + deployErrors = append(deployErrors, "role.Name: "+role.Name+" error: "+err.Error()) + } + Log.Info("Role", "DeployStrategy.Deploy", role.Spec.DeployStrategy.Deploy, "Role.Namespace", instance.Namespace, "Role.Name", role.Name) + if !role.Spec.DeployStrategy.Deploy { + _, err := controllerutil.CreateOrPatch(ctx, helper.GetClient(), &role, func() error { + Log.Info("Reconciling Role", "Role.Namespace", instance.Namespace, "Role.Name", role.Name) + helper.GetLogger().Info("CreateOrPatch Role.DeployStrategy.Deploy", "Role.Namespace", instance.Namespace, "Role.Name", role.Name) + role.Spec.DeployStrategy.Deploy = instance.Spec.DeployStrategy.Deploy + if err != nil { + deployErrors = append(deployErrors, "role.Name: "+role.Name+" error: "+err.Error()) + } + return nil + }) + if err != nil { + deployErrors = append(deployErrors, "role.Name: "+role.Name+" error: "+err.Error()) + } + } + if !role.IsReady() { + Log.Info("Role", "IsReady", role.IsReady(), "Role.Namespace", instance.Namespace, "Role.Name", role.Name) + shouldRequeue = true + mirroredCondition := role.Status.Conditions.Mirror(condition.ReadyCondition) + if mirroredCondition != nil { + Log.Info("Role", "Status", mirroredCondition.Message, "Role.Namespace", instance.Namespace, "Role.Name", role.Name) + if 
condition.IsError(mirroredCondition) { + deployErrors = append(deployErrors, "role.Name: "+role.Name+" error: "+mirroredCondition.Message) + } + } + + } + } + } + + if len(deployErrors) > 0 { + util.LogErrorForObject(helper, err, fmt.Sprintf("Unable to deploy %s", instance.Name), instance) + err = fmt.Errorf(fmt.Sprintf("DeployDataplane error(s): %s", deployErrors)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if shouldRequeue { + Log.Info("one or more roles aren't ready, requeueing") + return ctrl.Result{RequeueAfter: time.Second * 5}, nil + } + if instance.Spec.DeployStrategy.Deploy && len(deployErrors) == 0 { + instance.Status.Deployed = true + Log.Info("Set DeploymentReadyCondition true", "instance", instance) + instance.Status.Conditions.Set(condition.TrueCondition(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage)) + } + + // Set DeploymentReadyCondition to False if it was unknown. + // Handles the case where the DataPlane is created with + // DeployStrategy.Deploy=false. + if instance.Status.Conditions.IsUnknown(condition.DeploymentReadyCondition) { + Log.Info("Set DeploymentReadyCondition false") + instance.Status.Conditions.Set(condition.FalseCondition(condition.DeploymentReadyCondition, condition.NotRequestedReason, condition.SeverityInfo, condition.DeploymentReadyInitMessage)) + } + + // Explicitly set instance.Spec.Deploy = false + // We don't want another deploy triggered by any reconcile request, it should + // only be triggered when the user (or another controller) specifically + // sets it to true. + instance.Spec.DeployStrategy.Deploy = false + + // Set DeploymentReadyCondition to False if it was unknown. + // Handles the case where the Node is created with + // DeployStrategy.Deploy=false. 
+ if instance.Status.Conditions.IsUnknown(condition.DeploymentReadyCondition) { + Log.Info("Set DeploymentReadyCondition false") + instance.Status.Conditions.Set(condition.FalseCondition(condition.DeploymentReadyCondition, condition.NotRequestedReason, condition.SeverityInfo, condition.DeploymentReadyInitMessage)) + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *OpenStackDataPlaneReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&dataplanev1.OpenStackDataPlane{}). + Owns(&dataplanev1.OpenStackDataPlaneNode{}). + Owns(&dataplanev1.OpenStackDataPlaneRole{}). + Complete(r) +} + +// createOrPatchDataPlaneResources - +func createOrPatchDataPlaneResources(ctx context.Context, instance *dataplanev1.OpenStackDataPlane, r *OpenStackDataPlaneReconciler, helper *helper.Helper) (ctrl.Result, error) { + // create DataPlaneRoles + roleManagedHostMap := make(map[string]map[string]baremetalv1.InstanceSpec) + err := createOrPatchDataPlaneRoles(ctx, instance, r, helper, roleManagedHostMap) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.ReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + // Create DataPlaneNodes + err = createOrPatchDataPlaneNodes(ctx, instance, r, helper) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.ReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneNodeErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + // Get All Nodes + nodes := &dataplanev1.OpenStackDataPlaneNodeList{} + listOpts := []client.ListOption{ + client.InNamespace(instance.GetNamespace()), + } + + err = helper.GetClient().List(ctx, nodes, listOpts...) 
+ if err != nil { + return ctrl.Result{}, err + } + + if len(nodes.Items) < len(instance.Spec.Nodes) { + util.LogForObject(helper, "All nodes not yet created, requeueing", instance) + return ctrl.Result{RequeueAfter: time.Second * 10}, nil + } + + // Order the nodes based on Name + sort.SliceStable(nodes.Items, func(i, j int) bool { + return nodes.Items[i].Name < nodes.Items[j].Name + }) + + err = deployment.BuildBMHHostMap(ctx, helper, instance, nodes, roleManagedHostMap) + if err != nil { + return ctrl.Result{}, err + } + + // Patch the role again to provision the nodes + err = createOrPatchDataPlaneRoles(ctx, instance, r, helper, roleManagedHostMap) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.ReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// createOrPatchDataPlaneNodes Create or Patch DataPlaneNodes +func createOrPatchDataPlaneNodes(ctx context.Context, instance *dataplanev1.OpenStackDataPlane, r *OpenStackDataPlaneReconciler, helper *helper.Helper) error { + Log := r.GetLogger(ctx) + client := helper.GetClient() + + for nodeName, nodeSpec := range instance.Spec.Nodes { + Log.Info("CreateDataPlaneNode", "nodeName", nodeName) + node := &dataplanev1.OpenStackDataPlaneNode{ + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: instance.Namespace, + }, + } + _, err := controllerutil.CreateOrPatch(ctx, client, node, func() error { + nodeSpec.DeepCopyInto(&node.Spec) + err := controllerutil.SetControllerReference(instance, node, helper.GetScheme()) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + return nil +} + +// createOrPatchDataPlaneRoles Create or Patch DataPlaneRole +func createOrPatchDataPlaneRoles(ctx context.Context, + instance *dataplanev1.OpenStackDataPlane, r *OpenStackDataPlaneReconciler, helper *helper.Helper, + roleManagedHostMap 
map[string]map[string]baremetalv1.InstanceSpec) error { + client := helper.GetClient() + Log := r.GetLogger(ctx) + for roleName, roleSpec := range instance.Spec.Roles { + role := &dataplanev1.OpenStackDataPlaneRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + Namespace: instance.Namespace, + }, + } + err := client.Get( + ctx, types.NamespacedName{Name: roleName, Namespace: instance.Namespace}, role) + + if err != nil && !k8s_errors.IsNotFound(err) { + return err + } + + Log.Info("Create Or Patch DataPlaneRole", "roleName", roleName) + _, err = controllerutil.CreateOrPatch(ctx, client, role, func() error { + // role.Spec.DeployStrategy is explicitly omitted. Otherwise, it + // could get reset to False, and if the DataPlane deploy sets it to + // True, the DataPlane and DataPlaneRole controllers will be stuck + // looping trying to reconcile. + role.Spec.DataPlane = instance.Name + role.Spec.NodeTemplate = roleSpec.NodeTemplate + role.Spec.NetworkAttachments = roleSpec.NetworkAttachments + role.Spec.Env = roleSpec.Env + role.Spec.Services = roleSpec.Services + hostMap, ok := roleManagedHostMap[roleName] + if ok { + bmsTemplate := roleSpec.BaremetalSetTemplate.DeepCopy() + bmsTemplate.BaremetalHosts = hostMap + role.Spec.BaremetalSetTemplate = *bmsTemplate + } + err := controllerutil.SetControllerReference(instance, role, helper.GetScheme()) + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + return nil +} diff --git a/controllers/openstackdataplanenode_controller.go b/controllers/openstackdataplanenode_controller.go new file mode 100644 index 000000000..22f92a562 --- /dev/null +++ b/controllers/openstackdataplanenode_controller.go @@ -0,0 +1,335 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/go-logr/logr" + dataplanev1 "github.com/openstack-k8s-operators/dataplane-operator/api/v1beta1" + "github.com/openstack-k8s-operators/dataplane-operator/pkg/deployment" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + "github.com/openstack-k8s-operators/openstack-ansibleee-operator/api/v1alpha1" +) + +// OpenStackDataPlaneNodeReconciler reconciles a OpenStackDataPlaneNode object +type OpenStackDataPlaneNodeReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// Getlogger returns a logger object with a prefix of "conroller.name" and aditional controller context fields +func (r *OpenStackDataPlaneNodeReconciler) GetLogger(ctx context.Context) logr.Logger { + return 
log.FromContext(ctx).WithName("Controllers").WithName("OpenStackDataPlaneNode") +} + +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodes,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodes/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodes/finalizers,verbs=update +//+kubebuilder:rbac:groups=ansibleee.openstack.org,resources=openstackansibleees,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete; +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete; +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete; +// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the OpenStackDataPlaneNode object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile +func (r *OpenStackDataPlaneNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + + Log := r.GetLogger(ctx) + Log.Info("Reconciling Node") + + // Fetch the OpenStackDataPlaneNode instance + instance := &dataplanev1.OpenStackDataPlaneNode{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, _ := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + return ctrl.Result{}, err + } + + instanceRole, err := r.GetInstanceRole(ctx, instance) + if err != nil { + return ctrl.Result{}, err + } + + if len(instance.Spec.Role) > 0 { + if instance.ObjectMeta.Labels == nil { + instance.ObjectMeta.Labels = make(map[string]string) + } + Log.Info(fmt.Sprintf("Adding label %s=%s", "openstackdataplanerole", instance.Spec.Role)) + instance.ObjectMeta.Labels["openstackdataplanerole"] = instance.Spec.Role + } else if instance.ObjectMeta.Labels != nil { + Log.Info(fmt.Sprintf("Removing label %s", "openstackdataplanerole")) + delete(instance.ObjectMeta.Labels, "openstackdataplanerole") + } + + // Always patch the instance status when exiting this function so we can persist any changes. 
+ defer func() { + // update the Ready condition based on the sub conditions + if instance.Status.Conditions.AllSubConditionIsTrue() || instanceRole.IsReady() { + instance.Status.Deployed = true + instance.Status.Conditions.MarkTrue( + condition.ReadyCondition, dataplanev1.DataPlaneNodeReadyMessage) + } else { + // something is not ready so reset the Ready condition + instance.Status.Conditions.MarkUnknown( + condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage) + // and recalculate it based on the state of the rest of the conditions + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + Log.Error(err, "Error updating instance status conditions") + _err = err + return + } + }() + + // Initialize Status + if instance.Status.Conditions == nil { + instance.InitConditions(instanceRole) + // Register overall status immediately to have an early feedback e.g. + // in the cli + return ctrl.Result{}, nil + } + + if instance.Status.Conditions.IsUnknown(dataplanev1.SetupReadyCondition) { + instance.Status.Conditions.MarkFalse(dataplanev1.SetupReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.ReadyInitMessage) + } + ansibleSSHPrivateKeySecret := instance.Spec.Node.AnsibleSSHPrivateKeySecret + + _, result, err = secret.VerifySecret( + ctx, + types.NamespacedName{Namespace: instance.Namespace, Name: ansibleSSHPrivateKeySecret}, + []string{ + AnsibleSSHPrivateKey, + }, + helper.GetClient(), + time.Second*5, + ) + + if err != nil { + if (result != ctrl.Result{}) { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + dataplanev1.InputReadyWaitingMessage, + "secret/"+ansibleSSHPrivateKeySecret) + } else { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityWarning, + err.Error()) + 
} + return result, err + } + + // check if provided network attachments exist + for _, netAtt := range instance.Spec.NetworkAttachments { + _, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + dataplanev1.InputReadyWaitingMessage, + "network-attachment-definition/"+netAtt) + return ctrl.Result{RequeueAfter: time.Second * 10}, fmt.Errorf("network-attachment-definition %s not found", netAtt) + } + instance.Status.Conditions.MarkFalse( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.InputReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + } + + // all our input checks out so report InputReady + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + + nodeConfigMap, err := deployment.GenerateNodeInventory(ctx, helper, instance, instanceRole) + if err != nil { + util.LogErrorForObject(helper, err, fmt.Sprintf("Unable to generate inventory for %s", instance.Name), instance) + return ctrl.Result{}, err + } + + // all setup tasks complete, mark SetupReadyCondition True + instance.Status.Conditions.MarkTrue(dataplanev1.SetupReadyCondition, condition.ReadyMessage) + + Log.Info("Node", "DeployStrategy", instance.Spec.DeployStrategy.Deploy, "Node.Namespace", instance.Namespace, "Node.Name", instance.Name) + if instance.Spec.DeployStrategy.Deploy { + Log.Info("Starting DataPlaneNode deploy") + Log.Info("Set DeploymentReadyCondition false", "instance", instance) + instance.Status.Conditions.Set(condition.FalseCondition(condition.DeploymentReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.DeploymentReadyRunningMessage)) + nodes := &dataplanev1.OpenStackDataPlaneNodeList{ + Items: []dataplanev1.OpenStackDataPlaneNode{*instance}, + } + deployResult, 
err := deployment.Deploy( + ctx, helper, instance, nodes, + ansibleSSHPrivateKeySecret, nodeConfigMap, + &instance.Status, instance.GetAnsibleEESpec(*instanceRole), + deployment.GetServices(instance, instanceRole), instanceRole) + if err != nil { + util.LogErrorForObject(helper, err, fmt.Sprintf("Unable to deploy %s", instance.Name), instance) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ReadyCondition, + condition.ErrorReason, + condition.SeverityError, + dataplanev1.DataPlaneNodeErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if deployResult != nil { + result = *deployResult + return result, nil + } + + instance.Status.Deployed = true + Log.Info("Set DeploymentReadyCondition true", "instance", instance) + instance.Status.Conditions.Set(condition.TrueCondition(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage)) + + // Explicitly set instance.Spec.Deploy = false + // We don't want another deploy triggered by any reconcile request, it + // should only be triggered when the user (or another controller) + // specifically sets it to true. + instance.Spec.DeployStrategy.Deploy = false + + } + + // Set DeploymentReadyCondition to False if it was unknown. + // Handles the case where the Node is created with + // DeployStrategy.Deploy=false. + if instance.Status.Conditions.IsUnknown(condition.DeploymentReadyCondition) { + Log.Info("Set DeploymentReadyCondition false") + instance.Status.Conditions.Set(condition.FalseCondition(condition.DeploymentReadyCondition, condition.NotRequestedReason, condition.SeverityInfo, condition.DeploymentReadyInitMessage)) + } + + return result, nil +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *OpenStackDataPlaneNodeReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + Log := r.GetLogger(ctx) + if err := mgr.GetFieldIndexer().IndexField(ctx, &dataplanev1.OpenStackDataPlaneNode{}, "spec.role", + func(rawObj client.Object) []string { + node := rawObj.(*dataplanev1.OpenStackDataPlaneNode) + return []string{node.Spec.Role} + }); err != nil { + return err + } + + roleWatcher := handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { + var namespace string = obj.GetNamespace() + var roleName string = obj.GetName() + result := []reconcile.Request{} + + // Get all nodes for the role + nodes := &dataplanev1.OpenStackDataPlaneNodeList{} + + listOpts := []client.ListOption{ + client.InNamespace(namespace), + } + fields := client.MatchingFields{"spec.role": roleName} + listOpts = append(listOpts, fields) + err := r.Client.List(ctx, nodes, listOpts...) + if err != nil { + Log.Error(err, "Unable to retrieve Node CRs %v") + return nil + } + for _, node := range nodes.Items { + name := client.ObjectKey{ + Namespace: namespace, + Name: node.Name, + } + result = append(result, reconcile.Request{NamespacedName: name}) + } + return result + }) + + return ctrl.NewControllerManagedBy(mgr). + For(&dataplanev1.OpenStackDataPlaneNode{}). + Watches(&source.Kind{Type: &dataplanev1.OpenStackDataPlaneRole{}}, roleWatcher). + Owns(&v1alpha1.OpenStackAnsibleEE{}). + Owns(&corev1.ConfigMap{}). 
+ Complete(r) +} + +// GetInstanceRole returns the role of a node based on the node's role name +func (r *OpenStackDataPlaneNodeReconciler) GetInstanceRole(ctx context.Context, instance *dataplanev1.OpenStackDataPlaneNode) (*dataplanev1.OpenStackDataPlaneRole, error) { + // Use the instances's role name to get its role object + var err error + instanceRole := &dataplanev1.OpenStackDataPlaneRole{} + if len(instance.Spec.Role) > 0 { + err = r.Client.Get(ctx, client.ObjectKey{ + Namespace: instance.Namespace, + Name: instance.Spec.Role, + }, instanceRole) + if err == nil { + err = instance.Validate(*instanceRole) + } + } + return instanceRole, err +} diff --git a/controllers/openstackdataplanenodeset_controller.go b/controllers/openstackdataplanenodeset_controller.go index 2f41a3e00..f9244cbc8 100644 --- a/controllers/openstackdataplanenodeset_controller.go +++ b/controllers/openstackdataplanenodeset_controller.go @@ -93,7 +93,11 @@ type OpenStackDataPlaneNodeSetReconciler struct { client.Client Kclient kubernetes.Interface Scheme *runtime.Scheme - Log logr.Logger +} + +// Getlogger returns a logger object with a prefix of "conroller.name" and aditional controller context fields +func (r *OpenStackDataPlaneNodeSetReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("OpenStackDataPlaneRole") } //+kubebuilder:rbac:groups=dataplane.openstack.org,resources=openstackdataplanenodesets,verbs=get;list;watch;create;update;patch;delete @@ -131,8 +135,8 @@ type OpenStackDataPlaneNodeSetReconciler struct { // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.12.2/pkg/reconcile func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { - logger := log.FromContext(ctx) - logger.Info("Reconciling NodeSet") + Log := r.GetLogger(ctx) + Log.Info("Reconciling Role") // Fetch the OpenStackDataPlaneNodeSet instance instance := 
&dataplanev1.OpenStackDataPlaneNodeSet{} @@ -153,7 +157,7 @@ func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req r.Client, r.Kclient, r.Scheme, - logger, + Log, ) if err != nil { return ctrl.Result{}, err @@ -174,7 +178,7 @@ func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req } err := helper.PatchInstance(ctx, instance) if err != nil { - logger.Error(err, "Error updating instance status conditions") + Log.Error(err, "Error updating instance status conditions") _err = err return } @@ -274,7 +278,7 @@ func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req if instance.Status.Deployed && instance.DeletionTimestamp.IsZero() { // The role is already deployed and not being deleted, so reconciliation // is already complete. - logger.Info("NodeSet already deployed", "instance", instance) + Log.Info("Role already deployed", "instance") return ctrl.Result{}, nil } @@ -292,7 +296,7 @@ func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req // Set DeploymentReadyCondition to False if it was unknown. // Handles the case where the NodeSet is created, but not yet deployed. 
if instance.Status.Conditions.IsUnknown(condition.DeploymentReadyCondition) { - logger.Info("Set DeploymentReadyCondition false") + Log.Info("Set DeploymentReadyCondition false") instance.Status.Conditions.MarkFalse(condition.DeploymentReadyCondition, condition.NotRequestedReason, condition.SeverityInfo, condition.DeploymentReadyInitMessage) @@ -300,15 +304,15 @@ func (r *OpenStackDataPlaneNodeSetReconciler) Reconcile(ctx context.Context, req deploymentExists, isDeploymentReady, err := checkDeployment(helper, req) if err != nil { - logger.Error(err, "Unable to get deployed OpenStackDataPlaneDeployments.") + Log.Error(err, "Unable to get deployed OpenStackDataPlaneDeployments.") return ctrl.Result{}, err } if isDeploymentReady { - logger.Info("Set NodeSet DeploymentReadyCondition true") + Log.Info("Set NodeSet DeploymentReadyCondition true") instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) } else if deploymentExists { - logger.Info("Set NodeSet DeploymentReadyCondition false") + Log.Info("Set NodeSet DeploymentReadyCondition false") instance.Status.Conditions.MarkFalse(condition.DeploymentReadyCondition, condition.RequestedReason, condition.SeverityInfo, condition.DeploymentReadyRunningMessage) @@ -341,7 +345,8 @@ func checkDeployment(helper *helper.Helper, } // SetupWithManager sets up the controller with the Manager. 
-func (r *OpenStackDataPlaneNodeSetReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *OpenStackDataPlaneNodeSetReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + Log := r.GetLogger(ctx) reconcileFunction := handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request { result := []reconcile.Request{} @@ -353,8 +358,8 @@ func (r *OpenStackDataPlaneNodeSetReconciler) SetupWithManager(mgr ctrl.Manager) listOpts := []client.ListOption{ client.InNamespace(o.GetNamespace()), } - if err := r.Client.List(context.Background(), nodeSets, listOpts...); err != nil { - r.Log.Error(err, "Unable to retrieve OpenStackDataPlaneNodeSetList %w") + if err := r.Client.List(ctx, nodeSets, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve OpenStackDataPlaneNodeSetList") return nil } diff --git a/main.go b/main.go index e6b0368f9..4caa96e50 100644 --- a/main.go +++ b/main.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "flag" "os" "strconv" @@ -124,8 +125,7 @@ func main() { Client: mgr.GetClient(), Scheme: mgr.GetScheme(), Kclient: kclient, - Log: ctrl.Log.WithName("controllers").WithName("OpenStackDataPlaneNodeSet"), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(context.Background(), mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "OpenStackDataPlaneNodeSet") os.Exit(1) } diff --git a/tests/functional/suite_test.go b/tests/functional/suite_test.go index c33126572..0b5facd5e 100644 --- a/tests/functional/suite_test.go +++ b/tests/functional/suite_test.go @@ -169,15 +169,13 @@ var _ = BeforeSuite(func() { Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), Kclient: kclient, - Log: ctrl.Log.WithName("controllers").WithName("DataplaneNodeSet"), - }).SetupWithManager(k8sManager) + }).SetupWithManager(context.Background(), k8sManager) Expect(err).ToNot(HaveOccurred()) err =
(&controllers.OpenStackDataPlaneDeploymentReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), Kclient: kclient, - Log: ctrl.Log.WithName("controllers").WithName("DataplaneDeployment"), }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred())