diff --git a/api/v1beta1/barbicanworker_types.go b/api/v1beta1/barbicanworker_types.go index c73ca07..918e9f2 100644 --- a/api/v1beta1/barbicanworker_types.go +++ b/api/v1beta1/barbicanworker_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1beta1 import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,6 +34,7 @@ type BarbicanWorkerSpec struct { BarbicanTemplate `json:",inline"` BarbicanWorkerTemplate `json:",inline"` + DatabaseHostname string `json:"databaseHostname"` TransportURLSecret string `json:"transportURLSecret,omitempty"` } @@ -41,6 +43,23 @@ type BarbicanWorkerSpec struct { type BarbicanWorkerStatus struct { // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster // Important: Run "make" to regenerate code after modifying this file + // ReadyCount of barbican API instances + ReadyCount int32 `json:"readyCount,omitempty"` + + // Map of hashes to track e.g. job status + Hash map[string]string `json:"hash,omitempty"` + + // API endpoint + //APIEndpoints map[string]string `json:"apiEndpoint,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // NetworkAttachments status of the deployment pods + NetworkAttachments map[string][]string `json:"networkAttachments,omitempty"` + + // Barbican Database Hostname + DatabaseHostname string `json:"databaseHostname,omitempty"` } //+kubebuilder:object:root=true diff --git a/api/v1beta1/conditions.go b/api/v1beta1/conditions.go index 7ec075a..9061c0f 100644 --- a/api/v1beta1/conditions.go +++ b/api/v1beta1/conditions.go @@ -19,6 +19,8 @@ const ( BarbicanAPIReadyErrorMessage = "BarbicanAPI error occured %s" // BarbicanWorkerReadyInitMessage - BarbicanWorkerReadyInitMessage = "BarbicanWorker not started" + // BarbicanWorkerReadyErrorMessage - + BarbicanWorkerReadyErrorMessage = "BarbicanWorker error occured %s" // 
BarbicanRabbitMQTransportURLReadyRunningMessage - BarbicanRabbitMQTransportURLReadyRunningMessage = "BarbicanRabbitMQTransportURL creation in progress" diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 7adc95a..c444afe 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -394,7 +394,7 @@ func (in *BarbicanWorker) DeepCopyInto(out *BarbicanWorker) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BarbicanWorker. @@ -467,6 +467,35 @@ func (in *BarbicanWorkerSpec) DeepCopy() *BarbicanWorkerSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BarbicanWorkerStatus) DeepCopyInto(out *BarbicanWorkerStatus) { *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BarbicanWorkerStatus. 
diff --git a/config/crd/bases/barbican.openstack.org_barbicanworkers.yaml b/config/crd/bases/barbican.openstack.org_barbicanworkers.yaml index a305a9d..2dc2c92 100644 --- a/config/crd/bases/barbican.openstack.org_barbicanworkers.yaml +++ b/config/crd/bases/barbican.openstack.org_barbicanworkers.yaml @@ -54,6 +54,8 @@ spec: items: type: string type: array + databaseHostname: + type: string databaseInstance: description: 'MariaDB instance name TODO(dmendiza): Is this comment right? Right now required by the maridb-operator to get the credentials @@ -207,12 +209,78 @@ spec: type: string required: - containerImage + - databaseHostname - databaseInstance - rabbitMqClusterName - serviceAccount type: object status: description: BarbicanWorkerStatus defines the observed state of BarbicanWorker + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. This should be when the underlying condition changed. + If that is not known, then using the time when the API field + changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: Severity provides a classification of Reason code, + so the current situation is immediately understandable and + could act accordingly. It is meant for situations where Status=False + and it should be indicated if it is just informational, warning + (next reconciliation might fix it) or an error (e.g. DB create + issue and no actions to automatically resolve the issue can/should + be done). For conditions where Status=Unknown or Status=True + the Severity should be SeverityNone. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + databaseHostname: + description: Barbican Database Hostname + type: string + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + readyCount: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file ReadyCount of barbican API instances' + format: int32 + type: integer type: object type: object served: true diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 685f574..92b5490 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -79,32 +79,6 @@ rules: - get - patch - update -- apiGroups: - - barbican.openstack.org - resources: - - barbicanworkers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - barbican.openstack.org - resources: - - barbicanworkers/finalizers - verbs: - - update -- apiGroups: - - barbican.openstack.org - resources: - - barbicanworkers/status - verbs: - - get - - patch - - update - apiGroups: - batch resources: diff --git a/config/samples/barbican_v1beta1_barbican.yaml b/config/samples/barbican_v1beta1_barbican.yaml index 3a9123d..d4421c0 100644 --- a/config/samples/barbican_v1beta1_barbican.yaml +++ b/config/samples/barbican_v1beta1_barbican.yaml @@ -56,5 +56,3 @@ spec: defautlConfigOverwrite: optional_policy.json: | {"some": "custom policy"} - networkAttachments: - - internal diff --git a/controllers/barbican_controller.go b/controllers/barbican_controller.go index ad47041..58fb8b5 100644 --- 
a/controllers/barbican_controller.go +++ b/controllers/barbican_controller.go @@ -313,6 +313,20 @@ func (r *BarbicanReconciler) reconcileNormal(ctx context.Context, instance *barb r.Log.Info(fmt.Sprintf("Deployment %s successfully reconciled - operation: %s", instance.Name, string(op))) } + // create or update Barbican Worker deployment + _, op, err = r.workerDeploymentCreateOrUpdate(ctx, instance, helper) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + barbicanv1beta1.BarbicanWorkerReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + barbicanv1beta1.BarbicanWorkerReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if op != controllerutil.OperationResultNone { + r.Log.Info(fmt.Sprintf("Deployment %s successfully reconciled - operation: %s", instance.Name, string(op))) + } // TODO(dmendiza): Handle API endpoints // TODO(dmendiza): Understand what Glance is doing with the API conditions and maybe do it here too @@ -505,6 +519,42 @@ func (r *BarbicanReconciler) apiDeploymentCreateOrUpdate(ctx context.Context, in return deployment, op, err } +func (r *BarbicanReconciler) workerDeploymentCreateOrUpdate(ctx context.Context, instance *barbicanv1beta1.Barbican, helper *helper.Helper) (*barbicanv1beta1.BarbicanWorker, controllerutil.OperationResult, error) { + + r.Log.Info(fmt.Sprintf("Creating barbican Worker spec. 
transporturlsecret: '%s'", instance.Status.TransportURLSecret)) + r.Log.Info(fmt.Sprintf("database hostname: '%s'", instance.Status.DatabaseHostname)) + workerSpec := barbicanv1beta1.BarbicanWorkerSpec{ + BarbicanTemplate: instance.Spec.BarbicanTemplate, + BarbicanWorkerTemplate: instance.Spec.BarbicanWorker, + DatabaseHostname: instance.Status.DatabaseHostname, + TransportURLSecret: instance.Status.TransportURLSecret, + } + + deployment := &barbicanv1beta1.BarbicanWorker{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-worker", instance.Name), + Namespace: instance.Namespace, + }, + } + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error { + r.Log.Info("Setting deployment spec to be workerspec") + deployment.Spec = workerSpec + + err := controllerutil.SetControllerReference(instance, deployment, r.Scheme) + if err != nil { + return err + } + + // Add a finalizer to prevent user from manually removing child BarbicanAPI + controllerutil.AddFinalizer(deployment, helper.GetFinalizer()) + + return nil + }) + + return deployment, op, err +} + func (r *BarbicanReconciler) reconcileInit( ctx context.Context, instance *barbicanv1beta1.Barbican, diff --git a/controllers/barbicanworker_controller.go b/controllers/barbicanworker_controller.go index 4c11490..bb0ccf2 100644 --- a/controllers/barbicanworker_controller.go +++ b/controllers/barbicanworker_controller.go @@ -18,18 +18,60 @@ package controllers import ( "context" + "fmt" + "time" + "github.com/go-logr/logr" + //routev1 "github.com/openshift/api/route/v1" + barbicanv1beta1 "github.com/openstack-k8s-operators/barbican-operator/api/v1beta1" + "github.com/openstack-k8s-operators/barbican-operator/pkg/barbican" + "github.com/openstack-k8s-operators/barbican-operator/pkg/barbicanworker" + //keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common" + 
"github.com/openstack-k8s-operators/lib-common/modules/common/condition" + //"github.com/openstack-k8s-operators/lib-common/modules/common/deployment" + //"github.com/openstack-k8s-operators/lib-common/modules/common/endpoint" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/labels" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/log" - "github.com/go-logr/logr" - barbicanv1beta1 "github.com/openstack-k8s-operators/barbican-operator/api/v1beta1" + //appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" ) -// BarbicanWorkerReconciler reconciles a BarbicanWorker object +/* +// GetClient - +func (r *BarbicanAPIReconciler) GetClient() client.Client { + return r.Client +} + +// GetKClient - +func (r *BarbicanAPIReconciler) GetKClient() kubernetes.Interface { + return r.Kclient +} + +// GetLogger - +func (r *BarbicanAPIReconciler) GetLogger() logr.Logger { + return r.Log +} + +// GetScheme - +func (r *BarbicanAPIReconciler) GetScheme() *runtime.Scheme { + return r.Scheme +} +*/ + +// BarbicanAPIReconciler reconciles a BarbicanAPI object type BarbicanWorkerReconciler struct { client.Client Kclient kubernetes.Interface @@ -37,24 +79,560 @@ type BarbicanWorkerReconciler struct { Scheme *runtime.Scheme } -//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanworkers,verbs=get;list;watch;create;update;patch;delete 
-//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanworkers/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanworkers/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the BarbicanWorker object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *BarbicanWorkerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanapis,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanapis/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=barbican.openstack.org,resources=barbicanapis/finalizers,verbs=update + +// Reconcile BarbicanAPI +func (r *BarbicanWorkerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { _ = log.FromContext(ctx) - // TODO(user): your logic here + instance := &barbicanv1beta1.BarbicanWorker{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Object not found + return ctrl.Result{}, err + } + } + r.Log.Info(fmt.Sprintf("Reconciling BarbicanWorker %s", instance.Name)) + + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + r.Log, + ) + if err != nil { + return ctrl.Result{}, err + } + + // Always patch the instance when this function exits + defer func() { + if instance.Status.Conditions.AllSubConditionIsTrue() { + 
instance.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + } else { + instance.Status.Conditions.MarkUnknown( + condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage) + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + r.Log.Info(fmt.Sprintf("Add finalizer %s", instance.Name)) + // Add Finalizer + if instance.DeletionTimestamp.IsZero() && controllerutil.AddFinalizer(instance, helper.GetFinalizer()) { + return ctrl.Result{}, nil + } + + r.Log.Info(fmt.Sprintf("initilize %s", instance.Name)) + + // Initialize Conditions + if instance.Status.Conditions == nil { + instance.Status.Conditions = condition.Conditions{} + cl := condition.CreateList( + //condition.UnknownCondition(condition.ExposeServiceReadyCondition, condition.InitReason, condition.ExposeServiceReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + // right now we have no dedicated KeystoneServiceReadyInitMessage and KeystoneEndpointReadyInitMessage + //condition.UnknownCondition(condition.KeystoneServiceReadyCondition, condition.InitReason, ""), + //condition.UnknownCondition(condition.KeystoneEndpointReadyCondition, condition.InitReason, ""), + condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), + ) + r.Log.Info(fmt.Sprintf("calling init %s", instance.Name)) + instance.Status.Conditions.Init(&cl) + r.Log.Info(fmt.Sprintf("post init %s", instance.Name)) + + // TODO: (alee) this is ssupposed 
to exit here - but then it never comes back! + // Register overall status immediately to have an early feedback e.g. in the cli + return ctrl.Result{}, nil + } + r.Log.Info(fmt.Sprintf("post initiialize %s", instance.Name)) + + if instance.Status.Hash == nil { + instance.Status.Hash = map[string]string{} + } + //if instance.Status.APIEndpoints == nil { + // instance.Status.APIEndpoints = map[string]string{} + //} + //if instance.Status.ServiceIDs == nil { + // instance.Status.ServiceIDs = map[string]string{} + //} + if instance.Status.NetworkAttachments == nil { + instance.Status.NetworkAttachments = map[string][]string{} + } + + // Handle service delete + if !instance.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, instance, helper) + } + + r.Log.Info(fmt.Sprintf("Calling reconcile normal %s", instance.Name)) + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, instance, helper) +} + +func (r *BarbicanWorkerReconciler) getSecret( + ctx context.Context, + h *helper.Helper, + instance *barbicanv1beta1.BarbicanWorker, + secretName string, + envVars *map[string]env.Setter, +) (ctrl.Result, error) { + secret, hash, err := secret.GetSecret(ctx, h, secretName, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.InputReadyWaitingMessage)) + return ctrl.Result{RequeueAfter: time.Duration(10) * time.Second}, fmt.Errorf("Secret %s not found", secretName) + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.InputReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + // Add a prefix to the var name to avoid accidental collision with other non-secret + // vars. The secret names themselves will be unique. 
+ (*envVars)["secret-"+secret.Name] = env.SetValue(hash) + // env[secret-osp-secret] = hash? + + return ctrl.Result{}, nil +} + +func (r *BarbicanWorkerReconciler) createHashOfInputHashes( + ctx context.Context, + instance *barbicanv1beta1.BarbicanWorker, + envVars map[string]env.Setter, +) (string, bool, error) { + var hashMap map[string]string + changed := false + mergedMapVars := env.MergeEnvs([]corev1.EnvVar{}, envVars) + hash, err := util.ObjectHash(mergedMapVars) + if err != nil { + return hash, changed, err + } + r.Log.Info("[Worker] ON createHashOfInputHashes") + if hashMap, changed = util.SetHash(instance.Status.Hash, common.InputHashName, hash); changed { + instance.Status.Hash = hashMap + r.Log.Info(fmt.Sprintf("Input maps hash %s - %s", common.InputHashName, hash)) + } + return hash, changed, nil +} + +// generateServiceConfigs - create Secret which holds the service configuration +// TODO add DefaultConfigOverwrite +func (r *BarbicanWorkerReconciler) generateServiceConfigs( + ctx context.Context, + h *helper.Helper, + instance *barbicanv1beta1.BarbicanWorker, + envVars *map[string]env.Setter, +) error { + r.Log.Info("[Worker] generateServiceConfigs - reconciling") + labels := labels.GetLabels(instance, labels.GetGroupLabel(barbican.ServiceName), map[string]string{}) + + // customData hold any customization for the service. 
+ customData := map[string]string{common.CustomServiceConfigFileName: instance.Spec.CustomServiceConfig} + + r.Log.Info(fmt.Sprintf("[Worker] instance type %s", instance.GetObjectKind().GroupVersionKind().Kind)) + + for key, data := range instance.Spec.DefaultConfigOverwrite { + customData[key] = data + } + + //keystoneAPI, err := keystonev1.GetKeystoneAPI(ctx, h, instance.Namespace, map[string]string{}) + // KeystoneAPI not available we should not aggregate the error and continue + //if err != nil { + // return err + //} + //keystoneInternalURL, err := keystoneAPI.GetEndpoint(endpoint.EndpointInternal) + //if err != nil { + // return err + //} + + ospSecret, _, err := secret.GetSecret(ctx, h, instance.Spec.Secret, instance.Namespace) + if err != nil { + return err + } + + transportURLSecret, _, err := secret.GetSecret(ctx, h, instance.Spec.TransportURLSecret, instance.Namespace) + if err != nil { + return err + } + + templateParameters := map[string]interface{}{ + "DatabaseConnection": fmt.Sprintf("mysql+pymysql://%s:%s@%s/%s", + instance.Spec.DatabaseUser, + string(ospSecret.Data[instance.Spec.PasswordSelectors.Database]), + instance.Spec.DatabaseHostname, + barbican.DatabaseName, + ), + //"KeystoneAuthURL": keystoneInternalURL, + //"ServicePassword": string(ospSecret.Data[instance.Spec.PasswordSelectors.Service]), + //"ServiceUser": instance.Spec.ServiceUser, + //"ServiceURL": "https://barbican.openstack.svc:9311", + "TransportURL": string(transportURLSecret.Data["transport_url"]), + "LogFile": fmt.Sprintf("%s%s.log", barbican.BarbicanLogPath, instance.Name), + } + + return GenerateConfigsGeneric(ctx, h, instance, envVars, templateParameters, customData, labels, false) +} + +func (r *BarbicanWorkerReconciler) reconcileInit( + ctx context.Context, + instance *barbicanv1beta1.BarbicanWorker, + helper *helper.Helper, + serviceLabels map[string]string, +) (ctrl.Result, error) { + r.Log.Info(fmt.Sprintf("[Worker] Reconciling Service '%s' init", instance.Name)) + + // 
+ // expose the service (create service, route and return the created endpoint URLs) + // + //ports := map[endpoint.Endpoint]endpoint.Data{} + //ports[endpoint.EndpointInternal] = endpoint.Data{ + // Port: barbican.BarbicanInternalPort, + //} + //ports[endpoint.EndpointPublic] = endpoint.Data{ + // Port: barbican.BarbicanPublicPort, + //} + + /* + for _, metallbcfg := range instance.Spec.ExternalEndpoints { + portCfg := ports[metallbcfg.Endpoint] + portCfg.MetalLB = &endpoint.MetalLBData{ + IPAddressPool: metallbcfg.IPAddressPool, + SharedIP: metallbcfg.SharedIP, + SharedIPKey: metallbcfg.SharedIPKey, + LoadBalancerIPs: metallbcfg.LoadBalancerIPs, + } + + ports[metallbcfg.Endpoint] = portCfg + } + + apiEndpoints, ctrlResult, err := endpoint.ExposeEndpoints( + ctx, + helper, + barbican.ServiceName, + serviceLabels, + ports, + time.Duration(5)*time.Second, + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ExposeServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ExposeServiceReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ExposeServiceReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.ExposeServiceReadyRunningMessage)) + return ctrlResult, nil + } + */ + //instance.Status.Conditions.MarkTrue(condition.ExposeServiceReadyCondition, condition.ExposeServiceReadyMessage) + + // + // Update instance status with service endpoint url from route host information + // + // TODO: need to support https default here + //if instance.Status.APIEndpoints == nil { + // instance.Status.APIEndpoints = map[string]string{} + //} + //instance.Status.APIEndpoints = apiEndpoints + + // expose service - end + + // + // create keystone endpoints + // + + //ksEndpointSpec := keystonev1.KeystoneEndpointSpec{ + // ServiceName: barbican.ServiceName, + // 
Endpoints: instance.Status.APIEndpoints, + //} + + /* + ksSvc := keystonev1.NewKeystoneEndpoint(instance.Name, instance.Namespace, ksEndpointSpec, serviceLabels, time.Duration(10)*time.Second) + ctrlResult, err = ksSvc.CreateOrPatch(ctx, helper) + if err != nil { + return ctrlResult, err + } + + // mirror the Status, Reason, Severity and Message of the latest keystoneendpoint condition + // into a local condition with the type condition.KeystoneEndpointReadyCondition + c := ksSvc.GetConditions().Mirror(condition.KeystoneEndpointReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + + if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // + // create keystone endpoints - end + // + + */ + r.Log.Info(fmt.Sprintf("[Worker] Reconciled Service '%s' init successfully", instance.Name)) + return ctrl.Result{}, nil +} + +func (r *BarbicanWorkerReconciler) reconcileUpdate(ctx context.Context, instance *barbicanv1beta1.BarbicanWorker, helper *helper.Helper) (ctrl.Result, error) { + r.Log.Info(fmt.Sprintf("[Worker] Reconciling Service '%s' update", instance.Name)) + + // TODO: should have minor update tasks if required + // - delete dbsync hash from status to rerun it? + + r.Log.Info(fmt.Sprintf("[Worker] Reconciled Service '%s' update successfully", instance.Name)) + return ctrl.Result{}, nil +} + +func (r *BarbicanWorkerReconciler) reconcileUpgrade(ctx context.Context, instance *barbicanv1beta1.BarbicanWorker, helper *helper.Helper) (ctrl.Result, error) { + r.Log.Info(fmt.Sprintf("[Worker] Reconciling Service '%s' upgrade", instance.Name)) + + // TODO: should have major version upgrade tasks + // -delete dbsync hash from status to rerun it? 
+ + r.Log.Info(fmt.Sprintf("[Worker] Reconciled Service '%s' upgrade successfully", instance.Name)) + return ctrl.Result{}, nil +} + +func (r *BarbicanWorkerReconciler) reconcileDelete(ctx context.Context, instance *barbicanv1beta1.BarbicanWorker, helper *helper.Helper) (ctrl.Result, error) { + r.Log.Info(fmt.Sprintf("Reconciling Service '%s' delete", instance.Name)) + + // Remove the finalizer from our KeystoneEndpoint CR + //keystoneEndpoint, err := keystonev1.GetKeystoneEndpointWithName(ctx, helper, instance.Name, instance.Namespace) + //if err != nil && !k8s_errors.IsNotFound(err) { + // return ctrl.Result{}, err + //} + + /* + if err == nil { + if controllerutil.RemoveFinalizer(keystoneEndpoint, helper.GetFinalizer()) { + err = r.Update(ctx, keystoneEndpoint) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + util.LogForObject(helper, "Removed finalizer from our KeystoneEndpoint", instance) + } + } + */ + + // Service is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) + r.Log.Info(fmt.Sprintf("Reconciled Service '%s' delete successfully", instance.Name)) + + return ctrl.Result{}, nil +} + +func (r *BarbicanWorkerReconciler) reconcileNormal(ctx context.Context, instance *barbicanv1beta1.BarbicanWorker, helper *helper.Helper) (ctrl.Result, error) { + r.Log.Info(fmt.Sprintf("[Worker] Reconciling Service '%s'", instance.Name)) + + configVars := make(map[string]env.Setter) + + // + // check for required OpenStack secret holding passwords for service/admin user and add hash to the vars map + // + r.Log.Info(fmt.Sprintf("[Worker] Get secret 1 '%s'", instance.Name)) + ctrlResult, err := r.getSecret(ctx, helper, instance, instance.Spec.Secret, &configVars) + if err != nil { + return ctrlResult, err + } + + // + // check for required TransportURL secret holding transport URL string + // + r.Log.Info(fmt.Sprintf("[Worker] Get secret 2 '%s'", instance.Spec.TransportURLSecret)) + ctrlResult, err = r.getSecret(ctx, helper, instance, instance.Spec.TransportURLSecret, &configVars) + if err != nil { + return ctrlResult, err + } + + // TODO (alee) cinder has some code here to retrieve secrets from the parent CR + // Seems like we may want this instead + + // TODO (alee) cinder has some code to retrieve CustomServiceConfigSecrets + // This seems like a great place to store things like HSM passwords + + r.Log.Info(fmt.Sprintf("[Worker] Got secrets '%s'", instance.Name)) + // + // create custom config for this barbican service + // + err = r.generateServiceConfigs(ctx, helper, instance, &configVars) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + r.Log.Info(fmt.Sprintf("[Worker] Getting input hash '%s'", instance.Name)) + // + // create hash over all the different 
input resources to identify if any those changed + // and a restart/recreate is required. + // + inputHash, hashChanged, err := r.createHashOfInputHashes(ctx, instance, configVars) + if err != nil { + r.Log.Info("[Worker] ERR") + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } else if hashChanged { + r.Log.Info("[Worker] HAS CHANGED") + // Hash changed and instance status should be updated (which will be done by main defer func), + // so we need to return and reconcile again + //return ctrl.Result{}, nil + } + r.Log.Info("[Worker] CONTINUE") + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + r.Log.Info(fmt.Sprintf("[Worker] Getting service labels '%s'", instance.Name)) + serviceLabels := map[string]string{ + common.AppSelector: fmt.Sprintf(barbican.ServiceName), + } + + r.Log.Info(fmt.Sprintf("[Worker] Getting networks '%s'", instance.Name)) + // networks to attach to + for _, netAtt := range instance.Spec.NetworkAttachments { + _, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyWaitingMessage, + netAtt)) + return ctrl.Result{RequeueAfter: time.Second * 10}, fmt.Errorf("network-attachment-definition %s not found", netAtt) + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + } + + r.Log.Info(fmt.Sprintf("[Worker] Getting service annotations 
'%s'", instance.Name)) + serviceAnnotations, err := nad.CreateNetworksAnnotation(instance.Namespace, instance.Spec.NetworkAttachments) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed create network annotation from %s: %w", + instance.Spec.NetworkAttachments, err) + } + r.Log.Info(fmt.Sprintf("[DELETE] %s", serviceAnnotations)) + + // Handle service init + ctrlResult, err = r.reconcileInit(ctx, instance, helper, serviceLabels) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // Handle service update + ctrlResult, err = r.reconcileUpdate(ctx, instance, helper) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // Handle service upgrade + ctrlResult, err = r.reconcileUpgrade(ctx, instance, helper) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + r.Log.Info(fmt.Sprintf("[Worker] Defining deployment '%s'", instance.Name)) + // Define a new Deployment object + deplDef := barbicanworker.Deployment(instance, inputHash, serviceLabels, serviceAnnotations) + r.Log.Info(fmt.Sprintf("[Worker] Getting deployment '%s'", instance.Name)) + r.Log.Info(fmt.Sprintf("[Worker] '%s'", deplDef)) + /* + depl := deployment.NewDeployment( + deplDef, + time.Duration(5)*time.Second, + ) + r.Log.Info(fmt.Sprintf("[API] Got deployment '%s'", instance.Name)) + ctrlResult, err = depl.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + 
condition.DeploymentReadyRunningMessage)) + return ctrlResult, nil + } + instance.Status.ReadyCount = depl.GetDeployment().Status.ReadyReplicas + + // verify if network attachment matches expectations + networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation(ctx, helper, instance.Spec.NetworkAttachments, serviceLabels, instance.Status.ReadyCount) + if err != nil { + return ctrl.Result{}, err + } + + instance.Status.NetworkAttachments = networkAttachmentStatus + if networkReady { + instance.Status.Conditions.MarkTrue(condition.NetworkAttachmentsReadyCondition, condition.NetworkAttachmentsReadyMessage) + } else { + err := fmt.Errorf("not all pods have interfaces with ips as configured in NetworkAttachments: %s", instance.Spec.NetworkAttachments) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + + return ctrl.Result{}, err + } + + if instance.Status.ReadyCount > 0 { + instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) + } + // create Deployment - end + r.Log.Info(fmt.Sprintf("Reconciled Service '%s' in barbicanAPI successfully", instance.Name)) + */ return ctrl.Result{}, nil } @@ -62,5 +640,10 @@ func (r *BarbicanWorkerReconciler) Reconcile(ctx context.Context, req ctrl.Reque func (r *BarbicanWorkerReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&barbicanv1beta1.BarbicanWorker{}). + //Owns(&keystonev1.KeystoneEndpoint{}). + //Owns(&corev1.Service{}). + //Owns(&corev1.Secret{}). + //Owns(&appsv1.Deployment{}). + //Owns(&routev1.Route{}). 
Complete(r) } diff --git a/main.go b/main.go index 13c2b4a..bdecec8 100644 --- a/main.go +++ b/main.go @@ -66,8 +66,8 @@ func main() { var metricsAddr string var enableLeaderElection bool var probeAddr string - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8085", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8086", "The address the probe endpoint binds to.") flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") diff --git a/pkg/barbicanworker/deployment.go b/pkg/barbicanworker/deployment.go new file mode 100644 index 0000000..ad13058 --- /dev/null +++ b/pkg/barbicanworker/deployment.go @@ -0,0 +1,163 @@ +package barbicanworker + +import ( + "fmt" + + "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + //"k8s.io/apimachinery/pkg/util/intstr" + + barbicanv1beta1 "github.com/openstack-k8s-operators/barbican-operator/api/v1beta1" + barbican "github.com/openstack-k8s-operators/barbican-operator/pkg/barbican" +) + +const ( + // ServiceCommand - + ServiceCommand = "/usr/local/bin/kolla_start" +) + +// Deployment - returns a BarbicanWorker Deployment +func Deployment( + instance *barbicanv1beta1.BarbicanWorker, + configHash string, + labels map[string]string, + annotations map[string]string, +) *appsv1.Deployment { + runAsUser := int64(0) + //var config0644AccessMode int32 = 0644 + envVars := map[string]env.Setter{} + envVars["KOLLA_CONFIG_STRATEGY"] = 
env.SetValue("COPY_ALWAYS") + envVars["CONFIG_HASH"] = env.SetValue(configHash) + livenessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 5, + PeriodSeconds: 3, + InitialDelaySeconds: 5, + } + readinessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 5, + PeriodSeconds: 5, + InitialDelaySeconds: 5, + } + args := []string{"-c"} + if instance.Spec.Debug.Service { + args = append(args, common.DebugCommand) + livenessProbe.Exec = &corev1.ExecAction{ + Command: []string{ + "/bin/true", + }, + } + readinessProbe.Exec = livenessProbe.Exec + } else { + args = append(args, ServiceCommand) + // + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + // + //livenessProbe.HTTPGet = &corev1.HTTPGetAction{ + // Path: "/healthcheck", + // Port: intstr.IntOrString{Type: intstr.Int, IntVal: int32(barbican.BarbicanPublicPort)}, + //} + //readinessProbe.HTTPGet = livenessProbe.HTTPGet + } + + /* + workerVolumes := []corev1.Volume{ + { + Name: "config-data-custom", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &config0644AccessMode, + SecretName: instance.Name + "-config-data", + }, + }, + }, + } + + workerVolumes = append(apiVolumes, barbican.GetLogVolume()...) + workerVolumeMounts := []corev1.VolumeMount{ + { + Name: "config-data", + MountPath: "/var/lib/kolla/config_files/config.json", + SubPath: "barbican-worker-config.json", + ReadOnly: true, + }, + } + */ + // Append LogVolume to the apiVolumes: this will be used to stream + // logging + //apiVolumeMounts = append(apiVolumeMounts, barbican.GetLogVolumeMount()...) 
+ + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-worker", instance.Name), + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Replicas: instance.Spec.Replicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Labels: labels, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: instance.Spec.ServiceAccount, + Containers: []corev1.Container{ + { + Name: instance.Name + "-log", + Command: []string{ + "/bin/bash", + }, + Args: []string{"-c", "tail -n+1 -F " + barbican.BarbicanLogPath + instance.Name + ".log"}, + Image: instance.Spec.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUser, + }, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: barbican.GetLogVolumeMount(), + Resources: instance.Spec.Resources, + ReadinessProbe: readinessProbe, + LivenessProbe: livenessProbe, + }, + { + Name: barbican.ServiceName + "-worker", + Command: []string{ + "/bin/bash", + }, + Args: args, + Image: instance.Spec.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUser, + }, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + //VolumeMounts: append(barbican.GetVolumeMounts( + // instance.Spec.CustomServiceConfigSecrets, + // barbican.BarbicanAPIPropagation), + // apiVolumeMounts..., + //), + Resources: instance.Spec.Resources, + ReadinessProbe: readinessProbe, + LivenessProbe: livenessProbe, + }, + }, + }, + }, + }, + } + /* + deployment.Spec.Template.Spec.Volumes = append(barbican.GetVolumes( + instance.Name, + barbican.ServiceName, + instance.Spec.CustomServiceConfigSecrets, + barbican.BarbicanAPIPropagation), + apiVolumes...) 
+ */ + return deployment +} diff --git a/templates/barbican/config/00-default.conf b/templates/barbican/config/00-default.conf index 4d64981..ef59382 100644 --- a/templates/barbican/config/00-default.conf +++ b/templates/barbican/config/00-default.conf @@ -1,10 +1,13 @@ [DEFAULT] sql_connection = {{ .DatabaseConnection }} +{{ if (index . "ServiceURL") }} host_href = {{ .ServiceURL }} +{{ end }} debug = true transport_url = {{ .TransportURL }} log_file = {{ .LogFile }} +{{ if (index . "KeystoneAuthURL") }} [keystone_authtoken] auth_version = v3 auth_url={{ .KeystoneAuthURL }} @@ -15,6 +18,7 @@ password = {{ .ServicePassword }} project_name=service project_domain_name=Default interface = internal +{{ end }} [keystone_notifications] enable = false diff --git a/templates/barbican/config/barbican-worker-config.json b/templates/barbican/config/barbican-worker-config.json new file mode 100644 index 0000000..bccec7f --- /dev/null +++ b/templates/barbican/config/barbican-worker-config.json @@ -0,0 +1,39 @@ +{ + "command": "/usr/bin/barbican-worker", + "config_files": [ + { + "source": "/var/lib/config-data/default/00-default.conf", + "dest": "/etc/barbican/barbican.conf.d/00-default.conf", + "owner": "barbican", + "perm": "0600" + }, + { + "source": "/var/lib/config-data/default/02-service.conf", + "dest": "/etc/barbican/barbican.conf.d/02-service.conf", + "owner": "barbican", + "perm": "0600", + "optional": true + }, + { + "source": "/var/lib/config-data/default/03-secrets.conf", + "dest": "/etc/barbican/barbican.conf.d/03-secrets.conf", + "owner": "barbican", + "perm": "0640", + "optional": true + }, + { + "source": "/var/lib/config-data/default/kolla_extend_start", + "dest": "/usr/local/bin/kolla_extend_start", + "owner": "root", + "perm": "0755", + "optional": true + } + ], + "permissions": [ + { + "path": "/var/log/barbican", + "owner": "barbican:barbican", + "recurse": true + } + ] +} diff --git a/templates/barbicanworker b/templates/barbicanworker new file mode 
120000 index 0000000..cded8ff --- /dev/null +++ b/templates/barbicanworker @@ -0,0 +1 @@ +barbican \ No newline at end of file