diff --git a/.golangci.yml b/.golangci.yml
index 1a29c45a..eab84070 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -230,7 +230,7 @@ linters-settings:
         - ["call-chain", "loop", "method-call", "recover", "immediate-recover", "return"]
     - name: dot-imports
       arguments:
-        - { allowedPackages: ["github.com/onsi/ginkgo/v2","github.com/onsi/gomega"] }
+        - { allowedPackages: ["github.com/onsi/ginkgo/v2","github.com/onsi/gomega","sigs.k8s.io/controller-runtime/pkg/envtest/komega"] }
     - name: duplicated-imports
     - name: early-return
     - name: empty-block
diff --git a/Makefile b/Makefile
index bab327cb..b718ef18 100644
--- a/Makefile
+++ b/Makefile
@@ -431,6 +431,10 @@ CAPI_OPERATOR_VERSION ?= v$(shell $(YQ) -r '.dependencies.[] | select(.name == "
 CAPI_OPERATOR_CRD_PREFIX ?= "operator.cluster.x-k8s.io_"
 CAPI_OPERATOR_CRDS ?= capi-operator-crds
 
+CLUSTER_API_VERSION ?= v1.9.3
+CLUSTER_API_CRD_PREFIX ?= "cluster.x-k8s.io_"
+CLUSTER_API_CRDS ?= cluster-api-crds
+
 ## Tool Binaries
 KUBECTL ?= kubectl
 CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION)
@@ -511,8 +515,15 @@ $(CAPI_OPERATOR_CRDS): | $(YQ) $(EXTERNAL_CRD_DIR)
 	curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api-operator/$(CAPI_OPERATOR_VERSION)/config/crd/bases/$(CAPI_OPERATOR_CRD_PREFIX)${name}.yaml \
 		> $(EXTERNAL_CRD_DIR)/$(CAPI_OPERATOR_CRD_PREFIX)${name}-$(CAPI_OPERATOR_VERSION).yaml;)
 
+$(CLUSTER_API_CRDS): | $(YQ) $(EXTERNAL_CRD_DIR)
+	rm -f $(EXTERNAL_CRD_DIR)/$(CLUSTER_API_CRD_PREFIX)*
+	@$(foreach name, \
+		clusters machinedeployments, \
+		curl -s --fail https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/$(CLUSTER_API_VERSION)/config/crd/bases/$(CLUSTER_API_CRD_PREFIX)${name}.yaml \
+			> $(EXTERNAL_CRD_DIR)/$(CLUSTER_API_CRD_PREFIX)${name}-$(CLUSTER_API_VERSION).yaml;)
+
 .PHONY: external-crd
-external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(CAPI_OPERATOR_CRDS)
+external-crd: $(FLUX_HELM_CRD) $(FLUX_SOURCE_CHART_CRD) $(FLUX_SOURCE_REPO_CRD) $(SVELTOS_CRD) $(CAPI_OPERATOR_CRDS) $(CLUSTER_API_CRDS)
 
 .PHONY: kind
 kind: $(KIND) ## Download kind locally if necessary.
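The new `cluster-api-crds` target mirrors the existing `capi-operator-crds` one: it fetches the upstream Cluster API `Cluster` and `MachineDeployment` CRDs so the envtest suite can create those objects, while the `.golangci.yml` change allow-lists komega for dot-imports in tests. Below is a minimal sketch of how CRDs fetched this way are typically handed to envtest; the `externalCRDDir` path is an assumption standing in for whatever `EXTERNAL_CRD_DIR` resolves to in this repository, and running it requires the usual `KUBEBUILDER_ASSETS` binaries.

```go
package controller_test

import (
	"path/filepath"
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/envtest"
)

func TestEnvWithExternalCRDs(t *testing.T) {
	// Assumed location of the manifests downloaded by `make external-crd`.
	externalCRDDir := filepath.Join("..", "..", "bin", "crd")

	testEnv := &envtest.Environment{
		// Project CRDs plus the fetched cluster.x-k8s.io_clusters /
		// machinedeployments manifests are installed into the API server.
		CRDDirectoryPaths: []string{
			filepath.Join("..", "..", "config", "crd", "bases"),
			externalCRDDir,
		},
		ErrorIfCRDPathMissing: true,
	}

	cfg, err := testEnv.Start()
	if err != nil {
		t.Fatalf("failed to start envtest: %v", err)
	}
	defer testEnv.Stop() //nolint:errcheck // best-effort teardown in a sketch
	_ = cfg              // a client would normally be built from this config
}
```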
diff --git a/internal/controller/clusterdeployment_controller.go b/internal/controller/clusterdeployment_controller.go
index fb8381c1..f0b3bec3 100644
--- a/internal/controller/clusterdeployment_controller.go
+++ b/internal/controller/clusterdeployment_controller.go
@@ -60,9 +60,16 @@ const (
 	DefaultRequeueInterval = 10 * time.Second
 )
 
+type helmActor interface {
+	DownloadChartFromArtifact(ctx context.Context, artifact *sourcev1.Artifact) (*chart.Chart, error)
+	InitializeConfiguration(clusterDeployment *hmc.ClusterDeployment, log action.DebugLog) (*action.Configuration, error)
+	EnsureReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, hcChart *chart.Chart, clusterDeployment *hmc.ClusterDeployment) error
+}
+
 // ClusterDeploymentReconciler reconciles a ClusterDeployment object
 type ClusterDeploymentReconciler struct {
 	client.Client
+	helmActor
 	Config          *rest.Config
 	DynamicClient   *dynamic.DynamicClient
 	SystemNamespace string
@@ -128,7 +135,7 @@ func (r *ClusterDeploymentReconciler) setStatusFromChildObjects(ctx context.Cont
 
 		if metaCondition.Reason == "" && metaCondition.Status == metav1.ConditionTrue {
 			metaCondition.Message += " is Ready"
-			metaCondition.Reason = "Succeeded"
+			metaCondition.Reason = hmc.SucceededReason
 		}
 		apimeta.SetStatusCondition(clusterDeployment.GetConditions(), metaCondition)
 	}
@@ -231,7 +238,7 @@ func (r *ClusterDeploymentReconciler) updateCluster(ctx context.Context, mc *hmc
 		return ctrl.Result{}, err
 	}
 	l.Info("Downloading Helm chart")
-	hcChart, err := helm.DownloadChartFromArtifact(ctx, source.GetArtifact())
+	hcChart, err := r.DownloadChartFromArtifact(ctx, source.GetArtifact())
 	if err != nil {
 		apimeta.SetStatusCondition(mc.GetConditions(), metav1.Condition{
 			Type:    hmc.HelmChartReadyCondition,
@@ -243,15 +250,13 @@
 	}
 
 	l.Info("Initializing Helm client")
-	getter := helm.NewMemoryRESTClientGetter(r.Config, r.RESTMapper())
-	actionConfig := new(action.Configuration)
-	err = actionConfig.Init(getter, mc.Namespace, "secret", l.Info)
+	actionConfig, err := r.InitializeConfiguration(mc, l.Info)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
 
 	l.Info("Validating Helm chart with provided values")
-	if err := validateReleaseWithValues(ctx, actionConfig, mc, hcChart); err != nil {
+	if err = r.EnsureReleaseWithValues(ctx, actionConfig, hcChart, mc); err != nil {
 		apimeta.SetStatusCondition(mc.GetConditions(), metav1.Condition{
 			Type:    hmc.HelmChartReadyCondition,
 			Status:  metav1.ConditionFalse,
@@ -492,22 +497,6 @@ func (r *ClusterDeploymentReconciler) updateServices(ctx context.Context, mc *hm
 	return ctrl.Result{}, nil
 }
 
-func validateReleaseWithValues(ctx context.Context, actionConfig *action.Configuration, clusterDeployment *hmc.ClusterDeployment, hcChart *chart.Chart) error {
-	install := action.NewInstall(actionConfig)
-	install.DryRun = true
-	install.ReleaseName = clusterDeployment.Name
-	install.Namespace = clusterDeployment.Namespace
-	install.ClientOnly = true
-
-	vals, err := clusterDeployment.HelmValues()
-	if err != nil {
-		return err
-	}
-
-	_, err = install.RunWithContext(ctx, hcChart, vals)
-	return err
-}
-
 // updateStatus updates the status for the ClusterDeployment object.
 func (r *ClusterDeploymentReconciler) updateStatus(ctx context.Context, clusterDeployment *hmc.ClusterDeployment, template *hmc.ClusterTemplate) error {
 	clusterDeployment.Status.ObservedGeneration = clusterDeployment.Generation
@@ -854,6 +843,8 @@ func (r *ClusterDeploymentReconciler) setAvailableUpgrades(ctx context.Context,
 
 // SetupWithManager sets up the controller with the Manager.
 func (r *ClusterDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	r.helmActor = helm.NewActor(r.Config, r.RESTMapper())
+
 	return ctrl.NewControllerManagedBy(mgr).
 		For(&hmc.ClusterDeployment{}).
 		Watches(&hcv2.HelmRelease{},
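Extracting the unexported `helmActor` interface turns the reconciler's hard dependency on the `helm` package into an injectable seam: `SetupWithManager` wires in the production `helm.NewActor`, while tests can drop in a stub. Here is a distilled, self-contained sketch of that pattern; the names (`chartFetcher`, `Reconciler`, `prodFetcher`, `fakeFetcher`) are hypothetical and only mirror the `helmActor`/`Actor`/`fakeHelmActor` trio in this diff.

```go
package main

import (
	"context"
	"fmt"
)

// chartFetcher plays the role of helmActor: the reconciler depends on
// behavior, not on a concrete package.
type chartFetcher interface {
	Fetch(ctx context.Context, url string) (string, error)
}

// Reconciler embeds the interface, just as ClusterDeploymentReconciler
// embeds helmActor; SetupWithManager would assign the production value.
type Reconciler struct {
	chartFetcher
}

type prodFetcher struct{}

func (prodFetcher) Fetch(_ context.Context, url string) (string, error) {
	return "chart-from-" + url, nil // a real implementation would download
}

type fakeFetcher struct{}

func (fakeFetcher) Fetch(context.Context, string) (string, error) {
	return "static-test-chart", nil // deterministic stand-in for tests
}

func main() {
	prod := &Reconciler{chartFetcher: prodFetcher{}}
	test := &Reconciler{chartFetcher: fakeFetcher{}}

	// Both promote Fetch through the embedded interface; callers are unchanged.
	for _, r := range []*Reconciler{prod, test} {
		c, _ := r.Fetch(context.Background(), "oci://example/chart")
		fmt.Println(c)
	}
}
```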
diff --git a/internal/controller/clusterdeployment_controller_test.go b/internal/controller/clusterdeployment_controller_test.go
index bf6109a3..0c62fecc 100644
--- a/internal/controller/clusterdeployment_controller_test.go
+++ b/internal/controller/clusterdeployment_controller_test.go
@@ -19,142 +19,214 @@ import (
 	"time"
 
 	hcv2 "github.com/fluxcd/helm-controller/api/v2"
+	meta2 "github.com/fluxcd/pkg/apis/meta"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
 	corev1 "k8s.io/api/core/v1"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
-	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/client-go/rest"
+	clusterapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	hmc "github.com/K0rdent/kcm/api/v1alpha1"
 )
 
+type fakeHelmActor struct{}
+
+func (*fakeHelmActor) DownloadChartFromArtifact(_ context.Context, _ *sourcev1.Artifact) (*chart.Chart, error) {
+	return &chart.Chart{
+		Metadata: &chart.Metadata{
+			APIVersion: "v2",
+			Version:    "0.1.0",
+			Name:       "test-cluster-chart",
+		},
+	}, nil
+}
+
+func (*fakeHelmActor) InitializeConfiguration(_ *hmc.ClusterDeployment, _ action.DebugLog) (*action.Configuration, error) {
+	return &action.Configuration{}, nil
+}
+
+func (*fakeHelmActor) EnsureReleaseWithValues(_ context.Context, _ *action.Configuration, _ *chart.Chart, _ *hmc.ClusterDeployment) error {
+	return nil
+}
+
 var _ = Describe("ClusterDeployment Controller", func() {
 	Context("When reconciling a resource", func() {
 		const (
-			clusterDeploymentName      = "test-cluster-deployment"
-			clusterDeploymentNamespace = "test"
-
-			templateName    = "test-template"
-			svcTemplateName = "test-svc-template"
-			credentialName  = "test-credential"
+			helmChartURL = "http://source-controller.hmc-system.svc.cluster.local/helmchart/hmc-system/test-chart/0.1.0.tar.gz"
 		)
 
-		ctx := context.Background()
+		// resources required for ClusterDeployment reconciliation
+		var (
+			namespace                = corev1.Namespace{}
+			secret                   = corev1.Secret{}
+			awsCredential            = hmc.Credential{}
+			clusterTemplate          = hmc.ClusterTemplate{}
+			serviceTemplate          = hmc.ServiceTemplate{}
+			helmRepo                 = sourcev1.HelmRepository{}
+			clusterTemplateHelmChart = sourcev1.HelmChart{}
+			serviceTemplateHelmChart = sourcev1.HelmChart{}
+
+			clusterDeployment = hmc.ClusterDeployment{}
 
-		typeNamespacedName := types.NamespacedName{
-			Name:      clusterDeploymentName,
-			Namespace: clusterDeploymentNamespace,
-		}
-		clusterDeployment := &hmc.ClusterDeployment{}
-		template := &hmc.ClusterTemplate{}
-		svcTemplate := &hmc.ServiceTemplate{}
-		management := &hmc.Management{}
-		credential := &hmc.Credential{}
-		namespace := &corev1.Namespace{}
+			cluster           = clusterapiv1beta1.Cluster{}
+			machineDeployment = clusterapiv1beta1.MachineDeployment{}
+			helmRelease       = hcv2.HelmRelease{}
+		)
 
 		BeforeEach(func() {
-			By("creating ClusterDeployment namespace")
-			err := k8sClient.Get(ctx, types.NamespacedName{Name: clusterDeploymentNamespace}, namespace)
-			if err != nil && errors.IsNotFound(err) {
-				namespace = &corev1.Namespace{
+			By("ensure namespace", func() {
+				namespace = corev1.Namespace{
 					ObjectMeta: metav1.ObjectMeta{
-						Name: clusterDeploymentNamespace,
+						GenerateName: "test-namespace-",
 					},
 				}
-				Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
-			}
+				Expect(k8sClient.Create(ctx, &namespace)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &namespace)
+			})
+
+			By("ensure HelmRepository resource", func() {
+				helmRepo = sourcev1.HelmRepository{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "test-repository-",
+						Namespace:    namespace.Name,
+					},
+					Spec: sourcev1.HelmRepositorySpec{
+						Insecure: true,
+						Interval: metav1.Duration{
+							Duration: 10 * time.Minute,
+						},
+						Provider: "generic",
+						Type:     "oci",
+						URL:      "oci://hmc-local-registry:5000/charts",
+					},
+				}
+				Expect(k8sClient.Create(ctx, &helmRepo)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &helmRepo)
+			})
 
-			By("creating the custom resource for the Kind ClusterTemplate")
-			err = k8sClient.Get(ctx, typeNamespacedName, template)
-			if err != nil && errors.IsNotFound(err) {
-				template = &hmc.ClusterTemplate{
+			By("ensure HelmChart resources", func() {
+				clusterTemplateHelmChart = sourcev1.HelmChart{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      templateName,
-						Namespace: clusterDeploymentNamespace,
+						GenerateName: "test-cluster-template-chart-",
+						Namespace:    namespace.Name,
+					},
+					Spec: sourcev1.HelmChartSpec{
+						Chart: "test-cluster",
+						Interval: metav1.Duration{
+							Duration: 10 * time.Minute,
+						},
+						ReconcileStrategy: sourcev1.ReconcileStrategyChartVersion,
+						SourceRef: sourcev1.LocalHelmChartSourceReference{
+							Kind: "HelmRepository",
+							Name: helmRepo.Name,
+						},
+						Version: "0.1.0",
+					},
+				}
+				Expect(k8sClient.Create(ctx, &clusterTemplateHelmChart)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &clusterTemplateHelmChart)
+
+				serviceTemplateHelmChart = sourcev1.HelmChart{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "test-service-template-chart-",
+						Namespace:    namespace.Name,
+					},
+					Spec: sourcev1.HelmChartSpec{
+						Chart: "test-service",
+						Interval: metav1.Duration{
+							Duration: 10 * time.Minute,
+						},
+						ReconcileStrategy: sourcev1.ReconcileStrategyChartVersion,
+						SourceRef: sourcev1.LocalHelmChartSourceReference{
+							Kind: "HelmRepository",
+							Name: helmRepo.Name,
+						},
+						Version: "0.1.0",
+					},
+				}
+				Expect(k8sClient.Create(ctx, &serviceTemplateHelmChart)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &serviceTemplateHelmChart)
+			})
+
+			By("ensure ClusterTemplate resource", func() {
+				clusterTemplate = hmc.ClusterTemplate{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "test-cluster-template-",
+						Namespace:    namespace.Name,
 					},
 					Spec: hmc.ClusterTemplateSpec{
 						Helm: hmc.HelmSpec{
 							ChartRef: &hcv2.CrossNamespaceSourceReference{
 								Kind:      "HelmChart",
-								Name:      "ref-test",
-								Namespace: "default",
+								Name:      clusterTemplateHelmChart.Name,
+								Namespace: namespace.Name,
 							},
 						},
 					},
 				}
-				Expect(k8sClient.Create(ctx, template)).To(Succeed())
-				template.Status = hmc.ClusterTemplateStatus{
+				Expect(k8sClient.Create(ctx, &clusterTemplate)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &clusterTemplate)
+
+				clusterTemplate.Status = hmc.ClusterTemplateStatus{
 					TemplateStatusCommon: hmc.TemplateStatusCommon{
 						TemplateValidationStatus: hmc.TemplateValidationStatus{
 							Valid: true,
 						},
-						Config: &apiextensionsv1.JSON{
-							Raw: []byte(`{"foo":"bar"}`),
-						},
 					},
 					Providers: hmc.Providers{"infrastructure-aws"},
 				}
-				Expect(k8sClient.Status().Update(ctx, template)).To(Succeed())
-			}
+				Expect(k8sClient.Status().Update(ctx, &clusterTemplate)).To(Succeed())
+			})
 
-			By("creating the custom resource for the Kind ServiceTemplate")
-			err = k8sClient.Get(ctx, client.ObjectKey{Namespace: clusterDeploymentNamespace, Name: svcTemplateName}, svcTemplate)
-			if err != nil && errors.IsNotFound(err) {
-				svcTemplate = &hmc.ServiceTemplate{
+			By("ensure ServiceTemplate resource", func() {
+				serviceTemplate = hmc.ServiceTemplate{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      svcTemplateName,
-						Namespace: clusterDeploymentNamespace,
+						GenerateName: "test-service-template-",
+						Namespace:    namespace.Name,
 					},
 					Spec: hmc.ServiceTemplateSpec{
 						Helm: hmc.HelmSpec{
 							ChartRef: &hcv2.CrossNamespaceSourceReference{
 								Kind:      "HelmChart",
-								Name:      "ref-test",
-								Namespace: "default",
+								Name:      serviceTemplateHelmChart.Name,
+								Namespace: namespace.Name,
 							},
 						},
 					},
 				}
-				Expect(k8sClient.Create(ctx, svcTemplate)).To(Succeed())
-				svcTemplate.Status = hmc.ServiceTemplateStatus{
+				Expect(k8sClient.Create(ctx, &serviceTemplate)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &serviceTemplate)
+
+				serviceTemplate.Status = hmc.ServiceTemplateStatus{
 					TemplateStatusCommon: hmc.TemplateStatusCommon{
+						ChartRef: &hcv2.CrossNamespaceSourceReference{
+							Kind:      "HelmChart",
+							Name:      serviceTemplateHelmChart.Name,
+							Namespace: namespace.Name,
+						},
 						TemplateValidationStatus: hmc.TemplateValidationStatus{
 							Valid: true,
 						},
 					},
 				}
-				Expect(k8sClient.Status().Update(ctx, svcTemplate)).To(Succeed())
-			}
+				Expect(k8sClient.Status().Update(ctx, &serviceTemplate)).To(Succeed())
+			})
 
-			By("creating the custom resource for the Kind Management")
-			err = k8sClient.Get(ctx, typeNamespacedName, management)
-			if err != nil && errors.IsNotFound(err) {
-				management = &hmc.Management{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: hmc.ManagementName,
-					},
-					Spec: hmc.ManagementSpec{
-						Release: "test-release",
-					},
-				}
-				Expect(k8sClient.Create(ctx, management)).To(Succeed())
-				management.Status = hmc.ManagementStatus{
-					AvailableProviders: hmc.Providers{"infrastructure-aws"},
-				}
-				Expect(k8sClient.Status().Update(ctx, management)).To(Succeed())
-			}
-			By("creating the custom resource for the Kind Credential")
-			err = k8sClient.Get(ctx, typeNamespacedName, credential)
-			if err != nil && errors.IsNotFound(err) {
-				credential = &hmc.Credential{
+			By("ensure AWS Credential resource", func() {
+				awsCredential = hmc.Credential{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      credentialName,
-						Namespace: clusterDeploymentNamespace,
+						GenerateName: "test-credential-aws-",
+						Namespace:    namespace.Name,
 					},
 					Spec: hmc.CredentialSpec{
 						IdentityRef: &corev1.ObjectReference{
@@ -164,65 +236,358 @@ var _ = Describe("ClusterDeployment Controller", func() {
 						},
 					},
 				}
-				Expect(k8sClient.Create(ctx, credential)).To(Succeed())
-				credential.Status = hmc.CredentialStatus{
+				Expect(k8sClient.Create(ctx, &awsCredential)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &awsCredential)
+
+				awsCredential.Status = hmc.CredentialStatus{
 					Ready: true,
 				}
-				Expect(k8sClient.Status().Update(ctx, credential)).To(Succeed())
+				Expect(k8sClient.Status().Update(ctx, &awsCredential)).To(Succeed())
+			})
+		})
+
+		AfterEach(func() {
+			By("cleanup finalizer", func() {
+				Expect(controllerutil.RemoveFinalizer(&clusterDeployment, hmc.ClusterDeploymentFinalizer)).To(BeTrue())
+				Expect(k8sClient.Update(ctx, &clusterDeployment)).To(Succeed())
+			})
+		})
+
+		It("should reconcile ClusterDeployment in dry-run mode", func() {
+			controllerReconciler := &ClusterDeploymentReconciler{
+				Client:    mgrClient,
+				helmActor: &fakeHelmActor{},
+				Config:    &rest.Config{},
 			}
 
-			By("creating the custom resource for the Kind ClusterDeployment")
-			err = k8sClient.Get(ctx, typeNamespacedName, clusterDeployment)
-			if err != nil && errors.IsNotFound(err) {
-				clusterDeployment = &hmc.ClusterDeployment{
+			By("creating ClusterDeployment resource", func() {
+				clusterDeployment = hmc.ClusterDeployment{
 					ObjectMeta: metav1.ObjectMeta{
-						Name:      clusterDeploymentName,
-						Namespace: clusterDeploymentNamespace,
+						GenerateName: "test-cluster-deployment-",
+						Namespace:    namespace.Name,
 					},
 					Spec: hmc.ClusterDeploymentSpec{
-						Template:   templateName,
-						Credential: credentialName,
+						Template:   clusterTemplate.Name,
+						Credential: awsCredential.Name,
+						DryRun:     true,
+					},
+				}
+				Expect(k8sClient.Create(ctx, &clusterDeployment)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &clusterDeployment)
+			})
+
+			By("ensuring finalizer is added", func() {
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).NotTo(HaveOccurred())
+					g.Expect(Object(&clusterDeployment)()).Should(SatisfyAll(
+						HaveField("Finalizers", ContainElement(hmc.ClusterDeploymentFinalizer)),
+					))
+				}).Should(Succeed())
+			})
+
+			By("reconciling resource with finalizer", func() {
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).To(HaveOccurred())
+					g.Expect(Object(&clusterDeployment)()).Should(SatisfyAll(
+						HaveField("Status.Conditions", ContainElement(SatisfyAll(
+							HaveField("Type", hmc.TemplateReadyCondition),
+							HaveField("Status", metav1.ConditionTrue),
+							HaveField("Reason", hmc.SucceededReason),
+							HaveField("Message", "Template is valid"),
+						))),
+					))
+				}).Should(Succeed())
+			})
+
+			By("reconciling when dependencies are not in valid state", func() {
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).To(HaveOccurred())
+					g.Expect(err.Error()).To(ContainSubstring("helm chart source is not provided"))
+				}).Should(Succeed())
+			})
+
+			By("patching ClusterTemplate and corresponding HelmChart statuses", func() {
+				Expect(Get(&clusterTemplate)()).To(Succeed())
+				clusterTemplate.Status.ChartRef = &hcv2.CrossNamespaceSourceReference{
+					Kind:      "HelmChart",
+					Name:      clusterTemplateHelmChart.Name,
+					Namespace: namespace.Name,
+				}
+				Expect(k8sClient.Status().Update(ctx, &clusterTemplate)).To(Succeed())
+
+				Expect(Get(&clusterTemplateHelmChart)()).To(Succeed())
+				clusterTemplateHelmChart.Status.URL = helmChartURL
+				clusterTemplateHelmChart.Status.Artifact = &sourcev1.Artifact{
+					URL:            helmChartURL,
+					LastUpdateTime: metav1.Now(),
+				}
+				Expect(k8sClient.Status().Update(ctx, &clusterTemplateHelmChart)).To(Succeed())
+
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).NotTo(HaveOccurred())
+					g.Expect(Object(&clusterDeployment)()).Should(SatisfyAll(
+						HaveField("Status.Conditions", ContainElements(
+							SatisfyAll(
+								HaveField("Type", hmc.HelmChartReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+								HaveField("Message", "Helm chart is valid"),
+							),
+							SatisfyAll(
+								HaveField("Type", hmc.CredentialReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+								HaveField("Message", "Credential is Ready"),
+							),
+						))))
+				}).Should(Succeed())
+			})
+		})
+
+		It("should reconcile ClusterDeployment with AWS credentials", func() {
+			controllerReconciler := &ClusterDeploymentReconciler{
+				Client:        mgrClient,
+				helmActor:     &fakeHelmActor{},
+				Config:        &rest.Config{},
+				DynamicClient: dynamicClient,
+			}
+
+			By("creating ClusterDeployment resource", func() {
+				clusterDeployment = hmc.ClusterDeployment{
+					ObjectMeta: metav1.ObjectMeta{
+						GenerateName: "test-cluster-deployment-",
+						Namespace:    namespace.Name,
+					},
+					Spec: hmc.ClusterDeploymentSpec{
+						Template:   clusterTemplate.Name,
+						Credential: awsCredential.Name,
 						Services: []hmc.ServiceSpec{
 							{
-								Template: svcTemplateName,
-								Name:     "test-svc-name",
+								Template: serviceTemplate.Name,
+								Name:     "test-service",
 							},
 						},
+						Config: &apiextensionsv1.JSON{
+							Raw: []byte(`{"foo":"bar"}`),
+						},
 					},
 				}
-				Expect(k8sClient.Create(ctx, clusterDeployment)).To(Succeed())
-			}
-		})
+				Expect(k8sClient.Create(ctx, &clusterDeployment)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &clusterDeployment)
+			})
 
-		AfterEach(func() {
-			By("Cleanup")
+			By("ensuring related resources exist", func() {
+				Expect(Get(&clusterTemplate)()).To(Succeed())
+				clusterTemplate.Status.ChartRef = &hcv2.CrossNamespaceSourceReference{
+					Kind:      "HelmChart",
+					Name:      clusterTemplateHelmChart.Name,
+					Namespace: namespace.Name,
+				}
+				Expect(k8sClient.Status().Update(ctx, &clusterTemplate)).To(Succeed())
 
-			controllerReconciler := &ClusterDeploymentReconciler{
-				Client: k8sClient,
-			}
+				Expect(Get(&clusterTemplateHelmChart)()).To(Succeed())
+				clusterTemplateHelmChart.Status.URL = helmChartURL
+				clusterTemplateHelmChart.Status.Artifact = &sourcev1.Artifact{
+					URL:            helmChartURL,
+					LastUpdateTime: metav1.Now(),
+				}
+				Expect(k8sClient.Status().Update(ctx, &clusterTemplateHelmChart)).To(Succeed())
+
+				cluster = clusterapiv1beta1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      clusterDeployment.Name,
+						Namespace: namespace.Name,
+						Labels:    map[string]string{hmc.FluxHelmChartNameKey: clusterDeployment.Name},
+					},
+				}
+				Expect(k8sClient.Create(ctx, &cluster)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &cluster)
 
-			Expect(k8sClient.Delete(ctx, clusterDeployment)).To(Succeed())
-			// Running reconcile to remove the finalizer and delete the ClusterDeployment
-			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: typeNamespacedName})
-			Expect(err).NotTo(HaveOccurred())
+				machineDeployment = clusterapiv1beta1.MachineDeployment{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      clusterDeployment.Name + "-md",
+						Namespace: namespace.Name,
+						Labels:    map[string]string{hmc.FluxHelmChartNameKey: clusterDeployment.Name},
+					},
+					Spec: clusterapiv1beta1.MachineDeploymentSpec{
+						ClusterName: cluster.Name,
+						Template: clusterapiv1beta1.MachineTemplateSpec{
+							Spec: clusterapiv1beta1.MachineSpec{
+								ClusterName: cluster.Name,
+							},
+						},
+					},
+				}
+				Expect(k8sClient.Create(ctx, &machineDeployment)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &machineDeployment)
 
-			Eventually(k8sClient.Get, 1*time.Minute, 5*time.Second).WithArguments(ctx, typeNamespacedName, clusterDeployment).Should(HaveOccurred())
+				secret = corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      clusterDeployment.Name + "-kubeconfig",
+						Namespace: namespace.Name,
+					},
+				}
+				Expect(k8sClient.Create(ctx, &secret)).To(Succeed())
+				DeferCleanup(k8sClient.Delete, &secret)
+			})
 
-			Expect(k8sClient.Delete(ctx, template)).To(Succeed())
-			Expect(k8sClient.Delete(ctx, management)).To(Succeed())
-			Expect(k8sClient.Delete(ctx, namespace)).To(Succeed())
-		})
-		It("should successfully reconcile the resource", func() {
-			By("Reconciling the created resource")
-			controllerReconciler := &ClusterDeploymentReconciler{
-				Client: k8sClient,
-				Config: &rest.Config{},
-			}
+			By("ensuring conditions updates after reconciliation", func() {
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).To(HaveOccurred())
+					g.Expect(Object(&clusterDeployment)()).Should(SatisfyAll(
+						HaveField("Finalizers", ContainElement(hmc.ClusterDeploymentFinalizer)),
+						HaveField("Status.Conditions", ContainElements(
+							SatisfyAll(
+								HaveField("Type", hmc.TemplateReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+								HaveField("Message", "Template is valid"),
+							),
+							SatisfyAll(
+								HaveField("Type", hmc.HelmChartReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+								HaveField("Message", "Helm chart is valid"),
+							),
+							SatisfyAll(
+								HaveField("Type", hmc.CredentialReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+								HaveField("Message", "Credential is Ready"),
+							),
+						))))
+				}).Should(Succeed())
+			})
 
-			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
-				NamespacedName: typeNamespacedName,
+			By("ensuring related resources in proper state", func() {
+				helmRelease = hcv2.HelmRelease{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      clusterDeployment.Name,
+						Namespace: namespace.Name,
+					},
+				}
+				Expect(Get(&helmRelease)()).To(Succeed())
+				meta.SetStatusCondition(&helmRelease.Status.Conditions, metav1.Condition{
+					Type:               meta2.ReadyCondition,
+					Status:             metav1.ConditionTrue,
+					LastTransitionTime: metav1.Now(),
+					Reason:             hcv2.InstallSucceededReason,
+				})
+				Expect(k8sClient.Status().Update(ctx, &helmRelease)).To(Succeed())
+
+				Expect(Get(&cluster)()).To(Succeed())
+				cluster.SetConditions([]clusterapiv1beta1.Condition{
+					{
+						Type:               clusterapiv1beta1.ControlPlaneInitializedCondition,
+						Status:             corev1.ConditionTrue,
+						LastTransitionTime: metav1.Now(),
+					},
+					{
+						Type:               clusterapiv1beta1.ControlPlaneReadyCondition,
+						Status:             corev1.ConditionTrue,
+						LastTransitionTime: metav1.Now(),
+					},
+					{
+						Type:               clusterapiv1beta1.InfrastructureReadyCondition,
+						Status:             corev1.ConditionTrue,
+						LastTransitionTime: metav1.Now(),
+					},
+				})
+				Expect(k8sClient.Status().Update(ctx, &cluster)).To(Succeed())
+
+				Expect(Get(&machineDeployment)()).To(Succeed())
+				machineDeployment.SetConditions([]clusterapiv1beta1.Condition{
+					{
+						Type:               clusterapiv1beta1.MachineDeploymentAvailableCondition,
+						Status:             corev1.ConditionTrue,
+						LastTransitionTime: metav1.Now(),
+					},
+				})
+				Expect(k8sClient.Status().Update(ctx, &machineDeployment)).To(Succeed())
 			})
-			Expect(err).NotTo(HaveOccurred())
+
+			By("ensuring ClusterDeployment is reconciled", func() {
+				Eventually(func(g Gomega) {
+					_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
+						NamespacedName: client.ObjectKeyFromObject(&clusterDeployment),
+					})
+					g.Expect(err).NotTo(HaveOccurred())
+					g.Expect(Object(&clusterDeployment)()).Should(SatisfyAll(
+						HaveField("Status.Conditions", ContainElements(
+							SatisfyAll(
+								HaveField("Type", hmc.HelmReleaseReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hcv2.InstallSucceededReason),
+							),
+							SatisfyAll(
+								HaveField("Type", hmc.SveltosProfileReadyCondition),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+							),
+							SatisfyAll(
+								HaveField("Type", string(clusterapiv1beta1.ControlPlaneInitializedCondition)),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+							),
+							SatisfyAll(
+								HaveField("Type", string(clusterapiv1beta1.ControlPlaneReadyCondition)),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+							),
+							SatisfyAll(
+								HaveField("Type", string(clusterapiv1beta1.InfrastructureReadyCondition)),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+							),
+							SatisfyAll(
+								HaveField("Type", string(clusterapiv1beta1.MachineDeploymentAvailableCondition)),
+								HaveField("Status", metav1.ConditionTrue),
+								HaveField("Reason", hmc.SucceededReason),
+							),
+							// TODO (#852 brongineer): add corresponding resources with expected state for successful reconciliation
+							// SatisfyAll(
+							//	HaveField("Type", hmc.FetchServicesStatusSuccessCondition),
+							//	HaveField("Status", metav1.ConditionTrue),
+							//	HaveField("Reason", hmc.SucceededReason),
+							// ),
+							// SatisfyAll(
+							//	HaveField("Type", hmc.ReadyCondition),
+							//	HaveField("Status", metav1.ConditionTrue),
+							//	HaveField("Reason", hmc.SucceededReason),
+							// ),
+						))))
+				}).Should(Succeed())
+			})
+		})
+
+		// TODO (#852 brongineer): Add test for ClusterDeployment reconciliation with Azure credentials
+		PIt("should reconcile ClusterDeployment with Azure credentials", func() {
+			// TBD
+		})
+
+		// TODO (#852 brongineer): Add tests for ClusterDeployment reconciliation with other providers' credentials
+		PIt("should reconcile ClusterDeployment with XXX credentials", func() {
+			// TBD
+		})
+
+		// TODO (#852 brongineer): Add test for ClusterDeployment deletion
+		PIt("should reconcile ClusterDeployment deletion", func() {
+			// TBD
 		})
 	})
 })
diff --git a/internal/controller/management_controller_test.go b/internal/controller/management_controller_test.go
index 90b85786..39bd600b 100644
--- a/internal/controller/management_controller_test.go
+++ b/internal/controller/management_controller_test.go
@@ -15,7 +15,6 @@
 package controller
 
 import (
-	"context"
 	"fmt"
 	"time"
 
@@ -42,8 +41,6 @@ var _ = Describe("Management Controller", func() {
 	Context("When reconciling a resource", func() {
 		const resourceName = "test-resource"
 
-		ctx := context.Background()
-
 		typeNamespacedName := types.NamespacedName{
 			Name:      resourceName,
 			Namespace: "default",
diff --git a/internal/controller/multiclusterservice_controller_test.go b/internal/controller/multiclusterservice_controller_test.go
index 8e4270b8..d8094774 100644
--- a/internal/controller/multiclusterservice_controller_test.go
+++ b/internal/controller/multiclusterservice_controller_test.go
@@ -56,8 +56,6 @@ var _ = Describe("MultiClusterService Controller", func() {
 			}, nil
 		}
 
-		ctx := context.Background()
-
 		namespace := &corev1.Namespace{}
 		helmChart := &sourcev1.HelmChart{}
 		helmRepo := &sourcev1.HelmRepository{}
diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go
index 33a2bcf0..5632b2be 100644
--- a/internal/controller/suite_test.go
+++ b/internal/controller/suite_test.go
@@ -32,14 +32,18 @@ import (
 	. "github.com/onsi/gomega"
 	sveltosv1beta1 "github.com/projectsveltos/addon-controller/api/v1beta1"
 	admissionv1 "k8s.io/api/admissionregistration/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes/scheme"
 	"k8s.io/client-go/rest"
 	capioperator "sigs.k8s.io/cluster-api-operator/api/v1alpha2"
+	clusterapiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	utilyaml "sigs.k8s.io/cluster-api/util/yaml"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
 	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
@@ -56,6 +60,9 @@ const (
 	mutatingWebhookKind   = "MutatingWebhookConfiguration"
 	validatingWebhookKind = "ValidatingWebhookConfiguration"
 	testSystemNamespace   = "test-system-namespace"
+
+	pollingInterval   = 30 * time.Millisecond
+	eventuallyTimeout = 3 * time.Second
 )
 
 var (
@@ -69,6 +76,8 @@ var (
 )
 
 func TestControllers(t *testing.T) {
+	SetDefaultEventuallyPollingInterval(pollingInterval)
+	SetDefaultEventuallyTimeout(eventuallyTimeout)
 	RegisterFailHandler(Fail)
 
 	RunSpecs(t, "Controller Suite")
@@ -120,12 +129,15 @@ var _ = BeforeSuite(func() {
 	Expect(err).NotTo(HaveOccurred())
 	err = capioperator.AddToScheme(scheme.Scheme)
 	Expect(err).NotTo(HaveOccurred())
+	err = clusterapiv1beta1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
 
 	// +kubebuilder:scaffold:scheme
 
 	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
 	Expect(err).NotTo(HaveOccurred())
 	Expect(k8sClient).NotTo(BeNil())
+	SetClient(k8sClient)
 
 	dynamicClient, err = dynamic.NewForConfig(cfg)
 	Expect(err).NotTo(HaveOccurred())
@@ -198,6 +210,8 @@ var _ = BeforeSuite(func() {
 		}
 		return conn.Close()
 	}).Should(Succeed())
+
+	Expect(seedClusterScopedResources(ctx, k8sClient)).To(Succeed())
 })
 
 var _ = AfterSuite(func() {
@@ -245,3 +259,37 @@ func loadWebhooks(path string) ([]*admissionv1.ValidatingWebhookConfiguration, [
 	}
 	return validatingWebhooks, mutatingWebhooks, err
 }
+
+func seedClusterScopedResources(ctx context.Context, k8sClient client.Client) error {
+	var (
+		someProviderName     = "test-provider-name"
+		otherProviderName    = "test-provider-name-other"
+		someExposedContract  = "v1beta1_v1beta2"
+		otherExposedContract = "v1beta1"
+		capiVersion          = "v1beta1"
+	)
+	management := &hmcmirantiscomv1alpha1.Management{}
+
+	By("creating the custom resource for the Kind Management")
+	managementKey := client.ObjectKey{
+		Name: hmcmirantiscomv1alpha1.ManagementName,
+	}
+	err := mgrClient.Get(ctx, managementKey, management)
+	if errors.IsNotFound(err) {
+		management = &hmcmirantiscomv1alpha1.Management{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: hmcmirantiscomv1alpha1.ManagementName,
+			},
+			Spec: hmcmirantiscomv1alpha1.ManagementSpec{
+				Release: "test-release",
+			},
+		}
+		Expect(k8sClient.Create(ctx, management)).To(Succeed())
+		management.Status = hmcmirantiscomv1alpha1.ManagementStatus{
+			AvailableProviders: []string{someProviderName, otherProviderName},
+			CAPIContracts:      map[string]hmcmirantiscomv1alpha1.CompatibilityContracts{someProviderName: {capiVersion: someExposedContract}, otherProviderName: {capiVersion: otherExposedContract}},
otherExposedContract}}, + } + Expect(k8sClient.Status().Update(ctx, management)).To(Succeed()) + } + return client.IgnoreNotFound(err) +} diff --git a/internal/controller/template_controller_test.go b/internal/controller/template_controller_test.go index c01e5879..6402aef4 100644 --- a/internal/controller/template_controller_test.go +++ b/internal/controller/template_controller_test.go @@ -257,22 +257,9 @@ var _ = Describe("Template Controller", func() { return nil }).WithTimeout(timeout).WithPolling(interval).Should(Succeed()) - By("Creating a management cluster object with proper required versions in status") - // must set status here since it's controller by another ctrl - mgmt := &hmcmirantiscomv1alpha1.Management{ - ObjectMeta: metav1.ObjectMeta{ - Name: mgmtName, - }, - Spec: hmcmirantiscomv1alpha1.ManagementSpec{ - Release: "test-release", - }, - } - Expect(k8sClient.Create(ctx, mgmt)).To(Succeed()) - mgmt.Status = hmcmirantiscomv1alpha1.ManagementStatus{ - AvailableProviders: []string{someProviderName, otherProviderName}, - CAPIContracts: map[string]hmcmirantiscomv1alpha1.CompatibilityContracts{someProviderName: {capiVersion: someExposedContract}, otherProviderName: {capiVersion: otherExposedContract}}, - } - Expect(k8sClient.Status().Update(ctx, mgmt)).To(Succeed()) + mgmt := &hmcmirantiscomv1alpha1.Management{} + key := client.ObjectKey{Name: mgmtName} + Expect(k8sClient.Get(ctx, key, mgmt)).To(Succeed()) By("Checking the management cluster appears") Eventually(func() error { @@ -324,13 +311,11 @@ var _ = Describe("Template Controller", func() { Expect(clusterTemplate.Status.ProviderContracts).To(BeEquivalentTo(map[string]string{otherProviderName: otherRequiredContract, someProviderName: someRequiredContract})) By("Removing the created objects") - Expect(k8sClient.Delete(ctx, mgmt)).To(Succeed()) Expect(k8sClient.Delete(ctx, clusterTemplate)).To(Succeed()) By("Checking the created objects have been removed") Eventually(func() bool { - return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(mgmt), &hmcmirantiscomv1alpha1.Management{})) && - apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterTemplate), &hmcmirantiscomv1alpha1.ClusterTemplate{})) + return apierrors.IsNotFound(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterTemplate), &hmcmirantiscomv1alpha1.ClusterTemplate{})) }).WithTimeout(timeout).WithPolling(interval).Should(BeTrue()) }) }) diff --git a/internal/helm/actor.go b/internal/helm/actor.go new file mode 100644 index 00000000..f33ba08a --- /dev/null +++ b/internal/helm/actor.go @@ -0,0 +1,81 @@ +// Copyright 2024 +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/internal/helm/actor.go b/internal/helm/actor.go
new file mode 100644
index 00000000..f33ba08a
--- /dev/null
+++ b/internal/helm/actor.go
@@ -0,0 +1,81 @@
+// Copyright 2024
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helm
+
+import (
+	"context"
+	"errors"
+
+	sourcev1 "github.com/fluxcd/source-controller/api/v1"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	apimeta "k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/client-go/rest"
+
+	"github.com/K0rdent/kcm/api/v1alpha1"
+)
+
+type Actor struct {
+	Config     *rest.Config
+	RESTMapper apimeta.RESTMapper
+}
+
+func NewActor(config *rest.Config, mapper apimeta.RESTMapper) *Actor {
+	return &Actor{
+		Config:     config,
+		RESTMapper: mapper,
+	}
+}
+
+func (*Actor) DownloadChartFromArtifact(ctx context.Context, artifact *sourcev1.Artifact) (*chart.Chart, error) {
+	if artifact == nil {
+		return nil, errors.New("helm chart artifact is not ready yet")
+	}
+	return DownloadChart(ctx, artifact.URL, artifact.Digest)
+}
+
+func (a *Actor) InitializeConfiguration(
+	clusterDeployment *v1alpha1.ClusterDeployment,
+	log action.DebugLog,
+) (*action.Configuration, error) {
+	getter := NewMemoryRESTClientGetter(a.Config, a.RESTMapper)
+	actionConfig := new(action.Configuration)
+	err := actionConfig.Init(getter, clusterDeployment.Namespace, "secret", log)
+	if err != nil {
+		return nil, err
+	}
+	return actionConfig, nil
+}
+
+func (*Actor) EnsureReleaseWithValues(
+	ctx context.Context,
+	actionConfig *action.Configuration,
+	hcChart *chart.Chart,
+	clusterDeployment *v1alpha1.ClusterDeployment,
+) error {
+	install := action.NewInstall(actionConfig)
+	install.DryRun = true
+	install.ReleaseName = clusterDeployment.Name
+	install.Namespace = clusterDeployment.Namespace
+	install.ClientOnly = true
+
+	vals, err := clusterDeployment.HelmValues()
+	if err != nil {
+		return err
+	}
+
+	_, err = install.RunWithContext(ctx, hcChart, vals)
+	return err
+}
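One property worth noting about `EnsureReleaseWithValues`: because the install is both `DryRun` and `ClientOnly`, Helm substitutes in-memory capabilities, release storage, and a printing kube client, so no live cluster is contacted. That behavior is also what lets the test suite's `fakeHelmActor` return an empty `action.Configuration`. A standalone sketch of that behavior under those assumptions; the inline chart is invented for the example.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"helm.sh/helm/v3/pkg/action"
	"helm.sh/helm/v3/pkg/chart"
)

func main() {
	// An empty Configuration suffices: with ClientOnly set, the install
	// action wires up fake cluster components internally.
	install := action.NewInstall(&action.Configuration{})
	install.DryRun = true
	install.ClientOnly = true
	install.ReleaseName = "probe"
	install.Namespace = "default"

	// Minimal in-memory chart with a single template to render.
	c := &chart.Chart{
		Metadata: &chart.Metadata{APIVersion: "v2", Name: "probe", Version: "0.1.0"},
		Templates: []*chart.File{
			{
				Name: "templates/cm.yaml",
				Data: []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: {{ .Release.Name }}\n"),
			},
		},
	}

	rel, err := install.RunWithContext(context.Background(), c, map[string]any{})
	if err != nil {
		log.Fatal(err) // chart or values failed to render/validate
	}
	fmt.Println(rel.Manifest) // rendered manifest, never applied anywhere
}
```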