From 526020489d205ea7bb84afb35a4ee5383ea5a376 Mon Sep 17 00:00:00 2001
From: Adam Kaplan
Date: Wed, 1 Nov 2023 09:48:57 -0400
Subject: [PATCH] feat: Install Cluster Build Strategies with Operator

Install the build strategy samples from shipwright-io/build, at the
v0.12.0 tag. Only cluster build strategies were added - namespace-scoped
build strategies were dropped. Red Hat-specific strategies were also
dropped, in part to avoid potential trademark issues.

The build strategies use v1alpha1 APIs because this is the stored
version in v0.12.0. Testing revealed that if we reconciled v1beta1
APIs, we would need to wait for (and mock out!) the conversion webhook
deployment. Lastly, the operator was granted RBAC permission to
administer all ClusterBuildStrategies.

Reconciling build strategies required adding logic to wait for the
required CRDs to be installed on the cluster first. If a requeue is
required, the operator will report the Ready status condition as
"Unknown." The manifestival library code also had to be enhanced to
optionally recurse a directory for manifests to deploy.

Finally, development of this feature revealed refactoring opportunities
with respect to our use of k8s and controller-runtime client libraries,
and the way we are organizing/managing Manifestival-driven reconcilers.
Some refactoring of test code was included to simplify future testing
efforts.
---
 ...wright-operator.clusterserviceversion.yaml |  12 ++
 config/rbac/role.yaml                         |  12 ++
 controllers/buildstrategies_test.go           |  48 +++++
 controllers/default_test.go                   | 119 +---------
 controllers/shipwrightbuild_controller.go     |  50 ++++-
 .../shipwrightbuild_controller_test.go        |  22 +-
 controllers/shipwrightbuild_rbac.go           |   9 +-
 controllers/suite_test.go                     | 121 ++++++++++-
 ...gy_buildah_shipwright_managed_push_cr.yaml | 204 ++++++++++++++++++
 ...tegy_buildah_strategy_managed_push_cr.yaml | 204 ++++++++++++++++++
 .../buildkit/buildstrategy_buildkit_cr.yaml   | 170 +++++++++++++++
 ...buildstrategy_buildpacks-v3-heroku_cr.yaml | 100 +++++++++
 .../buildstrategy_buildpacks-v3_cr.yaml       | 100 +++++++++
 .../kaniko/buildstrategy_kaniko-trivy_cr.yaml |  82 +++++++
 .../kaniko/buildstrategy_kaniko_cr.yaml       |  56 +++++
 .../buildstrategy/ko/buildstrategy_ko_cr.yaml | 116 ++++++++++
 .../buildstrategy_source-to-image_cr.yaml     |  69 ++++++
 pkg/buildstrategy/buildstrategy.go            |  32 +++
 pkg/buildstrategy/buildstrategy_test.go       |  92 ++++++++
 pkg/certmanager/cert_manager.go               |   2 +-
 pkg/common/util.go                            |  30 +--
 test/common.go                                |  55 ++++-
 22 files changed, 1557 insertions(+), 148 deletions(-)
 create mode 100644 controllers/buildstrategies_test.go
 create mode 100644 kodata/samples/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/ko/buildstrategy_ko_cr.yaml
 create mode 100644 kodata/samples/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml
 create mode 100644 pkg/buildstrategy/buildstrategy.go
 create mode 100644
pkg/buildstrategy/buildstrategy_test.go diff --git a/bundle/manifests/shipwright-operator.clusterserviceversion.yaml b/bundle/manifests/shipwright-operator.clusterserviceversion.yaml index 749d1a9f..e960def3 100644 --- a/bundle/manifests/shipwright-operator.clusterserviceversion.yaml +++ b/bundle/manifests/shipwright-operator.clusterserviceversion.yaml @@ -739,6 +739,18 @@ spec: - delete - patch - update + - apiGroups: + - shipwright.io + resources: + - clusterbuildstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - authentication.k8s.io resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 407ed6ea..6616ac94 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -361,3 +361,15 @@ rules: - delete - patch - update +- apiGroups: + - shipwright.io + resources: + - clusterbuildstrategies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/controllers/buildstrategies_test.go b/controllers/buildstrategies_test.go new file mode 100644 index 00000000..a27a63a4 --- /dev/null +++ b/controllers/buildstrategies_test.go @@ -0,0 +1,48 @@ +package controllers + +import ( + "fmt" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/operator/api/v1alpha1" + "github.com/shipwright-io/operator/test" +) + +var _ = Describe("Install embedded build strategies", func() { + + var build *v1alpha1.ShipwrightBuild + + BeforeEach(func(ctx SpecContext) { + setupTektonCRDs(ctx) + build = createShipwrightBuild(ctx, "shipwright") + test.CRDEventuallyExists(ctx, k8sClient, "clusterbuildstrategies.shipwright.io") + }) + + When("the install build strategies feature is enabled", func() { + + It("applies the embedded build strategy manifests to the cluster", func(ctx SpecContext) { + expectedBuildStrategies, err := test.ParseBuildStrategyNames() + Expect(err).NotTo(HaveOccurred()) + for _, strategy := range expectedBuildStrategies { + strategyObj := &buildv1alpha1.ClusterBuildStrategy{ + ObjectMeta: metav1.ObjectMeta{ + Name: strategy, + }, + } + By(fmt.Sprintf("checking for build strategy %q", strategy)) + test.EventuallyExists(ctx, k8sClient, strategyObj) + } + + }) + }) + + AfterEach(func(ctx SpecContext) { + deleteShipwrightBuild(ctx, build) + }) + +}) diff --git a/controllers/default_test.go b/controllers/default_test.go index bc09594b..c2ee3415 100644 --- a/controllers/default_test.go +++ b/controllers/default_test.go @@ -1,39 +1,21 @@ package controllers import ( - "context" - g "github.com/onsi/ginkgo/v2" o "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/shipwright-io/operator/api/v1alpha1" - "github.com/shipwright-io/operator/pkg/common" "github.com/shipwright-io/operator/test" ) -// createNamespace creates the namespace informed. 
-func createNamespace(ctx context.Context, name string) { - ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}} - err := k8sClient.Get(ctx, types.NamespacedName{Name: ns.Name}, ns) - if errors.IsNotFound(err) { - err = k8sClient.Create(ctx, ns, &client.CreateOptions{}) - } - o.Expect(err).NotTo(o.HaveOccurred()) -} - var _ = g.Describe("Reconcile default ShipwrightBuild installation", func() { - // namespace where ShipwrightBuild instance will be located - const namespace = "namespace" // targetNamespace namespace where shipwright Controller and dependencies will be located const targetNamespace = "target-namespace" // build Build instance employed during testing @@ -62,109 +44,14 @@ var _ = g.Describe("Reconcile default ShipwrightBuild installation", func() { }, } - truePtr := true g.BeforeEach(func(ctx g.SpecContext) { // setting up the namespaces, where Shipwright Controller will be deployed - createNamespace(ctx, namespace) - - g.By("does tekton taskrun crd exist") - err := k8sClient.Get(ctx, types.NamespacedName{Name: "taskruns.tekton.dev"}, &crdv1.CustomResourceDefinition{}) - if errors.IsNotFound(err) { - g.By("creating tekton taskrun crd") - taskRunCRD := &crdv1.CustomResourceDefinition{} - taskRunCRD.Name = "taskruns.tekton.dev" - taskRunCRD.Spec.Group = "tekton.dev" - taskRunCRD.Spec.Scope = crdv1.NamespaceScoped - taskRunCRD.Spec.Versions = []crdv1.CustomResourceDefinitionVersion{ - { - Name: "v1beta1", - Storage: true, - Schema: &crdv1.CustomResourceValidation{ - OpenAPIV3Schema: &crdv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: &truePtr, - }, - }, - }, - } - taskRunCRD.Spec.Names.Plural = "taskruns" - taskRunCRD.Spec.Names.Singular = "taskrun" - taskRunCRD.Spec.Names.Kind = "TaskRun" - taskRunCRD.Spec.Names.ListKind = "TaskRunList" - taskRunCRD.Status.StoredVersions = []string{"v1beta1"} - err = k8sClient.Create(ctx, taskRunCRD, &client.CreateOptions{}) - o.Expect(err).NotTo(o.HaveOccurred()) - - } - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("does tektonconfig crd exist") - err = k8sClient.Get(ctx, types.NamespacedName{Name: "tektonconfigs.operator.tekton.dev"}, &crdv1.CustomResourceDefinition{}) - if errors.IsNotFound(err) { - tektonOpCRD := &crdv1.CustomResourceDefinition{} - tektonOpCRD.Name = "tektonconfigs.operator.tekton.dev" - tektonOpCRD.Labels = map[string]string{"operator.tekton.dev/release": common.TektonOpMinSupportedVersion} - tektonOpCRD.Spec.Group = "operator.tekton.dev" - tektonOpCRD.Spec.Scope = crdv1.ClusterScoped - tektonOpCRD.Spec.Versions = []crdv1.CustomResourceDefinitionVersion{ - { - Name: "v1alpha1", - Storage: true, - Schema: &crdv1.CustomResourceValidation{ - OpenAPIV3Schema: &crdv1.JSONSchemaProps{ - Type: "object", - XPreserveUnknownFields: &truePtr, - }, - }, - }, - } - tektonOpCRD.Spec.Names.Plural = "tektonconfigs" - tektonOpCRD.Spec.Names.Singular = "tektonconfig" - tektonOpCRD.Spec.Names.Kind = "TektonConfig" - tektonOpCRD.Spec.Names.ListKind = "TektonConfigList" - tektonOpCRD.Status.StoredVersions = []string{"v1alpha1"} - err = k8sClient.Create(ctx, tektonOpCRD, &client.CreateOptions{}) - o.Expect(err).NotTo(o.HaveOccurred()) - } - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("creating a ShipwrightBuild instance") - build = &v1alpha1.ShipwrightBuild{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "cluster", - }, - Spec: v1alpha1.ShipwrightBuildSpec{ - TargetNamespace: targetNamespace, - }, - } - err = k8sClient.Create(ctx, build, &client.CreateOptions{}) - 
o.Expect(err).NotTo(o.HaveOccurred()) - - // when the finalizer is in place, the deployment of manifest elements is done, and therefore - // functional testing can proceed - g.By("waiting for the finalizer to be set") - test.EventuallyContainFinalizer(ctx, k8sClient, build, FinalizerAnnotation) + setupTektonCRDs(ctx) + build = createShipwrightBuild(ctx, targetNamespace) }) g.AfterEach(func(ctx g.SpecContext) { - g.By("deleting the ShipwrightBuild instance") - namespacedName := types.NamespacedName{Namespace: namespace, Name: build.Name} - err := k8sClient.Get(ctx, namespacedName, build) - if errors.IsNotFound(err) { - return - } - o.Expect(err).NotTo(o.HaveOccurred()) - - err = k8sClient.Delete(ctx, build, &client.DeleteOptions{}) - // the delete e2e's can delete this object before this AfterEach runs - if errors.IsNotFound(err) { - return - } - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By("waiting for ShipwrightBuild instance to be completely removed") - test.EventuallyRemoved(ctx, k8sClient, build) + deleteShipwrightBuild(ctx, build) g.By("checking that the shipwright-build-controller deployment has been removed") deployment := baseDeployment.DeepCopy() diff --git a/controllers/shipwrightbuild_controller.go b/controllers/shipwrightbuild_controller.go index 0cec883d..bfb5cba7 100644 --- a/controllers/shipwrightbuild_controller.go +++ b/controllers/shipwrightbuild_controller.go @@ -7,6 +7,7 @@ package controllers import ( "context" "fmt" + "path/filepath" "github.com/go-logr/logr" "github.com/manifestival/manifestival" @@ -25,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/shipwright-io/operator/api/v1alpha1" + "github.com/shipwright-io/operator/pkg/buildstrategy" "github.com/shipwright-io/operator/pkg/certmanager" "github.com/shipwright-io/operator/pkg/common" "github.com/shipwright-io/operator/pkg/tekton" @@ -53,10 +55,11 @@ type ShipwrightBuildReconciler struct { CRDClient crdclientv1.ApiextensionsV1Interface TektonOperatorClient tektonoperatorv1alpha1client.OperatorV1alpha1Interface - Logger logr.Logger // decorated logger - Scheme *runtime.Scheme // runtime scheme - Manifest manifestival.Manifest // release manifests render - TektonManifest manifestival.Manifest // Tekton release manifest render + Logger logr.Logger // decorated logger + Scheme *runtime.Scheme // runtime scheme + Manifest manifestival.Manifest // release manifests render + TektonManifest manifestival.Manifest // Tekton release manifest render + BuildStrategyManifest manifestival.Manifest // Build strategies manifest to render } // setFinalizer append finalizer on the resource, and uses local client to update it immediately. 
@@ -195,6 +198,11 @@ func (r *ShipwrightBuildReconciler) Reconcile(ctx context.Context, req ctrl.Requ logger.Info("Finalizers removed, deletion of manifests completed!") return NoRequeue() } + logger.Info("Deleting cluster build strategies") + if err := r.BuildStrategyManifest.Delete(); err != nil { + logger.Error(err, "deleting cluster build strategies") + return RequeueWithError(err) + } logger.Info("Deleting manifests...") if err := manifest.Delete(); err != nil { @@ -229,6 +237,29 @@ func (r *ShipwrightBuildReconciler) Reconcile(ctx context.Context, req ctrl.Requ logger.Error(err, "setting the finalizer") return RequeueWithError(err) } + + requeue, err = buildstrategy.ReconcileBuildStrategies(ctx, + r.CRDClient, + logger, + r.BuildStrategyManifest) + if err != nil { + logger.Error(err, "reconcile cluster build strategies") + return RequeueWithError(err) + } + if requeue { + logger.Info("requeue waiting for cluster build strategy preconditions") + apimeta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ + Type: ConditionReady, + Status: metav1.ConditionUnknown, + Reason: "ClusterBuildStrategiesWaiting", + Message: "Waiting for cluster build strategies to be deployed", + }) + if updateErr := r.Client.Status().Update(ctx, b); updateErr != nil { + return RequeueWithError(err) + } + return Requeue() + } + apimeta.SetStatusCondition(&b.Status.Conditions, metav1.Condition{ Type: ConditionReady, Status: metav1.ConditionTrue, @@ -246,8 +277,15 @@ func (r *ShipwrightBuildReconciler) Reconcile(ctx context.Context, req ctrl.Requ // setupManifestival instantiate manifestival with local controller attributes, as well as tekton prereqs. func (r *ShipwrightBuildReconciler) setupManifestival() error { var err error - r.Manifest, err = common.SetupManifestival(r.Client, "release.yaml", r.Logger) - return err + r.Manifest, err = common.SetupManifestival(r.Client, "release.yaml", false, r.Logger) + if err != nil { + return err + } + r.BuildStrategyManifest, err = common.SetupManifestival(r.Client, filepath.Join("samples", "buildstrategy"), true, r.Logger) + if err != nil { + return err + } + return nil } // SetupWithManager sets up the controller with the Manager, by instantiating Manifestival and diff --git a/controllers/shipwrightbuild_controller_test.go b/controllers/shipwrightbuild_controller_test.go index e2353e29..07c3842c 100644 --- a/controllers/shipwrightbuild_controller_test.go +++ b/controllers/shipwrightbuild_controller_test.go @@ -158,12 +158,18 @@ func testShipwrightBuildReconcilerReconcile(t *testing.T, targetNamespace string ctx := context.TODO() res, err := r.Reconcile(ctx, req) g.Expect(err).To(o.BeNil()) - g.Expect(res.Requeue).To(o.BeFalse()) + // TODO: Code technically uses two different clientsets that don't talk to each other. + // This makes testing brittle and unable to capture the behavior on a real cluster. + // Requeue can return "true" because the tests think the CRD for ClusterBuildStrategies + // do not exist yet. + g.Expect(res.Requeue).To(o.BeTrue(), "checking requeue for Reconcile") err = c.Get(ctx, deploymentName, &appsv1.Deployment{}) g.Expect(err).To(o.BeNil()) err = c.Get(ctx, namespacedName, b) g.Expect(err).To(o.BeNil()) - g.Expect(b.Status.IsReady()).To(o.BeTrue()) + // Likewise, the ShipwrightBuild object will not report itself ready because it is waiting + // for the ClusterBuildStrategy CRD to be created first. 
+ g.Expect(b.Status.IsReady()).To(o.BeFalse(), "checking ShipwrightBuild readiness") }) t.Run("rollout-manifests-with-images-env-vars", func(t *testing.T) { @@ -174,14 +180,20 @@ func testShipwrightBuildReconcilerReconcile(t *testing.T, targetNamespace string deployment := &appsv1.Deployment{} res, err := r.Reconcile(ctx, req) g.Expect(err).To(o.BeNil()) - g.Expect(res.Requeue).To(o.BeFalse()) + // TODO: Code technically uses two different clientsets that don't talk to each other. + // This makes testing brittle and unable to capture the behavior on a real cluster. + // Requeue can return "true" because the tests think the CRD for ClusterBuildStrategies + // do not exist yet. + g.Expect(res.Requeue).To(o.BeTrue()) err = c.Get(ctx, deploymentName, deployment) g.Expect(err).To(o.BeNil()) containers := deployment.Spec.Template.Spec.Containers g.Expect(containers[0].Image).To(o.Equal("ghcr.io/shipwright-io/build/shipwright-build-controller:nightly-2023-05-05-1683263383")) err = c.Get(ctx, namespacedName, b) g.Expect(err).To(o.BeNil()) - g.Expect(b.Status.IsReady()).To(o.BeTrue()) + // Likewise, the ShipwrightBuild object will not report itself ready because it is waiting + // for the ClusterBuildStrategy CRD to be created first. + g.Expect(b.Status.IsReady()).To(o.BeFalse()) }) // rolling back all changes, making sure the main deployment is also not found afterwards @@ -193,6 +205,8 @@ func testShipwrightBuildReconcilerReconcile(t *testing.T, targetNamespace string // setting a deletion timestemp on the build object, it triggers the rollback logic so the // reconciliation should remove the objects previously deployed + + // TODO: Refactor to use owner references so the rollback is handled by Kubernetes itself. b.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) err = r.Update(ctx, b, &client.UpdateOptions{}) g.Expect(err).To(o.BeNil()) diff --git a/controllers/shipwrightbuild_rbac.go b/controllers/shipwrightbuild_rbac.go index 3ece07b6..be7e9b89 100644 --- a/controllers/shipwrightbuild_rbac.go +++ b/controllers/shipwrightbuild_rbac.go @@ -7,9 +7,13 @@ package controllers // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create // +kubebuilder:rbac:groups=apps,resources=deployments,resourceNames=shipwright-build-controller,verbs=update;patch;delete // +kubebuilder:rbac:groups=apps,resources=deployments/finalizers,resourceNames=shipwright-build-controller,verbs=update +// +kubebuilder:rbac:groups=apps,resources=deployments,resourceNames=shipwright-build-webhook,verbs=update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=deployments/finalizers,resourceNames=shipwright-build-webhook,verbs=update // +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods;events;configmaps;secrets;limitranges;namespaces;services,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create // +kubebuilder:rbac:groups=core,resources=serviceaccounts,resourceNames=shipwright-build-controller,verbs=update;patch;delete +// +kubebuilder:rbac:groups=core,resources=serviceaccounts,resourceNames=shipwright-build-webhook,verbs=update;patch;delete // +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch;create // 
+kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,resourceNames=builds.shipwright.io;buildruns.shipwright.io;buildstrategies.shipwright.io;clusterbuildstrategies.shipwright.io,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,verbs=get;list;watch;create @@ -22,21 +26,18 @@ package controllers // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,resourceNames=shipwright-build-controller,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,resourceNames=shipwright-build-controller,verbs=update;patch;delete +// +kubebuilder:rbac:groups=shipwright.io,resources=clusterbuildstrategies,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=operator.shipwright.io,resources=shipwrightbuilds,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=operator.shipwright.io,resources=shipwrightbuilds/finalizers,verbs=update // +kubebuilder:rbac:groups=operator.shipwright.io,resources=shipwrightbuilds/status,verbs=get;update;patch // +kubebuilder:rbac:groups=operator.tekton.dev,resources=tektonconfigs,verbs=get;list;create -// +kubebuilder:rbac:groups=core,resources=serviceaccounts,resourceNames=shipwright-build-webhook,verbs=update;patch;delete // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=admissionregistration.k8s.io/v1beta1,resources=validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterroles,resourceNames=shipwright-build-webhook,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=clusterrolebindings,resourceNames=shipwright-build-webhook,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,resourceNames=shipwright-build-webhook,verbs=update;patch;delete // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,resourceNames=shipwright-build-webhook,verbs=update;patch;delete -// +kubebuilder:rbac:groups=apps,resources=deployments,resourceNames=shipwright-build-webhook,verbs=update;patch;delete -// +kubebuilder:rbac:groups=apps,resources=deployments/finalizers,resourceNames=shipwright-build-webhook,verbs=update // +kubebuilder:rbac:groups=cert-manager.io,resources=issuers,verbs=get;list;watch;create // +kubebuilder:rbac:groups=cert-manager.io,resources=issuers,resourceNames=selfsigned-issuer,verbs=update;patch;delete // +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create // +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,resourceNames=shipwright-build-webhook-cert,verbs=update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods;events;configmaps;secrets;limitranges;namespaces;services,verbs=get;list;watch;create;update;patch;delete diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 86a56856..8985d8f3 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -15,8 +15,11 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -26,7 +29,10 @@ import ( tektonoperatorv1alpha1client "github.com/tektoncd/operator/pkg/client/clientset/versioned/typed/operator/v1alpha1" + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" operatorv1alpha1 "github.com/shipwright-io/operator/api/v1alpha1" + "github.com/shipwright-io/operator/pkg/common" + "github.com/shipwright-io/operator/test" // +kubebuilder:scaffold:imports ) @@ -54,6 +60,114 @@ func TestAPIs(t *testing.T) { RunSpecs(t, "Controller Suite") } +// setupTektonCRDs mocks out the CRD definition for Tekton TaskRuns and TektonConfig +func setupTektonCRDs(ctx context.Context) { + truePtr := true + By("does tekton taskrun crd exist") + err := k8sClient.Get(ctx, types.NamespacedName{Name: "taskruns.tekton.dev"}, &crdv1.CustomResourceDefinition{}) + if errors.IsNotFound(err) { + By("creating tekton taskrun crd") + taskRunCRD := &crdv1.CustomResourceDefinition{} + taskRunCRD.Name = "taskruns.tekton.dev" + taskRunCRD.Spec.Group = "tekton.dev" + taskRunCRD.Spec.Scope = crdv1.NamespaceScoped + taskRunCRD.Spec.Versions = []crdv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta1", + Storage: true, + Schema: &crdv1.CustomResourceValidation{ + OpenAPIV3Schema: &crdv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: &truePtr, + }, + }, + }, + } + taskRunCRD.Spec.Names.Plural = "taskruns" + taskRunCRD.Spec.Names.Singular = "taskrun" + taskRunCRD.Spec.Names.Kind = "TaskRun" + taskRunCRD.Spec.Names.ListKind = "TaskRunList" + taskRunCRD.Status.StoredVersions = []string{"v1beta1"} + err = k8sClient.Create(ctx, taskRunCRD, &client.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + } + Expect(err).NotTo(HaveOccurred()) + + By("does tektonconfig crd exist") + err = k8sClient.Get(ctx, types.NamespacedName{Name: "tektonconfigs.operator.tekton.dev"}, &crdv1.CustomResourceDefinition{}) + if errors.IsNotFound(err) { + tektonOpCRD := &crdv1.CustomResourceDefinition{} + tektonOpCRD.Name = "tektonconfigs.operator.tekton.dev" + tektonOpCRD.Labels = map[string]string{"operator.tekton.dev/release": common.TektonOpMinSupportedVersion} + tektonOpCRD.Spec.Group = "operator.tekton.dev" + tektonOpCRD.Spec.Scope = crdv1.ClusterScoped + tektonOpCRD.Spec.Versions = []crdv1.CustomResourceDefinitionVersion{ + { + Name: "v1alpha1", + Storage: true, + Schema: &crdv1.CustomResourceValidation{ + OpenAPIV3Schema: &crdv1.JSONSchemaProps{ + Type: "object", + XPreserveUnknownFields: &truePtr, + }, + }, + }, + } + tektonOpCRD.Spec.Names.Plural = "tektonconfigs" + tektonOpCRD.Spec.Names.Singular = "tektonconfig" + tektonOpCRD.Spec.Names.Kind = "TektonConfig" + tektonOpCRD.Spec.Names.ListKind = "TektonConfigList" + tektonOpCRD.Status.StoredVersions = []string{"v1alpha1"} + err = k8sClient.Create(ctx, tektonOpCRD, &client.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + } + Expect(err).NotTo(HaveOccurred()) +} + +// createShipwrightBuild creates an instance of the ShipwrightBuild object with the given target +// namespace. 
+func createShipwrightBuild(ctx context.Context, targetNamespace string) *operatorv1alpha1.ShipwrightBuild { + By("creating a ShipwrightBuild instance") + build := &operatorv1alpha1.ShipwrightBuild{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: operatorv1alpha1.ShipwrightBuildSpec{ + TargetNamespace: targetNamespace, + }, + } + err := k8sClient.Create(ctx, build, &client.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // when the finalizer is in place, the deployment of manifest elements is done, and therefore + // functional testing can proceed + By("waiting for the finalizer to be set") + test.EventuallyContainFinalizer(ctx, k8sClient, build, FinalizerAnnotation) + return build +} + +// deleteShipwrightBuild tears down the given ShipwrightBuild instance. +func deleteShipwrightBuild(ctx context.Context, build *operatorv1alpha1.ShipwrightBuild) { + By("deleting the ShipwrightBuild instance") + namespacedName := types.NamespacedName{Name: build.Name} + err := k8sClient.Get(ctx, namespacedName, build) + if errors.IsNotFound(err) { + return + } + Expect(err).NotTo(HaveOccurred()) + + err = k8sClient.Delete(ctx, build, &client.DeleteOptions{}) + // the delete e2e's can delete this object before this AfterEach runs + if errors.IsNotFound(err) { + return + } + Expect(err).NotTo(HaveOccurred()) + + By("waiting for ShipwrightBuild instance to be completely removed") + test.EventuallyRemoved(ctx, k8sClient, build) +} + var _ = BeforeSuite(func() { logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) @@ -70,7 +184,10 @@ var _ = BeforeSuite(func() { err = operatorv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = apiextv1.AddToScheme(scheme.Scheme) + err = crdv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = buildv1alpha1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml b/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml new file mode 100644 index 00000000..c95c2f6d --- /dev/null +++ b/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_shipwright_managed_push_cr.yaml @@ -0,0 +1,204 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: buildah-shipwright-managed-push +spec: + buildSteps: + - name: build + image: quay.io/containers/buildah:v1.32.0 + workingDir: $(params.shp-source-root) + securityContext: + privileged: true + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Parse parameters + context= + dockerfile= + image= + target= + buildArgs=() + inBuildArgs=false + registriesBlock="" + inRegistriesBlock=false + registriesInsecure="" + inRegistriesInsecure=false + registriesSearch="" + inRegistriesSearch=false + while [[ $# -gt 0 ]]; do + arg="$1" + shift + + if [ "${arg}" == "--context" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + context="$1" + shift + elif [ "${arg}" == "--dockerfile" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + dockerfile="$1" + shift + elif [ "${arg}" == "--image" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + image="$1" + shift + elif [ "${arg}" == "--target" ]; then + inBuildArgs=false + inRegistriesBlock=false + 
inRegistriesInsecure=false + inRegistriesSearch=false + target="$1" + shift + elif [ "${arg}" == "--build-args" ]; then + inBuildArgs=true + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-block" ]; then + inRegistriesBlock=true + inBuildArgs=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-insecure" ]; then + inRegistriesInsecure=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-search" ]; then + inRegistriesSearch=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + elif [ "${inBuildArgs}" == "true" ]; then + buildArgs+=("--build-arg" "${arg}") + elif [ "${inRegistriesBlock}" == "true" ]; then + registriesBlock="${registriesBlock}'${arg}', " + elif [ "${inRegistriesInsecure}" == "true" ]; then + registriesInsecure="${registriesInsecure}'${arg}', " + elif [ "${inRegistriesSearch}" == "true" ]; then + registriesSearch="${registriesSearch}'${arg}', " + else + echo "Invalid usage" + exit 1 + fi + done + + # Verify the existence of the context directory + if [ ! -d "${context}" ]; then + echo -e "The context directory '${context}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${context}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + cd "${context}" + + # Verify the existence of the Dockerfile + if [ ! -f "${dockerfile}" ]; then + echo -e "The Dockerfile '${dockerfile}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${dockerfile}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + echo "[INFO] Creating registries config file..." + if [ "${registriesSearch}" != "" ]; then + cat <>/tmp/registries.conf + [registries.search] + registries = [${registriesSearch::-2}] + + EOF + fi + if [ "${registriesInsecure}" != "" ]; then + cat <>/tmp/registries.conf + [registries.insecure] + registries = [${registriesInsecure::-2}] + + EOF + fi + if [ "${registriesBlock}" != "" ]; then + cat <>/tmp/registries.conf + [registries.block] + registries = [${registriesBlock::-2}] + + EOF + fi + + # Building the image + echo "[INFO] Building image ${image}" + buildah --storage-driver=$(params.storage-driver) \ + bud "${buildArgs[@]}" \ + --registries-conf=/tmp/registries.conf \ + --tag="${image}" \ + --file="${dockerfile}" \ + . + + # Write the image + echo "[INFO] Writing image ${image}" + buildah --storage-driver=$(params.storage-driver) push \ + "${image}" \ + "oci:${target}" + # That's the separator between the shell script and its args + - -- + - --context + - $(params.shp-source-context) + - --dockerfile + - $(build.dockerfile) + - --image + - $(params.shp-output-image) + - --build-args + - $(params.build-args[*]) + - --registries-block + - $(params.registries-block[*]) + - --registries-insecure + - $(params.registries-insecure[*]) + - --registries-search + - $(params.registries-search[*]) + - --target + - $(params.shp-output-directory) + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: build-args + description: "The values for the args in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: registries-block + description: The registries that need to block pull access. 
+ type: array + defaults: [] + - name: registries-insecure + description: The fully-qualified name of insecure registries. An insecure registry is one that does not have a valid SSL certificate or only supports HTTP. + type: array + defaults: [] + - name: registries-search + description: The registries for searching short name images such as `golang:latest`. + type: array + defaults: + - docker.io + - quay.io + - name: storage-driver + description: "The storage driver to use, such as 'overlay' or 'vfs'." + type: string + default: "vfs" + # For details see the "--storage-driver" section of https://github.com/containers/buildah/blob/main/docs/buildah.1.md#options + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml b/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml new file mode 100644 index 00000000..03075863 --- /dev/null +++ b/kodata/samples/buildstrategy/buildah/buildstrategy_buildah_strategy_managed_push_cr.yaml @@ -0,0 +1,204 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: buildah-strategy-managed-push +spec: + buildSteps: + - name: build-and-push + image: quay.io/containers/buildah:v1.32.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - "SETFCAP" + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Parse parameters + context= + dockerfile= + image= + buildArgs=() + inBuildArgs=false + registriesBlock="" + inRegistriesBlock=false + registriesInsecure="" + inRegistriesInsecure=false + registriesSearch="" + inRegistriesSearch=false + tlsVerify=true + while [[ $# -gt 0 ]]; do + arg="$1" + shift + + if [ "${arg}" == "--context" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + context="$1" + shift + elif [ "${arg}" == "--dockerfile" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + dockerfile="$1" + shift + elif [ "${arg}" == "--image" ]; then + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + image="$1" + shift + elif [ "${arg}" == "--build-args" ]; then + inBuildArgs=true + inRegistriesBlock=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-block" ]; then + inRegistriesBlock=true + inBuildArgs=false + inRegistriesInsecure=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-insecure" ]; then + inRegistriesInsecure=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesSearch=false + elif [ "${arg}" == "--registries-search" ]; then + inRegistriesSearch=true + inBuildArgs=false + inRegistriesBlock=false + inRegistriesInsecure=false + elif [ "${inBuildArgs}" == "true" ]; then + buildArgs+=("--build-arg" "${arg}") + elif [ "${inRegistriesBlock}" == "true" ]; then + registriesBlock="${registriesBlock}'${arg}', " + elif [ "${inRegistriesInsecure}" == "true" ]; then + registriesInsecure="${registriesInsecure}'${arg}', " + + # This assumes that the image is passed before the insecure registries which is fair in this context + if [[ ${image} == ${arg}/* ]]; then + tlsVerify=false + fi + elif [ "${inRegistriesSearch}" == "true" ]; then + registriesSearch="${registriesSearch}'${arg}', " + else + echo "Invalid usage" + exit 1 + fi + done + + # Verify the existence of the context directory + if [ ! 
-d "${context}" ]; then + echo -e "The context directory '${context}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${context}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + cd "${context}" + + # Verify the existence of the Dockerfile + if [ ! -f "${dockerfile}" ]; then + echo -e "The Dockerfile '${dockerfile}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${dockerfile}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + echo "[INFO] Creating registries config file..." + if [ "${registriesSearch}" != "" ]; then + cat <>/tmp/registries.conf + [registries.search] + registries = [${registriesSearch::-2}] + + EOF + fi + if [ "${registriesInsecure}" != "" ]; then + cat <>/tmp/registries.conf + [registries.insecure] + registries = [${registriesInsecure::-2}] + + EOF + fi + if [ "${registriesBlock}" != "" ]; then + cat <>/tmp/registries.conf + [registries.block] + registries = [${registriesBlock::-2}] + + EOF + fi + + # Building the image + echo "[INFO] Building image ${image}" + buildah --storage-driver=$(params.storage-driver) \ + bud "${buildArgs[@]}" \ + --registries-conf=/tmp/registries.conf \ + --tag="${image}" \ + --file="${dockerfile}" \ + . + + # Push the image + echo "[INFO] Pushing image ${image}" + buildah --storage-driver=$(params.storage-driver) push \ + --digestfile='$(results.shp-image-digest.path)' \ + --tls-verify="${tlsVerify}" \ + "${image}" \ + "docker://${image}" + # That's the separator between the shell script and its args + - -- + - --context + - $(params.shp-source-context) + - --dockerfile + - $(build.dockerfile) + - --image + - $(params.shp-output-image) + - --build-args + - $(params.build-args[*]) + - --registries-block + - $(params.registries-block[*]) + - --registries-insecure + - $(params.registries-insecure[*]) + - --registries-search + - $(params.registries-search[*]) + resources: + limits: + cpu: "1" + memory: 2Gi + requests: + cpu: 250m + memory: 65Mi + parameters: + - name: build-args + description: "The values for the args in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: registries-block + description: The registries that need to block pull access. + type: array + defaults: [] + - name: registries-insecure + description: The fully-qualified name of insecure registries. An insecure registry is one that does not have a valid SSL certificate or only supports HTTP. + type: array + defaults: [] + - name: registries-search + description: The registries for searching short name images such as `golang:latest`. 
+ type: array + defaults: + - docker.io + - quay.io + - name: storage-driver + description: "The storage driver to use, such as 'overlay' or 'vfs'" + type: string + default: "vfs" + # For details see the "--storage-driver" section of https://github.com/containers/buildah/blob/main/docs/buildah.1.md#options + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/samples/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml b/kodata/samples/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml new file mode 100644 index 00000000..ff329d54 --- /dev/null +++ b/kodata/samples/buildstrategy/buildkit/buildstrategy_buildkit_cr.yaml @@ -0,0 +1,170 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: buildkit + annotations: + # See https://github.com/moby/buildkit/blob/master/docs/rootless.md#about---oci-worker-no-process-sandbox for more information + container.apparmor.security.beta.kubernetes.io/step-build-and-push: unconfined + # The usage of seccomp annotation will be deprecate in k8s v1.22.0, see + # https://kubernetes.io/docs/tutorials/clusters/seccomp/#create-a-pod-with-a-seccomp-profile-for-syscall-auditing for more information + container.seccomp.security.alpha.kubernetes.io/step-build-and-push: unconfined +spec: + parameters: + - name: build-args + description: "The values for the ARGs in the Dockerfile. Values must be in the format KEY=VALUE." + type: array + defaults: [] + - name: cache + description: "Configure BuildKit's cache usage. Allowed values are 'disabled' and 'registry'. The default is 'registry'." + type: string + default: registry + - name: platforms + description: "Build the image for different platforms. By default, the image is built for the platform used by the FROM image. If that is present for multiple platforms, then it is built for the environment's platform." + type: array + defaults: [] + - name: secrets + description: "The secrets to pass to the build. Values must be in the format ID=FILE_CONTENT." + type: array + defaults: [] + buildSteps: + - name: build-and-push + image: moby/buildkit:nightly-rootless + imagePullPolicy: Always + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SETGID + - SETUID + workingDir: $(params.shp-source-root) + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + # See https://github.com/moby/buildkit/blob/master/docs/rootless.md#about---oci-worker-no-process-sandbox for more information + - name: BUILDKITD_FLAGS + value: --oci-worker-no-process-sandbox + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_DOCKERFILE + value: $(params.DOCKERFILE) + - name: PARAM_OUTPUT_DIRECTORY + value: $(params.shp-output-directory) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + - name: PARAM_OUTPUT_INSECURE + value: $(params.shp-output-insecure) + - name: PARAM_CACHE + value: $(params.cache) + command: + - /bin/ash + args: + - -c + - | + set -euo pipefail + + # Verify the existence of the context directory + if [ ! -d "${PARAM_SOURCE_CONTEXT}" ]; then + echo -e "The context directory '${PARAM_SOURCE_CONTEXT}' does not exist." + echo -n "ContextDirNotFound" > '$(results.shp-error-reason.path)' + echo -n "The context directory '${PARAM_SOURCE_CONTEXT}' does not exist." 
> '$(results.shp-error-message.path)' + exit 1 + fi + + # Prepare the file arguments + DOCKERFILE_PATH="${PARAM_SOURCE_CONTEXT}/${PARAM_DOCKERFILE}" + DOCKERFILE_DIR="$(dirname "${DOCKERFILE_PATH}")" + DOCKERFILE_NAME="$(basename "${DOCKERFILE_PATH}")" + + # Verify the existence of the Dockerfile + if [ ! -f "${DOCKERFILE_PATH}" ]; then + echo -e "The Dockerfile '${DOCKERFILE_PATH}' does not exist." + echo -n "DockerfileNotFound" > '$(results.shp-error-reason.path)' + echo -n "The Dockerfile '${DOCKERFILE_PATH}' does not exist." > '$(results.shp-error-message.path)' + exit 1 + fi + + # We only have ash here and therefore no bash arrays to help add dynamic arguments (the build-args) to the build command. + + echo "#!/bin/ash" > /tmp/run.sh + echo "set -euo pipefail" >> /tmp/run.sh + echo "buildctl-daemonless.sh \\" >> /tmp/run.sh + echo "build \\" >> /tmp/run.sh + echo "--frontend=dockerfile.v0 \\" >> /tmp/run.sh + echo "--opt=filename=\"${DOCKERFILE_NAME}\" \\" >> /tmp/run.sh + echo "--local=context=\"${PARAM_SOURCE_CONTEXT}\" \\" >> /tmp/run.sh + echo "--local=dockerfile=\"${DOCKERFILE_DIR}\" \\" >> /tmp/run.sh + echo "--output=type=oci,tar=false,dest=\"${PARAM_OUTPUT_DIRECTORY}\" \\" >> /tmp/run.sh + if [ "${PARAM_CACHE}" == "registry" ]; then + echo "--export-cache=type=inline \\" >> /tmp/run.sh + echo "--import-cache=type=registry,ref=\"${PARAM_OUTPUT_IMAGE}\",registry.insecure=\"${PARAM_OUTPUT_INSECURE}\" \\" >> /tmp/run.sh + elif [ "${PARAM_CACHE}" == "disabled" ]; then + echo "--no-cache \\" >> /tmp/run.sh + else + echo -e "An invalid value for the parameter 'cache' has been provided: '${PARAM_CACHE}'. Allowed values are 'disabled' and 'registry'." + echo -n "InvalidParameterValue" > '$(results.shp-error-reason.path)' + echo -n "An invalid value for the parameter 'cache' has been provided: '${PARAM_CACHE}'. Allowed values are 'disabled' and 'registry'." > '$(results.shp-error-message.path)' + exit 1 + fi + + stage="" + platforms="" + for a in "$@" + do + if [ "${a}" == "--build-args" ]; then + stage=build-args + elif [ "${a}" == "--platforms" ]; then + stage=platforms + elif [ "${a}" == "--secrets" ]; then + stage=secrets + elif [ "${stage}" == "build-args" ]; then + echo "--opt=\"build-arg:${a}\" \\" >> /tmp/run.sh + elif [ "${stage}" == "platforms" ]; then + if [ "${platforms}" == "" ]; then + platforms="${a}" + else + platforms="${platforms},${a}" + fi + elif [ "${stage}" == "secrets" ]; then + # Split ID=FILE_CONTENT into variables id and data + + # using head because the data could be multiline + id="$(echo "${a}" | head -1 | sed 's/=.*//')" + + # This is hacky, we remove the suffix ${id}= from all lines of the data. + # If the data would be multiple lines and a line would start with ${id}= + # then we would remove it. We could force users to give us the secret + # base64 encoded. But ultimately, the best solution might be if the user + # mounts the secret and just gives us the path here. + data="$(echo "${a}" | sed "s/^${id}=//")" + + # Write the secret data into a temporary file, once we have volume support + # in the build strategy, we should use a memory based emptyDir for this. 
+ echo -n "${data}" > "/tmp/secret_${id}" + + # Add the secret argument + echo "--secret id=${id},src="/tmp/secret_${id}" \\" >> /tmp/run.sh + fi + done + + if [ "${platforms}" != "" ]; then + echo "--opt=\"platform=${platforms}\" \\" >> /tmp/run.sh + fi + + echo "--progress=plain" >> /tmp/run.sh + + chmod +x /tmp/run.sh + /tmp/run.sh + # That's the separator between the shell script and its args + - -- + - --build-args + - $(params.build-args[*]) + - --platforms + - $(params.platforms[*]) + - --secrets + - $(params.secrets[*]) + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml b/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml new file mode 100644 index 00000000..61eca36e --- /dev/null +++ b/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3-heroku_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: buildpacks-v3-heroku +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + buildSteps: + - name: build-and-push + image: heroku/builder:22 + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml b/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml new file mode 100644 index 00000000..f4c11a65 --- /dev/null +++ b/kodata/samples/buildstrategy/buildpacks-v3/buildstrategy_buildpacks-v3_cr.yaml @@ -0,0 +1,100 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: buildpacks-v3 +spec: + volumes: + - name: platform-env + emptyDir: {} + parameters: + - name: platform-api-version + description: The referenced version is the minimum version that all relevant buildpack implementations support. + default: "0.7" + buildSteps: + - name: build-and-push + image: docker.io/paketobuildpacks/builder-jammy-full:latest + env: + - name: CNB_PLATFORM_API + value: $(params.platform-api-version) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + echo "> Processing environment variables..." + ENV_DIR="/platform/env" + + envs=($(env)) + + # Denying the creation of non required files from system environments. 
+ # The creation of a file named PATH (corresponding to PATH system environment) + # caused failure for python source during pip install (https://github.com/Azure-Samples/python-docs-hello-world) + block_list=("PATH" "HOSTNAME" "PWD" "_" "SHLVL" "HOME" "") + + for env in "${envs[@]}"; do + blocked=false + + IFS='=' read -r key value string <<< "$env" + + for str in "${block_list[@]}"; do + if [[ "$key" == "$str" ]]; then + blocked=true + break + fi + done + + if [ "$blocked" == "false" ]; then + path="${ENV_DIR}/${key}" + echo -n "$value" > "$path" + fi + done + + LAYERS_DIR=/tmp/.shp/layers + CACHE_DIR=/tmp/.shp/cache + + mkdir -p "$CACHE_DIR" "$LAYERS_DIR" + + function announce_phase { + printf "===> %s\n" "$1" + } + + announce_phase "ANALYZING" + /cnb/lifecycle/analyzer -layers="$LAYERS_DIR" "${PARAM_OUTPUT_IMAGE}" + + announce_phase "DETECTING" + /cnb/lifecycle/detector -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + announce_phase "RESTORING" + /cnb/lifecycle/restorer -cache-dir="$CACHE_DIR" -layers="$LAYERS_DIR" + + announce_phase "BUILDING" + /cnb/lifecycle/builder -app="${PARAM_SOURCE_CONTEXT}" -layers="$LAYERS_DIR" + + exporter_args=( -layers="$LAYERS_DIR" -report=/tmp/report.toml -cache-dir="$CACHE_DIR" -app="${PARAM_SOURCE_CONTEXT}") + grep -q "buildpack-default-process-type" "$LAYERS_DIR/config/metadata.toml" || exporter_args+=( -process-type web ) + + announce_phase "EXPORTING" + /cnb/lifecycle/exporter "${exporter_args[@]}" "${PARAM_OUTPUT_IMAGE}" + + # Store the image digest + grep digest /tmp/report.toml | tail -n 1 | tr -d ' \"\n' | sed s/digest=// > "$(results.shp-image-digest.path)" + volumeMounts: + - mountPath: /platform/env + name: platform-env + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1001 + runAsGroup: 1000 diff --git a/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml b/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml new file mode 100644 index 00000000..12465470 --- /dev/null +++ b/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko-trivy_cr.yaml @@ -0,0 +1,82 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: kaniko-trivy +# This Build Strategy will intentionally fail if the image has any +# critical CVEs. It will not be pushed into the destination registry +# if any critical vulnerabilities are found. 
+spec: + volumes: + - name: layout + emptyDir: {} + - name: tar + emptyDir: {} + buildSteps: + - name: kaniko-build + image: gcr.io/kaniko-project/executor:v1.17.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + env: + - name: HOME + value: /tekton/home + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + command: + - /kaniko/executor + args: + - --dockerfile + - $(build.dockerfile) + - --context + - $(params.shp-source-context) + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + - name: trivy-scan + image: docker.io/aquasec/trivy:0.46.0 + command: + - trivy + args: + - image + - --exit-code=1 + - --severity=CRITICAL + - --input + - $(params.shp-output-directory)/image.tar + env: + - name: HOME + value: /tekton/home + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko_cr.yaml b/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko_cr.yaml new file mode 100644 index 00000000..12eb4fae --- /dev/null +++ b/kodata/samples/buildstrategy/kaniko/buildstrategy_kaniko_cr.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: kaniko +spec: + buildSteps: + - name: build-and-push + image: gcr.io/kaniko-project/executor:v1.17.0 + workingDir: $(params.shp-source-root) + securityContext: + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + env: + - name: HOME + value: /tekton/home + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + command: + - /kaniko/executor + args: + - --dockerfile + - $(build.dockerfile) + - --context + - $(params.shp-source-context) + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/kodata/samples/buildstrategy/ko/buildstrategy_ko_cr.yaml b/kodata/samples/buildstrategy/ko/buildstrategy_ko_cr.yaml new file mode 100644 index 00000000..e98b0aed --- /dev/null +++ b/kodata/samples/buildstrategy/ko/buildstrategy_ko_cr.yaml @@ -0,0 +1,116 @@ +--- +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: ko +spec: + parameters: + - name: go-flags + description: "Value for the GOFLAGS environment variable." + default: "" + - name: go-version + description: "Version of Go, must match a tag from https://hub.docker.com/_/golang?tab=tags" + default: "1.20" + - name: ko-version + description: "Version of ko, must be either 'latest', or a release name from https://github.com/ko-build/ko/releases" + default: latest + - name: package-directory + description: "The directory inside the context directory containing the main package." + default: "." 
+ - name: target-platform + description: "Target platform to be built. For example: 'linux/arm64'. Multiple platforms can be provided separated by comma, for example: 'linux/arm64,linux/amd64'. The value 'all' will build all platforms supported by the base image. The value 'current' will build the platform on which the build runs." + default: current + volumes: + - name: gocache + description: "Volume to contain the GOCACHE. Can be set to a persistent volume to optimize compilation performance for rebuilds." + overridable: true + emptyDir: {} + buildSteps: + - name: build + image: golang:$(params.go-version) + imagePullPolicy: Always + workingDir: $(params.shp-source-root) + volumeMounts: + - mountPath: /gocache + name: gocache + readOnly: false + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + - name: GOFLAGS + value: $(params.go-flags) + - name: GOCACHE + value: /gocache + - name: PARAM_OUTPUT_IMAGE + value: $(params.shp-output-image) + - name: PARAM_OUTPUT_DIRECTORY + value: $(params.shp-output-directory) + - name: PARAM_SOURCE_CONTEXT + value: $(params.shp-source-context) + - name: PARAM_SOURCE_ROOT + value: $(params.shp-source-root) + - name: PARAM_TARGET_PLATFORM + value: $(params.target-platform) + - name: PARAM_PACKAGE_DIRECTORY + value: $(params.package-directory) + - name: PARAM_KO_VERSION + value: $(params.ko-version) + command: + - /bin/bash + args: + - -c + - | + set -euo pipefail + + # Determine the ko version + KO_VERSION="${PARAM_KO_VERSION}" + if [ "${KO_VERSION}" == "latest" ]; then + KO_VERSION=$(curl --silent "https://api.github.com/repos/ko-build/ko/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + fi + + # Create one variable with v-suffix and one without as we need both for the download URL + if [[ ${KO_VERSION} = v* ]]; then + KO_VERSION_WITH_V=${KO_VERSION} + KO_VERSION_WITHOUT_V=${KO_VERSION:1} + else + KO_VERSION_WITH_V=v${KO_VERSION} + KO_VERSION_WITHOUT_V=${KO_VERSION} + fi + + # Download ko to the temp directory + curl -f -s -L "https://github.com/ko-build/ko/releases/download/${KO_VERSION_WITH_V}/ko_${KO_VERSION_WITHOUT_V}_$(uname)_$(uname -m | sed 's/aarch64/arm64/').tar.gz" | tar xzf - -C /tmp ko + + # Determine the platform + PLATFORM="${PARAM_TARGET_PLATFORM}" + if [ "${PLATFORM}" == "current" ]; then + PLATFORM="$(uname | tr '[:upper:]' '[:lower:]')/$(uname -m | sed -e 's/x86_64/amd64/' -e 's/aarch64/arm64/')" + fi + + # Print version information + go version + echo "ko version $(/tmp/ko version)" + + # Allow directory to be owned by other user which is normal for a volume-mounted directory. + # This allows Go to run git commands to access repository metadata. 
+ # Documentation: https://git-scm.com/docs/git-config/2.39.0#Documentation/git-config.txt-safedirectory + git config --global --add safe.directory "${PARAM_SOURCE_ROOT}" + + # Run ko + + export GOROOT="$(go env GOROOT)" + + pushd "${PARAM_SOURCE_CONTEXT}" > /dev/null + /tmp/ko build "${PARAM_PACKAGE_DIRECTORY}" --oci-layout-path="${PARAM_OUTPUT_DIRECTORY}" --platform="${PLATFORM}" --push=false + popd > /dev/null + resources: + limits: + cpu: 500m + memory: 1Gi + requests: + cpu: 250m + memory: 65Mi + securityContext: + runAsUser: 1000 + runAsGroup: 1000 diff --git a/kodata/samples/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml b/kodata/samples/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml new file mode 100644 index 00000000..85eca91c --- /dev/null +++ b/kodata/samples/buildstrategy/source-to-image/buildstrategy_source-to-image_cr.yaml @@ -0,0 +1,69 @@ +apiVersion: shipwright.io/v1alpha1 +kind: ClusterBuildStrategy +metadata: + name: source-to-image +spec: + volumes: + - name: gen-source + emptyDir: {} + buildSteps: + - command: + - /usr/local/bin/s2i + - build + - $(params.shp-source-context) + - $(build.builder.image) + - '--as-dockerfile' + - /gen-source/Dockerfile.gen + image: quay.io/openshift-pipeline/s2i:nightly + imagePullPolicy: Always + name: s2i-build-as-dockerfile + volumeMounts: + - mountPath: /gen-source + name: gen-source + workingDir: $(params.shp-source-root) + - name: build-and-push + image: gcr.io/kaniko-project/executor:v1.17.0 + command: + - /kaniko/executor + args: + - --dockerfile + - /gen-source/Dockerfile.gen + - --context + - /gen-source + - --destination + - $(params.shp-output-image) + - --snapshot-mode + - redo + - --no-push + - --tar-path + - $(params.shp-output-directory)/image.tar + # https://github.com/GoogleContainerTools/kaniko/issues/2164 + - --ignore-path + - /product_uuid + env: + - name: DOCKER_CONFIG + value: /tekton/home/.docker + - name: HOME + value: /tekton/home + - name: AWS_ACCESS_KEY_ID + value: NOT_SET + - name: AWS_SECRET_KEY + value: NOT_SET + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - CHOWN + - DAC_OVERRIDE + - FOWNER + - SETGID + - SETUID + - SETFCAP + - KILL + volumeMounts: + - mountPath: /gen-source + name: gen-source + workingDir: /gen-source + securityContext: + runAsUser: 0 + runAsGroup: 0 diff --git a/pkg/buildstrategy/buildstrategy.go b/pkg/buildstrategy/buildstrategy.go new file mode 100644 index 00000000..597bf540 --- /dev/null +++ b/pkg/buildstrategy/buildstrategy.go @@ -0,0 +1,32 @@ +package buildstrategy + +import ( + "context" + + "github.com/go-logr/logr" + "github.com/manifestival/manifestival" + "github.com/shipwright-io/operator/pkg/common" + crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" +) + +const clusterBuildStrategiesCRD = "clusterbuildstrategies.shipwright.io" + +// ReconcileBuildStrategies reconciles the desired ClusterBuildStrategies to install on the cluster. +// Returns `true` if the build strategies were not installed and a requeue is required. +func ReconcileBuildStrategies(ctx context.Context, crdClient crdclientv1.ApiextensionsV1Interface, log logr.Logger, manifest manifestival.Manifest) (bool, error) { + crdExists, err := common.CRDExist(ctx, crdClient, clusterBuildStrategiesCRD) + if err != nil { + return true, err + } + // If the CRD for Shipwright's cluster build strategies were not installed yet, the reconciler + // should requeue. 
+ if !crdExists { + return true, nil + } + // Apply the provided manifest containing the build strategies + err = manifest.Apply() + if err != nil { + return true, err + } + return false, nil +} diff --git a/pkg/buildstrategy/buildstrategy_test.go b/pkg/buildstrategy/buildstrategy_test.go new file mode 100644 index 00000000..d6c79fcc --- /dev/null +++ b/pkg/buildstrategy/buildstrategy_test.go @@ -0,0 +1,92 @@ +package buildstrategy + +import ( + "context" + "path/filepath" + "testing" + + . "github.com/onsi/gomega" + + crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/operator/pkg/common" + "github.com/shipwright-io/operator/test" +) + +func TestReconcileBuildStrategies(t *testing.T) { + + cases := []struct { + name string + installShipwrightCRDs bool + expectRequeue bool + expectStrategiesInstalled bool + }{ + { + name: "no Shipwright CRDs", + installShipwrightCRDs: false, + expectRequeue: true, + }, + { + name: "install Shipwright CRDs", + installShipwrightCRDs: true, + expectRequeue: false, + expectStrategiesInstalled: true, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + o := NewWithT(t) + ctx := context.Background() + var cancel context.CancelFunc + deadline, hasDeadline := t.Deadline() + if hasDeadline { + ctx, cancel = context.WithDeadline(context.Background(), deadline) + defer cancel() + } + objects := []runtime.Object{} + if tc.installShipwrightCRDs { + objects = append(objects, &crdv1.CustomResourceDefinition{ + ObjectMeta: v1.ObjectMeta{ + Name: clusterBuildStrategiesCRD, + }, + }) + } + crdClient := apiextensionsfake.NewSimpleClientset(objects...) 
+ schemeBuilder := runtime.NewSchemeBuilder(scheme.AddToScheme, buildv1alpha1.AddToScheme) + scheme := runtime.NewScheme() + err := schemeBuilder.AddToScheme(scheme) + o.Expect(err).NotTo(HaveOccurred(), "create k8s client scheme") + k8sClient := fake.NewClientBuilder().WithScheme(scheme).Build() + log := zap.New() + manifests, err := common.SetupManifestival(k8sClient, filepath.Join("samples", "buildstrategy"), true, log) + o.Expect(err).NotTo(HaveOccurred(), "setting up Manifestival") + requeue, err := ReconcileBuildStrategies(ctx, crdClient.ApiextensionsV1(), log, manifests) + o.Expect(err).NotTo(HaveOccurred(), "reconciling build strategies") + o.Expect(requeue).To(BeEquivalentTo(tc.expectRequeue), "check reconcile requeue") + + if tc.expectStrategiesInstalled { + strategies, err := test.ParseBuildStrategyNames() + t.Logf("build strategies: %s", strategies) + o.Expect(err).NotTo(HaveOccurred(), "parse build strategy names") + for _, strategy := range strategies { + obj := &buildv1alpha1.ClusterBuildStrategy{ + ObjectMeta: v1.ObjectMeta{ + Name: strategy, + }, + } + err := k8sClient.Get(ctx, client.ObjectKeyFromObject(obj), obj) + o.Expect(err).NotTo(HaveOccurred(), "get ClusterBuildStrategy %s", strategy) + } + } + }) + } +} diff --git a/pkg/certmanager/cert_manager.go b/pkg/certmanager/cert_manager.go index de29ec06..148c7b6a 100644 --- a/pkg/certmanager/cert_manager.go +++ b/pkg/certmanager/cert_manager.go @@ -34,7 +34,7 @@ func ReconcileCertManager(ctx context.Context, crdClient crdclientv1.Apiextensio } } - manifest, err := common.SetupManifestival(client, "certificates.yaml", logger) + manifest, err := common.SetupManifestival(client, "certificates.yaml", false, logger) if err != nil { return true, fmt.Errorf("Error creating inital certificates manifest") } diff --git a/pkg/common/util.go b/pkg/common/util.go index 0c235102..8683ae20 100644 --- a/pkg/common/util.go +++ b/pkg/common/util.go @@ -7,10 +7,11 @@ import ( "strconv" "strings" + "path/filepath" + "github.com/go-logr/logr" mfc "github.com/manifestival/controller-runtime-client" "github.com/manifestival/manifestival" - mf "github.com/manifestival/manifestival" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" @@ -18,24 +19,29 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "path/filepath" "sigs.k8s.io/controller-runtime/pkg/client" ) -// setupManifestival instantiate manifestival -func SetupManifestival(client client.Client, manifestFile string, logger logr.Logger) (manifestival.Manifest, error) { +// SetupManifestival instantiates a Manifestival instance for the provided file or directory +func SetupManifestival(client client.Client, fileOrDir string, recurse bool, logger logr.Logger) (manifestival.Manifest, error) { mfclient := mfc.NewClient(client) - dataPath, err := koDataPath() + dataPath, err := KoDataPath() if err != nil { return manifestival.Manifest{}, err } - manifest := filepath.Join(dataPath, manifestFile) - return manifestival.NewManifest(manifest, manifestival.UseClient(mfclient), manifestival.UseLogger(logger)) + manifest := filepath.Join(dataPath, fileOrDir) + var src manifestival.Source + if recurse { + src = manifestival.Recursive(manifest) + } else { + src = manifestival.Path(manifest) + } + return manifestival.ManifestFrom(src, manifestival.UseClient(mfclient), manifestival.UseLogger(logger)) } -// 
koDataPath retrieve the data path environment variable, returning error when not found. -func koDataPath() (string, error) { +// KoDataPath retrieve the data path environment variable, returning error when not found. +func KoDataPath() (string, error) { dataPath, exists := os.LookupEnv(koDataPathEnv) if !exists { return "", fmt.Errorf("'%s' is not set", koDataPathEnv) @@ -82,7 +88,7 @@ func ToLowerCaseKeys(keyValues map[string]string) map[string]string { } // deploymentImages replaces container and env vars images. -func DeploymentImages(images map[string]string) mf.Transformer { +func DeploymentImages(images map[string]string) manifestival.Transformer { return func(u *unstructured.Unstructured) error { if u.GetKind() != "Deployment" { return nil @@ -153,11 +159,11 @@ func BoolFromEnvVar(envVar string) bool { return false } -// injectAnnotations adds annotation key:value to a resource annotations +// InjectAnnotations adds annotation key:value to a resource annotations // overwritePolicy (Retain/Overwrite) decides whehther to overwrite an already existing annotation // []kinds specify the Kinds on which the label should be applied // if len(kinds) = 0, label will be apllied to all/any resources irrespective of its Kind -func InjectAnnotations(key, value string, overwritePolicy int, kinds ...string) mf.Transformer { +func InjectAnnotations(key, value string, overwritePolicy int, kinds ...string) manifestival.Transformer { return func(u *unstructured.Unstructured) error { kind := u.GetKind() if len(kinds) != 0 && !itemInSlice(kind, kinds) { diff --git a/test/common.go b/test/common.go index c6ed69d2..9907469f 100644 --- a/test/common.go +++ b/test/common.go @@ -1,16 +1,25 @@ package test import ( + "bytes" "context" + "io/fs" + "os" + "path/filepath" "time" + o "github.com/onsi/gomega" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - - o "github.com/onsi/gomega" + k8syaml "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/shipwright-io/build/pkg/apis/build/v1alpha1" + "github.com/shipwright-io/operator/pkg/common" ) // timeout amount of time to wait for Eventually methods @@ -27,9 +36,13 @@ func EventuallyExists(ctx context.Context, k8sClient client.Client, obj client.O if errors.IsNotFound(err) { return false } + if meta.IsNoMatchError(err) { + // For CRDs created by the operator, we may need to wait. + return false + } o.Expect(err).NotTo(o.HaveOccurred()) return true - }, timeout).Should(o.BeTrue()) + }, timeout).Should(o.BeTrue(), "waiting for object %s/%s to exist", obj.GetNamespace(), obj.GetName()) } // EventuallyContainFinalizer retrieves and inspect the object to assert if the informed finalizer @@ -85,3 +98,39 @@ func CRDEventuallyRemoved(ctx context.Context, k8sClient client.Client, crdName } EventuallyRemoved(ctx, k8sClient, crd) } + +// ParseBuildStrategyNames returns a list of object names from the embedded build strategy +// manifests. 
+func ParseBuildStrategyNames() ([]string, error) { + koDataPath, err := common.KoDataPath() + if err != nil { + return nil, err + } + strategyPath := filepath.Join(koDataPath, "samples", "buildstrategy") + sampleNames := []string{} + err = filepath.WalkDir(strategyPath, func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + return nil + } + clusterBuildStrategy := &v1alpha1.ClusterBuildStrategy{} + decodeErr := decodeYaml(path, clusterBuildStrategy) + if decodeErr != nil { + return decodeErr + } + sampleNames = append(sampleNames, clusterBuildStrategy.Name) + return nil + }) + if err != nil { + return nil, err + } + return sampleNames, nil +} + +func decodeYaml(path string, obj *v1alpha1.ClusterBuildStrategy) error { + yaml, err := os.ReadFile(path) + if err != nil { + return err + } + decoder := k8syaml.NewYAMLOrJSONDecoder(bytes.NewBuffer(yaml), 16) + return decoder.Decode(obj) +}
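
Usage sketch (illustrative, not part of the applied diff): the new helpers are meant to compose as follows — common.SetupManifestival loads every manifest under kodata/samples/buildstrategy when recurse is true, and buildstrategy.ReconcileBuildStrategies applies them once the clusterbuildstrategies.shipwright.io CRD exists, otherwise signalling a requeue. The receiver type and its field names (Client, CRDClient, Logger) below are assumptions made for the example; the real wiring lives in controllers/shipwrightbuild_controller.go and is not reproduced here.

package controllers

import (
	"context"

	"github.com/go-logr/logr"
	crdclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/shipwright-io/operator/pkg/buildstrategy"
	"github.com/shipwright-io/operator/pkg/common"
)

// ExampleReconciler is a stand-in for the operator's reconciler; the field
// names are assumptions for this sketch, not the actual struct definition.
type ExampleReconciler struct {
	Client    client.Client
	CRDClient crdclientv1.ApiextensionsV1Interface
	Logger    logr.Logger
}

func (r *ExampleReconciler) reconcileBuildStrategies(ctx context.Context) (ctrl.Result, error) {
	// recurse=true walks $KO_DATA_PATH/samples/buildstrategy and collects every sample manifest.
	manifest, err := common.SetupManifestival(r.Client, "samples/buildstrategy", true, r.Logger)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Returns requeue=true until the clusterbuildstrategies.shipwright.io CRD exists on the cluster.
	requeue, err := buildstrategy.ReconcileBuildStrategies(ctx, r.CRDClient, r.Logger, manifest)
	if err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{Requeue: requeue}, nil
}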