From f041aabdac9967e757d39e11674ccefb409c4726 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Haram=20Nyg=C3=A5rd?= Date: Wed, 14 Aug 2024 15:40:27 +0200 Subject: [PATCH 1/4] Refactor (#488) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Refactor * started work on processor and reconciliation packages * finish processor * finished initial namespace controller refactor, started routing controller * finish rough refactor of controllers * Fix schemas, make it build and run * fix namespace controller so tests pass * adding more tests, fixing errors * Fixing tests * more refactoring * Fix deployment patching and more tests * fix custom certs * fix ingress and watched tests, cloudsql proxy proper security context * Fix routing * skipjob testing * all tests fixed except condition/status * remove istio pkg from gitignore * clean up skipjobs a bit, add schemas for each controller * Add SKIPObject and SkiperatorStatus * Fix tests, fix status (not conditions) * Clean up status stuff a bit * fix(ish) skipjob conditions * Add reason to error state function * remove unused files, add some todos * only use app label on certain resources. improve tests * fix skiperator ignore on resources and add unit tests to makefile * remove unnecessary pointers * add warning event for mistyped resource labels * Add missing namespace creation to setup-local * added alloy support in netpol/default deny * Fix panic when logging * review * review * more review * multigenerator: simplify resource generation code and expandability (#503) Co-authored-by: Martin Haram Nygård * review * fix test * review, moved commonspec to types, some changes to status * more review corrections * Fix status reconciles, tweak status on pending * fix test * fix typo, add todos * fix all tests failing, add dir for cluster-config * upgrade docker pkg for pharos scan * add resourceDiff struct for more readable diffs * add comment for image pull secret * Dynamic port allocation (#506) * add support for dynamic port * rebase * fix some tests * fix validation and tests * adjust tests to be more resilient * don't add netpol rules that don't have a port * add todo * add todo * custom-cert: bugfix * fix missing rbac permissions --------- Co-authored-by: Eline Henriksen Co-authored-by: Bård Ove Hoel Co-authored-by: Eline Henriksen Co-authored-by: Even Holthe Co-authored-by: Even Holthe --- .gitignore | 3 +- Makefile | 13 +- api/v1alpha1/application_types.go | 153 ++----- api/v1alpha1/hosts.go | 53 +++ api/v1alpha1/podtypes/access_policy.go | 7 +- .../podtypes/zz_generated.deepcopy.go | 20 +- api/v1alpha1/routing_types.go | 36 +- api/v1alpha1/skipjob_defaults.go | 63 --- api/v1alpha1/skipjob_types.go | 125 ++++- api/v1alpha1/skipns_types.go | 28 ++ api/v1alpha1/skipobj_interfaces.go | 23 + api/v1alpha1/status_types.go | 88 ++++ api/v1alpha1/zz_generated.deepcopy.go | 53 ++- cmd/skiperator/main.go | 47 +- ...skiperator.kartverket.no_applications.yaml | 179 +++++++- .../skiperator.kartverket.no_routings.yaml | 54 ++- .../skiperator.kartverket.no_skipjobs.yaml | 149 +++++- .../application/authorization_policy.go | 112 ----- controllers/application/certificate.go | 191 -------- controllers/application/configmap.go | 108 ----- controllers/application/controller.go | 426 ------------------ .../application/egress_service_entry.go | 92 ---- .../application/horizontal_pod_autoscaler.go | 81 ---- controllers/application/ingress_gateway.go | 161 ------- .../application/ingress_virtual_service.go | 121 -----
controllers/application/maskinporten.go | 97 ---- controllers/application/network_policy.go | 85 ---- .../application/peer_authentication.go | 45 -- .../application/pod_disruption_budget.go | 92 ---- controllers/application/service.go | 110 ----- controllers/application/service_account.go | 58 --- controllers/application/service_monitor.go | 91 ---- controllers/namespace/controller.go | 85 ---- .../namespace/default_deny_network_policy.go | 148 ------ controllers/namespace/image_pull_secret.go | 58 --- controllers/namespace/sidecar.go | 33 -- controllers/routing/certificate.go | 106 ----- controllers/routing/controller.go | 104 ----- controllers/routing/gateway.go | 80 ---- controllers/routing/network_policy.go | 131 ------ controllers/routing/status.go | 132 ------ controllers/routing/virtual_service.go | 113 ----- controllers/skipjob/controller.go | 155 ------- controllers/skipjob/egress_service_entry.go | 58 --- controllers/skipjob/gcp_configmap.go | 76 ---- controllers/skipjob/job.go | 317 ------------- controllers/skipjob/network_policy.go | 56 --- controllers/skipjob/pod_monitor.go | 66 --- controllers/skipjob/service_account.go | 31 -- controllers/skipjob/status.go | 194 -------- go.mod | 7 +- go.sum | 16 +- internal/controllers/application.go | 399 ++++++++++++++++ internal/controllers/common/reconciler.go | 235 ++++++++++ internal/controllers/common/util.go | 57 +++ internal/controllers/common/util_test.go | 16 + internal/controllers/namespace.go | 138 ++++++ internal/controllers/routing.go | 231 ++++++++++ internal/controllers/skipjob.go | 341 ++++++++++++++ pkg/certs/k8s.go | 40 -- pkg/log/log.go | 48 ++ pkg/reconciliation/application.go | 32 ++ pkg/reconciliation/namespace.go | 32 ++ pkg/reconciliation/reconciliation.go | 73 +++ pkg/reconciliation/routing.go | 32 ++ pkg/reconciliation/skipjob.go | 30 ++ .../certificate/application.go | 56 +++ .../certificate/certificate.go | 16 + pkg/resourcegenerator/certificate/routing.go | 60 +++ pkg/resourcegenerator/core/constants.go | 12 - .../deployment}/deployment.go | 226 +++------- .../deployment/deployment_test.go | 23 + pkg/resourcegenerator/gcp/auth/configmap.go | 77 ++++ .../gcp/workload_identity.go | 47 +- .../github/image_pull_secret.go | 61 +++ .../hpa/horizontal_pod_autoscaler.go | 64 +++ .../resourcegenerator/idporten}/idporten.go | 91 ++-- .../authorization_policy.go | 92 ++++ .../istio/gateway/application.go | 80 ++++ .../istio/gateway/gateway.go | 12 + .../istio/gateway/routing.go | 76 ++++ .../istio/peer_authentication.go | 18 - .../peerauthentication/peer_authentication.go | 43 ++ .../serviceentry.go} | 121 +++-- .../istio/sidecar/sidecar.go | 32 ++ .../istio/virtualservice/application.go | 102 +++++ .../istio/virtualservice/routing.go | 103 +++++ .../istio/virtualservice/virtual_service.go | 12 + pkg/resourcegenerator/job/job.go | 123 +++++ .../maskinporten/maskinporten.go | 87 ++++ .../networking/network_policy.go | 313 ------------- .../default_deny_network_policy.go | 147 ++++++ .../networkpolicy/dynamic/common.go | 272 +++++++++++ .../networkpolicy/dynamic/network_policy.go | 19 + .../networkpolicy/dynamic/routing.go | 74 +++ .../pdb/pod_disruption_budget.go | 71 +++ pkg/resourcegenerator/{core => pod}/pod.go | 11 +- .../podmonitor/pod_monitor.go | 60 +++ .../resourceutils/generator/multigenerator.go | 41 ++ .../resourceutils/helpers.go | 18 + .../resourceutils/metadata.go | 100 ++++ .../resourceutils/metadata_test.go | 37 ++ pkg/resourcegenerator/resourceutils/refs.go | 21 + .../resourceutils/typemeta.go | 17 + 
pkg/resourcegenerator/service/service.go | 94 ++++ .../serviceaccount/application.go | 46 ++ .../serviceaccount/application_test.go | 21 + .../serviceaccount/service_account.go | 12 + .../serviceaccount/skipjob.go | 30 ++ .../servicemonitor/service_monitor.go | 67 +++ .../{core => volume}/volumes.go | 2 +- pkg/resourceprocessor/crud.go | 102 +++++ pkg/resourceprocessor/diffs.go | 117 +++++ pkg/resourceprocessor/diffs_test.go | 189 ++++++++ pkg/resourceprocessor/processor.go | 68 +++ pkg/resourceprocessor/resource.go | 91 ++++ pkg/resourceprocessor/resource_test.go | 19 + pkg/resourceschemas/schemas.go | 108 +++++ pkg/resourceschemas/schemas_test.go | 37 ++ pkg/testutil/reconciliation.go | 33 ++ pkg/util/conditions.go | 39 -- pkg/util/constants.go | 7 - pkg/util/digest.go | 13 +- pkg/util/helperfunctions.go | 12 +- pkg/util/reconciler.go | 223 --------- .../access-policy/advanced-assert.yaml | 30 ++ .../access-policy/advanced-patch-assert.yaml | 4 +- .../access-policy/bad-policy-assert.yaml | 81 ++++ .../access-policy/bad-policy-error.yaml | 32 ++ .../application/access-policy/bad-policy.yaml | 38 ++ .../access-policy/chainsaw-test.yaml | 10 +- .../patch-application-assert.yaml | 42 +- .../patch-application.yaml | 4 +- .../application-duplicate-ingress-assert.yaml | 137 ++++++ .../application-duplicate-ingress-error.yaml | 53 +++ .../application-duplicate-ingress.yaml | 10 + .../custom-certificate/chainsaw-test.yaml | 6 + .../ignore-reconcile/remove-label-assert.yaml | 2 - .../minimal/application-assert.yaml | 81 +++- .../service/application-assert.yaml | 2 + .../application-error-assert.yaml | 56 --- .../application-generate-error-assert.yaml | 22 + ...r.yaml => application-generate-error.yaml} | 7 + ...plication-resource-apply-error-assert.yaml | 77 ++++ .../application-resource-apply-error.yaml | 21 + .../application-synced-assert.yaml | 52 +-- .../application-synced.yaml | 30 ++ .../subresource-status/chainsaw-test.yaml | 14 +- .../application/team-label/chainsaw-test.yaml | 4 +- .../watched/certificate-assert.yaml | 21 + .../watched/certificate-errors.yaml | 15 + tests/application/watched/certificate.yaml | 17 + tests/application/watched/chainsaw-test.yaml | 19 + tests/cluster-config/gcp-identity-config.yaml | 8 + .../cluster-config/ns-exclusions-config.yaml | 13 + tests/config.yaml | 2 +- tests/namespace/default-deny/assert.yaml | 7 +- tests/namespace/image-pull-secret/assert.yaml | 3 + tests/namespace/sidecar/assert.yaml | 3 + .../patch-routing-change-hostname-assert.yaml | 2 +- tests/routing/routes/routing-assert.yaml | 2 +- .../access-policy-job/chainsaw-test.yaml | 5 + .../access-policy-job/skipjob-assert.yaml | 56 ++- .../skipjob-cron-assert.yaml | 68 +++ .../access-policy-job/skipjob-cron.yaml | 25 + tests/skipjob/access-policy-job/skipjob.yaml | 2 +- tests/skipjob/conditions/chainsaw-test.yaml | 7 +- tests/skipjob/conditions/skipjob-assert.yaml | 81 ++-- tests/skipjob/conditions/skipjob.yaml | 9 +- .../immutable-container/chainsaw-test.yaml | 26 ++ .../immutable-container/skipjob-assert.yaml | 15 + .../skipjob-patch-error.yaml | 12 + .../immutable-container/skipjob-patch.yaml | 12 + .../skipjob/immutable-container/skipjob.yaml | 12 + .../minimal-cron-job/skipjob-assert.yaml | 17 + tests/skipjob/minimal-job/skipjob-assert.yaml | 6 + 176 files changed, 6788 insertions(+), 5291 deletions(-) delete mode 100644 api/v1alpha1/skipjob_defaults.go create mode 100644 api/v1alpha1/skipns_types.go create mode 100644 api/v1alpha1/skipobj_interfaces.go create mode 100644 
api/v1alpha1/status_types.go delete mode 100644 controllers/application/authorization_policy.go delete mode 100644 controllers/application/certificate.go delete mode 100644 controllers/application/configmap.go delete mode 100644 controllers/application/controller.go delete mode 100644 controllers/application/egress_service_entry.go delete mode 100644 controllers/application/horizontal_pod_autoscaler.go delete mode 100644 controllers/application/ingress_gateway.go delete mode 100644 controllers/application/ingress_virtual_service.go delete mode 100644 controllers/application/maskinporten.go delete mode 100644 controllers/application/network_policy.go delete mode 100644 controllers/application/peer_authentication.go delete mode 100644 controllers/application/pod_disruption_budget.go delete mode 100644 controllers/application/service.go delete mode 100644 controllers/application/service_account.go delete mode 100644 controllers/application/service_monitor.go delete mode 100644 controllers/namespace/controller.go delete mode 100644 controllers/namespace/default_deny_network_policy.go delete mode 100644 controllers/namespace/image_pull_secret.go delete mode 100644 controllers/namespace/sidecar.go delete mode 100644 controllers/routing/certificate.go delete mode 100644 controllers/routing/controller.go delete mode 100644 controllers/routing/gateway.go delete mode 100644 controllers/routing/network_policy.go delete mode 100644 controllers/routing/status.go delete mode 100644 controllers/routing/virtual_service.go delete mode 100644 controllers/skipjob/controller.go delete mode 100644 controllers/skipjob/egress_service_entry.go delete mode 100644 controllers/skipjob/gcp_configmap.go delete mode 100644 controllers/skipjob/job.go delete mode 100644 controllers/skipjob/network_policy.go delete mode 100644 controllers/skipjob/pod_monitor.go delete mode 100644 controllers/skipjob/service_account.go delete mode 100644 controllers/skipjob/status.go create mode 100644 internal/controllers/application.go create mode 100644 internal/controllers/common/reconciler.go create mode 100644 internal/controllers/common/util.go create mode 100644 internal/controllers/common/util_test.go create mode 100644 internal/controllers/namespace.go create mode 100644 internal/controllers/routing.go create mode 100644 internal/controllers/skipjob.go delete mode 100644 pkg/certs/k8s.go create mode 100644 pkg/log/log.go create mode 100644 pkg/reconciliation/application.go create mode 100644 pkg/reconciliation/namespace.go create mode 100644 pkg/reconciliation/reconciliation.go create mode 100644 pkg/reconciliation/routing.go create mode 100644 pkg/reconciliation/skipjob.go create mode 100644 pkg/resourcegenerator/certificate/application.go create mode 100644 pkg/resourcegenerator/certificate/certificate.go create mode 100644 pkg/resourcegenerator/certificate/routing.go delete mode 100644 pkg/resourcegenerator/core/constants.go rename {controllers/application => pkg/resourcegenerator/deployment}/deployment.go (50%) create mode 100644 pkg/resourcegenerator/deployment/deployment_test.go create mode 100644 pkg/resourcegenerator/gcp/auth/configmap.go create mode 100644 pkg/resourcegenerator/github/image_pull_secret.go create mode 100644 pkg/resourcegenerator/hpa/horizontal_pod_autoscaler.go rename {controllers/application => pkg/resourcegenerator/idporten}/idporten.go (73%) create mode 100644 pkg/resourcegenerator/istio/authorizationpolicy/authorization_policy.go create mode 100644 pkg/resourcegenerator/istio/gateway/application.go 
create mode 100644 pkg/resourcegenerator/istio/gateway/gateway.go create mode 100644 pkg/resourcegenerator/istio/gateway/routing.go delete mode 100644 pkg/resourcegenerator/istio/peer_authentication.go create mode 100644 pkg/resourcegenerator/istio/peerauthentication/peer_authentication.go rename pkg/resourcegenerator/istio/{service_entry.go => serviceentry/serviceentry.go} (71%) create mode 100644 pkg/resourcegenerator/istio/sidecar/sidecar.go create mode 100644 pkg/resourcegenerator/istio/virtualservice/application.go create mode 100644 pkg/resourcegenerator/istio/virtualservice/routing.go create mode 100644 pkg/resourcegenerator/istio/virtualservice/virtual_service.go create mode 100644 pkg/resourcegenerator/job/job.go create mode 100644 pkg/resourcegenerator/maskinporten/maskinporten.go delete mode 100644 pkg/resourcegenerator/networking/network_policy.go create mode 100644 pkg/resourcegenerator/networkpolicy/defaultdeny/default_deny_network_policy.go create mode 100644 pkg/resourcegenerator/networkpolicy/dynamic/common.go create mode 100644 pkg/resourcegenerator/networkpolicy/dynamic/network_policy.go create mode 100644 pkg/resourcegenerator/networkpolicy/dynamic/routing.go create mode 100644 pkg/resourcegenerator/pdb/pod_disruption_budget.go rename pkg/resourcegenerator/{core => pod}/pod.go (96%) create mode 100644 pkg/resourcegenerator/podmonitor/pod_monitor.go create mode 100644 pkg/resourcegenerator/resourceutils/generator/multigenerator.go create mode 100644 pkg/resourcegenerator/resourceutils/helpers.go create mode 100644 pkg/resourcegenerator/resourceutils/metadata.go create mode 100644 pkg/resourcegenerator/resourceutils/metadata_test.go create mode 100644 pkg/resourcegenerator/resourceutils/refs.go create mode 100644 pkg/resourcegenerator/resourceutils/typemeta.go create mode 100644 pkg/resourcegenerator/service/service.go create mode 100644 pkg/resourcegenerator/serviceaccount/application.go create mode 100644 pkg/resourcegenerator/serviceaccount/application_test.go create mode 100644 pkg/resourcegenerator/serviceaccount/service_account.go create mode 100644 pkg/resourcegenerator/serviceaccount/skipjob.go create mode 100644 pkg/resourcegenerator/servicemonitor/service_monitor.go rename pkg/resourcegenerator/{core => volume}/volumes.go (99%) create mode 100644 pkg/resourceprocessor/crud.go create mode 100644 pkg/resourceprocessor/diffs.go create mode 100644 pkg/resourceprocessor/diffs_test.go create mode 100644 pkg/resourceprocessor/processor.go create mode 100644 pkg/resourceprocessor/resource.go create mode 100644 pkg/resourceprocessor/resource_test.go create mode 100644 pkg/resourceschemas/schemas.go create mode 100644 pkg/resourceschemas/schemas_test.go create mode 100644 pkg/testutil/reconciliation.go delete mode 100644 pkg/util/conditions.go delete mode 100644 pkg/util/reconciler.go create mode 100644 tests/application/access-policy/bad-policy-assert.yaml create mode 100644 tests/application/access-policy/bad-policy-error.yaml create mode 100644 tests/application/access-policy/bad-policy.yaml create mode 100644 tests/application/custom-certificate/application-duplicate-ingress-assert.yaml create mode 100644 tests/application/custom-certificate/application-duplicate-ingress-error.yaml create mode 100644 tests/application/custom-certificate/application-duplicate-ingress.yaml delete mode 100644 tests/application/subresource-status/application-error-assert.yaml create mode 100644 tests/application/subresource-status/application-generate-error-assert.yaml rename 
tests/application/subresource-status/{application-error.yaml => application-generate-error.yaml} (53%) create mode 100644 tests/application/subresource-status/application-resource-apply-error-assert.yaml create mode 100644 tests/application/subresource-status/application-resource-apply-error.yaml create mode 100644 tests/application/watched/certificate-assert.yaml create mode 100644 tests/application/watched/certificate-errors.yaml create mode 100644 tests/application/watched/certificate.yaml create mode 100644 tests/application/watched/chainsaw-test.yaml create mode 100644 tests/cluster-config/gcp-identity-config.yaml create mode 100644 tests/cluster-config/ns-exclusions-config.yaml create mode 100644 tests/skipjob/access-policy-job/skipjob-cron-assert.yaml create mode 100644 tests/skipjob/access-policy-job/skipjob-cron.yaml create mode 100644 tests/skipjob/immutable-container/chainsaw-test.yaml create mode 100644 tests/skipjob/immutable-container/skipjob-assert.yaml create mode 100644 tests/skipjob/immutable-container/skipjob-patch-error.yaml create mode 100644 tests/skipjob/immutable-container/skipjob-patch.yaml create mode 100644 tests/skipjob/immutable-container/skipjob.yaml diff --git a/.gitignore b/.gitignore index ee7e4bd0..7f84a131 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ .vscode/ .idea/ dist/ -istio* \ No newline at end of file +istio* +!**/istio/ \ No newline at end of file diff --git a/Makefile b/Makefile index 38c09d53..d8bf2324 100644 --- a/Makefile +++ b/Makefile @@ -95,7 +95,7 @@ install-digdirator-crds: install-skiperator: generate @kubectl create namespace skiperator-system --context $(SKIPERATOR_CONTEXT) || true @kubectl apply -f config/ --recursive --context $(SKIPERATOR_CONTEXT) - @kubectl apply -f samples/ --recursive --context $(SKIPERATOR_CONTEXT) || true + @kubectl apply -f tests/cluster-config/ --recursive --context $(SKIPERATOR_CONTEXT) || true .PHONY: install-test-tools install-test-tools: @@ -112,6 +112,15 @@ test: install-test-tools install-skiperator @./bin/chainsaw test --kube-context $(SKIPERATOR_CONTEXT) --config tests/config.yaml --test-dir tests/ && \ echo "Test succeeded" || (echo "Test failed" && exit 1) +.PHONY: run-unit-tests +run-unit-tests: + @failed_tests=$$(go test ./... 2>&1 | grep "^FAIL" | awk '{print $$2}'); \ + if [ -n "$$failed_tests" ]; then \ + echo -e "\033[31mFailed Unit Tests: [$$failed_tests]\033[0m" && exit 1; \ + else \ + echo -e "\033[32mAll unit tests passed\033[0m"; \ + fi + .PHONY: run-test run-test: build @echo "Starting skiperator in background..." @@ -127,4 +136,4 @@ run-test: build $(MAKE) test-single dir=$(TEST_DIR); \ fi; \ ) && \ - (echo "Stopping skiperator (PID $$PID)..." && kill $$PID) || (echo "Test or skiperator failed. Stopping skiperator (PID $$PID)" && kill $$PID && exit 1) + (echo "Stopping skiperator (PID $$PID)..." && kill $$PID && echo "running unit tests..." && $(MAKE) run-unit-tests) || (echo "Test or skiperator failed. 
Stopping skiperator (PID $$PID)" && kill $$PID && exit 1) diff --git a/api/v1alpha1/application_types.go b/api/v1alpha1/application_types.go index de184c5a..6d1a9ee6 100644 --- a/api/v1alpha1/application_types.go +++ b/api/v1alpha1/application_types.go @@ -3,15 +3,13 @@ package v1alpha1 import ( "encoding/json" "errors" - "time" - "github.com/kartverket/skiperator/api/v1alpha1/digdirator" "github.com/kartverket/skiperator/api/v1alpha1/podtypes" - "golang.org/x/exp/slices" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "time" ) // +kubebuilder:object:root=true @@ -30,14 +28,13 @@ type ApplicationList struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:shortName="app" -// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.application.status` +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.summary.status` type Application struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec ApplicationSpec `json:"spec,omitempty"` - - Status ApplicationStatus `json:"status,omitempty"` + Spec ApplicationSpec `json:"spec,omitempty"` + Status SkiperatorStatus `json:"status,omitempty"` } // +kubebuilder:object:generate=true @@ -68,7 +65,6 @@ type ApplicationSpec struct { // Ingresses must be lowercase, contain no spaces, be a non-empty string, and have a hostname/domain separated by a period // They can optionally be suffixed with a plus and name of a custom TLS secret located in the istio-gateways namespace. // E.g. "foo.atkv3-dev.kartverket-intern.cloud+env-wildcard-cert" - // //+kubebuilder:validation:Optional Ingresses []string `json:"ingresses,omitempty"` @@ -329,38 +325,6 @@ type PrometheusConfig struct { AllowAllMetrics bool `json:"allowAllMetrics,omitempty"` } -// ApplicationStatus -// -// A status field shown on the Application resource which contains information regarding all controllers present on the Application. -// Will for example show errors on the Deployment field when something went wrong when attempting to create a Deployment. 
-// -// +kubebuilder:object:generate=true -type ApplicationStatus struct { - ApplicationStatus Status `json:"application"` - ControllersStatus map[string]Status `json:"controllers"` -} - -// Status -// -// +kubebuilder:object:generate=true -type Status struct { - // +kubebuilder:default="Synced" - Status StatusNames `json:"status"` - // +kubebuilder:default="hello" - Message string `json:"message"` - // +kubebuilder:default="hello" - TimeStamp string `json:"timestamp"` -} - -type StatusNames string - -const ( - SYNCED StatusNames = "Synced" - PROGRESSING StatusNames = "Progressing" - ERROR StatusNames = "Error" - PENDING StatusNames = "Pending" -) - func NewDefaultReplicas() Replicas { return Replicas{ Min: 2, @@ -418,106 +382,65 @@ func (a *Application) FillDefaultsSpec() { } func (a *Application) FillDefaultsStatus() { - if a.Status.ApplicationStatus.Status == "" { - a.Status.ApplicationStatus = Status{ - Status: PENDING, - Message: "Default application status, application has not initialized yet", - TimeStamp: time.Now().String(), - } - } + var msg string - if a.Status.ControllersStatus == nil { - a.Status.ControllersStatus = make(map[string]Status) + if a.Status.Summary.Status == "" { + msg = "Default Application status, it has not initialized yet" + } else { + msg = "Application is trying to reconcile" } -} -func (a *Application) UpdateApplicationStatus() { - newApplicationStatus := a.CalculateApplicationStatus() - if newApplicationStatus.Status == a.Status.ApplicationStatus.Status { - return + a.Status.Summary = Status{ + Status: PENDING, + Message: msg, + TimeStamp: time.Now().String(), } - a.Status.ApplicationStatus = newApplicationStatus -} - -func (a *Application) UpdateControllerStatus(controllerName string, message string, status StatusNames) { - if a.Status.ControllersStatus[controllerName].Status == status { - return + if a.Status.SubResources == nil { + a.Status.SubResources = make(map[string]Status) } - newStatus := Status{ - Status: status, - Message: message, - TimeStamp: time.Now().String(), + if len(a.Status.Conditions) == 0 { + a.Status.Conditions = make([]metav1.Condition, 0) } - a.Status.ControllersStatus[controllerName] = newStatus - - a.UpdateApplicationStatus() - } -func (a *Application) ShouldUpdateApplicationStatus(newStatus Status) bool { - shouldUpdate := newStatus.Status != a.Status.ApplicationStatus.Status - - return shouldUpdate +func (a *Application) GetStatus() *SkiperatorStatus { + return &a.Status } -func (a *Application) CalculateApplicationStatus() Status { - returnStatus := Status{ - Status: ERROR, - Message: "CALCULATION DEFAULT, YOU SHOULD NOT SEE THIS MESSAGE. 
PLEASE LET SKIP KNOW IF THIS MESSAGE IS VISIBLE", - TimeStamp: time.Now().String(), - } - statusList := []string{} - for _, s := range a.Status.ControllersStatus { - statusList = append(statusList, string(s.Status)) - } - - if slices.IndexFunc(statusList, func(s string) bool { return s == string(ERROR) }) != -1 { - returnStatus.Status = ERROR - returnStatus.Message = "One of the controllers is in a failed state" - return returnStatus - } - - if slices.IndexFunc(statusList, func(s string) bool { return s == string(PROGRESSING) }) != -1 { - returnStatus.Status = PROGRESSING - returnStatus.Message = "One of the controllers is progressing" - return returnStatus - } +func (a *Application) SetStatus(status SkiperatorStatus) { + a.Status = status +} - if allSameStatus(statusList) { - returnStatus.Status = StatusNames(statusList[0]) - if returnStatus.Status == SYNCED { - returnStatus.Message = "All controllers synced" - } else if returnStatus.Status == PENDING { - returnStatus.Message = "All controllers pending" - } - return returnStatus +// TODO clean up labels +func (a *Application) GetDefaultLabels() map[string]string { + return map[string]string{ + "app.kubernetes.io/managed-by": "skiperator", + "skiperator.kartverket.no/controller": "application", + "application.skiperator.no/app": a.Name, + "application.skiperator.no/app-name": a.Name, + "application.skiperator.no/app-namespace": a.Namespace, } - - return returnStatus } -func allSameStatus(a []string) bool { - for _, v := range a { - if v != a[0] { - return false - } +func (a *Application) GetCommonSpec() *CommonSpec { + return &CommonSpec{ + GCP: a.Spec.GCP, + AccessPolicy: a.Spec.AccessPolicy, } - return true } -func (s *ApplicationSpec) Hosts() ([]Host, error) { - var hosts []Host +func (s *ApplicationSpec) Hosts() (HostCollection, error) { + hosts := NewCollection() + var errorsFound []error for _, ingress := range s.Ingresses { - h, err := NewHost(ingress) + err := hosts.Add(ingress) if err != nil { errorsFound = append(errorsFound, err) continue } - - hosts = append(hosts, *h) } return hosts, errors.Join(errorsFound...) 
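Reviewer note on the hunk above: ApplicationSpec.Hosts now returns the map-backed HostCollection (defined in the hosts.go diff below) instead of a []Host slice, so duplicate ingress hostnames collapse into a single entry, and only a duplicate of a host that already carries a custom certificate is rejected. A minimal consumption sketch, assuming only the methods added in this patch (the hostnames are illustrative):

package main

import (
	"fmt"

	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
)

func main() {
	spec := skiperatorv1alpha1.ApplicationSpec{
		Ingresses: []string{
			"foo.atkv3-dev.kartverket-intern.cloud",
			"foo.atkv3-dev.kartverket-intern.cloud", // duplicate plain host: collapsed, not an error
			"bar.atkv3-dev.kartverket-intern.cloud+env-wildcard-cert",
		},
	}

	// Hosts() aggregates parse failures with errors.Join instead of
	// aborting on the first bad ingress.
	hosts, err := spec.Hosts()
	if err != nil {
		fmt.Println("invalid ingresses:", err)
	}

	fmt.Println(hosts.Count()) // 2: the collection is keyed by hostname
	for _, h := range hosts.AllHosts() {
		fmt.Printf("%s (custom cert: %v)\n", h.Hostname, h.UsesCustomCert())
	}
}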
diff --git a/api/v1alpha1/hosts.go b/api/v1alpha1/hosts.go index 9520dfef..15cc531e 100644 --- a/api/v1alpha1/hosts.go +++ b/api/v1alpha1/hosts.go @@ -15,6 +15,10 @@ type Host struct { CustomCertificateSecret *string } +type HostCollection struct { + hosts map[string]*Host +} + func NewHost(hostname string) (*Host, error) { if len(hostname) == 0 { return nil, fmt.Errorf("hostname cannot be empty") @@ -52,3 +56,52 @@ func NewHost(hostname string) (*Host, error) { func (h *Host) UsesCustomCert() bool { return h.CustomCertificateSecret != nil } + +func NewCollection() HostCollection { + return HostCollection{ + hosts: map[string]*Host{}, + } +} + +func (hs *HostCollection) Add(hostname string) error { + h, err := NewHost(hostname) + if err != nil { + return err + } + + existingValue, alreadyPresent := hs.hosts[h.Hostname] + + switch alreadyPresent { + case true: + if existingValue.UsesCustomCert() { + return fmt.Errorf("host '%s' is already defined and using a custom certificate", existingValue.Hostname) + } + fallthrough + case false: + fallthrough + default: + hs.hosts[h.Hostname] = h + } + + return nil +} + +func (hs *HostCollection) AllHosts() []*Host { + hosts := make([]*Host, 0, len(hs.hosts)) + for _, host := range hs.hosts { + hosts = append(hosts, host) + } + return hosts +} + +func (hs *HostCollection) Hostnames() []string { + hostnames := make([]string, 0, len(hs.hosts)) + for hostname := range hs.hosts { + hostnames = append(hostnames, hostname) + } + return hostnames +} + +func (hs *HostCollection) Count() int { + return len(hs.hosts) +} diff --git a/api/v1alpha1/podtypes/access_policy.go b/api/v1alpha1/podtypes/access_policy.go index 75c4c86f..cfa24ac0 100644 --- a/api/v1alpha1/podtypes/access_policy.go +++ b/api/v1alpha1/podtypes/access_policy.go @@ -1,5 +1,7 @@ package podtypes +import v1 "k8s.io/api/networking/v1" + // AccessPolicy // // Zero trust dictates that only applications with a reason for being able @@ -19,7 +21,7 @@ type AccessPolicy struct { // internet is the Application allowed to send requests to? // //+kubebuilder:validation:Optional - Outbound OutboundPolicy `json:"outbound,omitempty"` + Outbound *OutboundPolicy `json:"outbound,omitempty"` } // InboundPolicy @@ -76,6 +78,9 @@ type InternalRule struct { // //+kubebuilder:validation:Optional NamespacesByLabel map[string]string `json:"namespacesByLabel,omitempty"` + // The ports to allow for the above application. + //+kubebuilder:validation:Optional + Ports []v1.NetworkPolicyPort `json:"ports,omitempty"` } // ExternalRule diff --git a/api/v1alpha1/podtypes/zz_generated.deepcopy.go b/api/v1alpha1/podtypes/zz_generated.deepcopy.go index 0b5b7c57..2bc23168 100644 --- a/api/v1alpha1/podtypes/zz_generated.deepcopy.go +++ b/api/v1alpha1/podtypes/zz_generated.deepcopy.go @@ -5,7 +5,8 @@ package podtypes import ( - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/api/networking/v1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -16,7 +17,11 @@ func (in *AccessPolicy) DeepCopyInto(out *AccessPolicy) { *out = new(InboundPolicy) (*in).DeepCopyInto(*out) } - in.Outbound.DeepCopyInto(&out.Outbound) + if in.Outbound != nil { + in, out := &in.Outbound, &out.Outbound + *out = new(OutboundPolicy) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessPolicy. 
@@ -81,6 +86,13 @@ func (in *InternalRule) DeepCopyInto(out *InternalRule) { (*out)[key] = val } } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]v1.NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalRule. @@ -149,14 +161,14 @@ func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { *out = *in if in.Limits != nil { in, out := &in.Limits, &out.Limits - *out = make(v1.ResourceList, len(*in)) + *out = make(corev1.ResourceList, len(*in)) for key, val := range *in { (*out)[key] = val.DeepCopy() } } if in.Requests != nil { in, out := &in.Requests, &out.Requests - *out = make(v1.ResourceList, len(*in)) + *out = make(corev1.ResourceList, len(*in)) for key, val := range *in { (*out)[key] = val.DeepCopy() } diff --git a/api/v1alpha1/routing_types.go b/api/v1alpha1/routing_types.go index ae36bfe8..19e7e32e 100644 --- a/api/v1alpha1/routing_types.go +++ b/api/v1alpha1/routing_types.go @@ -2,8 +2,9 @@ package v1alpha1 import ( "fmt" - "github.com/kartverket/skiperator/pkg/util" + "github.com/nais/liberator/pkg/namegen" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" ) //+kubebuilder:object:root=true @@ -18,13 +19,14 @@ type RoutingList struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:resource:shortName="routing" +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.summary.status` type Routing struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` //+kubebuilder:validation:Required - Spec RoutingSpec `json:"spec,omitempty"` - Status RoutingStatus `json:"status,omitempty"` + Spec RoutingSpec `json:"spec,omitempty"` + Status SkiperatorStatus `json:"status,omitempty"` } // +kubebuilder:object:generate=true @@ -49,6 +51,8 @@ type Route struct { //+kubebuilder:validation:Optional //+kubebuilder:default:=false RewriteUri bool `json:"rewriteUri,omitempty"` + //+kubebuilder:validation:Optional + Port int32 `json:"port,omitempty"` } // +kubebuilder:object:generate=true @@ -75,7 +79,10 @@ func (in *Routing) GetVirtualServiceName() string { func (in *Routing) GetCertificateName() (string, error) { namePrefix := fmt.Sprintf("%s-%s", in.Namespace, in.Name) - return util.GetSecretName(namePrefix, "routing-ingress") + // https://github.com/nais/naiserator/blob/faed273b68dff8541e1e2889fda5d017730f9796/pkg/resourcecreator/idporten/idporten.go#L82 + // https://github.com/nais/naiserator/blob/faed273b68dff8541e1e2889fda5d017730f9796/pkg/resourcecreator/idporten/idporten.go#L170 + secretName, err := namegen.ShortName(fmt.Sprintf("%s-%s", namePrefix, "routing-ingress"), validation.DNS1035LabelMaxLength) + return secretName, err } func (in *Routing) GetConditions() []metav1.Condition { @@ -89,3 +96,24 @@ func (in *Routing) SetConditions(conditions []metav1.Condition) { func (in *RoutingSpec) GetHost() (*Host, error) { return NewHost(in.Hostname) } + +func (in *Routing) GetStatus() *SkiperatorStatus { + return &in.Status +} + +func (in *Routing) SetStatus(status SkiperatorStatus) { + in.Status = status +} + +func (in *Routing) GetDefaultLabels() map[string]string { + return map[string]string{ + "app.kubernetes.io/managed-by": "skiperator", + "skiperator.kartverket.no/controller": "routing", + "skiperator.kartverket.no/routing-name": in.Name, + "skiperator.kartverket.no/source-namespace": in.Namespace, + } 
+} + +func (in *Routing) GetCommonSpec() *CommonSpec { + panic("common spec not available for routing resource type") +} diff --git a/api/v1alpha1/skipjob_defaults.go b/api/v1alpha1/skipjob_defaults.go deleted file mode 100644 index e8c200c1..00000000 --- a/api/v1alpha1/skipjob_defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -package v1alpha1 - -import ( - "dario.cat/mergo" - "github.com/kartverket/skiperator/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - DefaultTTLSecondsAfterFinished = int32(60 * 60 * 24 * 7) // One week - DefaultBackoffLimit = int32(6) - - DefaultSuspend = false -) - -var JobCreatedCondition = "SKIPJobCreated" - -func (skipJob *SKIPJob) ApplyDefaults() error { - skipJob.setDefaultAnnotations() - return skipJob.setSkipJobDefaults() -} - -func (skipJob *SKIPJob) setSkipJobDefaults() error { - - defaults := &SKIPJob{ - Spec: SKIPJobSpec{ - Job: &JobSettings{ - TTLSecondsAfterFinished: &DefaultTTLSecondsAfterFinished, - BackoffLimit: &DefaultBackoffLimit, - Suspend: &DefaultSuspend, - }, - }, - Status: SKIPJobStatus{ - Conditions: []metav1.Condition{ - { - Type: JobCreatedCondition, - Status: metav1.ConditionTrue, - LastTransitionTime: metav1.Now(), - Reason: "SKIPJobCreated", - Message: "SKIPJob was created", - }, - }, - }, - } - - if skipJob.Spec.Cron != nil { - defaults.Spec.Cron = &CronSettings{} - - defaults.Spec.Cron.Suspend = util.PointTo(false) - } - - return mergo.Merge(skipJob, defaults) -} - -func (skipJob *SKIPJob) setDefaultAnnotations() { - annotations := skipJob.Annotations - - if annotations == nil { - annotations = map[string]string{} - } - - skipJob.SetAnnotations(annotations) -} diff --git a/api/v1alpha1/skipjob_types.go b/api/v1alpha1/skipjob_types.go index 5d76ac11..61acfa22 100644 --- a/api/v1alpha1/skipjob_types.go +++ b/api/v1alpha1/skipjob_types.go @@ -1,11 +1,27 @@ package v1alpha1 import ( + "dario.cat/mergo" + "fmt" "github.com/kartverket/skiperator/api/v1alpha1/podtypes" - "github.com/kartverket/skiperator/pkg/util" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" + "time" +) + +var ( + DefaultTTLSecondsAfterFinished = int32(60 * 60 * 24 * 7) // One week + DefaultBackoffLimit = int32(6) + + DefaultSuspend = false + JobCreatedCondition = "SKIPJobCreated" + ConditionRunning = "Running" + ConditionFinished = "Finished" + ConditionFailed = "Failed" + SKIPJobReferenceLabelKey = "skiperator.kartverket.no/skipjobName" + IsSKIPJobKey = "skiperator.kartverket.no/skipjob" ) // SKIPJobStatus defines the observed state of SKIPJob @@ -18,6 +34,12 @@ type SKIPJobStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status // +kubebuilder:object:generate=true +// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.summary.status` +// +// A SKIPJob is either defined as a one-off or a scheduled job. If the Cron field is set for SKIPJob, it may not be removed. If the Cron field is unset, it may not be added. +// The Container field of a SKIPJob is only mutable if the Cron field is set. If unset, you must delete your SKIPJob to change container settings. +// +kubebuilder:validation:XValidation:rule="(has(oldSelf.spec.cron) && has(self.spec.cron)) || (!has(oldSelf.spec.cron) && !has(self.spec.cron))", message="After creation of a SKIPJob you may not remove the Cron field if it was previously present, or add it if it was previously omitted. Please delete the SKIPJob to change its nature from a one-off/scheduled job." 
+// +kubebuilder:validation:XValidation:rule="(!has(self.status) || ((!has(self.spec.cron) && (oldSelf.spec.container == self.spec.container)) || has(self.spec.cron)))", message="The field Container is immutable for one-off jobs. Please delete your SKIPJob to change the container's settings." // SKIPJob is the Schema for the skipjobs API type SKIPJob struct { metav1.TypeMeta `json:",inline"` @@ -27,7 +49,7 @@ type SKIPJob struct { Spec SKIPJobSpec `json:"spec"` //+kubebuilder:validation:Optional - Status SKIPJobStatus `json:"status"` + Status SkiperatorStatus `json:"status,omitempty"` } //+kubebuilder:object:root=true @@ -41,11 +63,6 @@ type SKIPJobList struct { // SKIPJobSpec defines the desired state of SKIPJob // -// A SKIPJob is either defined as a one-off or a scheduled job. If the Cron field is set for SKIPJob, it may not be removed. If the Cron field is unset, it may not be added. -// The Container field of a SKIPJob is only mutable if the Cron field is set. If unset, you must delete your SKIPJob to change container settings. -// -// +kubebuilder:validation:XValidation:rule="(has(oldSelf.cron) && has(self.cron)) || (!has(oldSelf.cron) && !has(self.cron))", message="After creation of a SKIPJob you may not remove the Cron field if it was previously present, or add it if it was previously omitted. Please delete the SKIPJob to change its nature from a one-off/scheduled job." -// +kubebuilder:validation:XValidation:rule="((!has(self.cron) && (oldSelf.container == self.container)) || has(self.cron))", message="The field Container is immutable for one-off jobs. Please delete your SKIPJob to change the containers settings." // +kubebuilder:object:generate=true type SKIPJobSpec struct { // Settings for the actual Job. If you use a scheduled job, the settings in here will also specify the template of the job.
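Taken together, the two CEL rules moved onto the SKIPJob type above pin down its mutability: spec.cron can neither be added nor removed after creation, and for cron-less (one-off) jobs spec.container is frozen once the object has a status. A hypothetical illustration (field names assumed from the spec in this file, mirroring the new tests/skipjob/immutable-container fixtures):

# A one-off SKIPJob: spec.cron is omitted at creation time.
apiVersion: skiperator.kartverket.no/v1alpha1
kind: SKIPJob
metadata:
  name: one-off-example
spec:
  container:
    image: some-image:1.0
---
# Rejected on apply: once status exists, changing spec.container on a
# cron-less job violates the second rule (oldSelf.spec.container !=
# self.spec.container). Adding spec.cron instead would trip the first rule.
apiVersion: skiperator.kartverket.no/v1alpha1
kind: SKIPJob
metadata:
  name: one-off-example
spec:
  container:
    image: some-image:2.0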
@@ -172,5 +189,97 @@ type CronSettings struct { } func (skipJob *SKIPJob) KindPostFixedName() string { - return util.ResourceNameWithKindPostfix(skipJob.Name, skipJob.Kind) + return strings.ToLower(fmt.Sprintf("%v-%v", skipJob.Name, skipJob.Kind)) +} + +func (skipJob *SKIPJob) GetStatus() *SkiperatorStatus { + return &skipJob.Status +} +func (skipJob *SKIPJob) SetStatus(status SkiperatorStatus) { + skipJob.Status = status +} + +func (skipJob *SKIPJob) FillDefaultSpec() error { + defaults := &SKIPJob{ + Spec: SKIPJobSpec{ + Job: &JobSettings{ + TTLSecondsAfterFinished: &DefaultTTLSecondsAfterFinished, + BackoffLimit: &DefaultBackoffLimit, + Suspend: &DefaultSuspend, + }, + }, + } + + if skipJob.Spec.Cron != nil { + defaults.Spec.Cron = &CronSettings{} + suspend := false + defaults.Spec.Cron.Suspend = &suspend + } + + return mergo.Merge(skipJob, defaults) +} + +// TODO we should test SKIPJob status better, same for Routing probably +func (skipJob *SKIPJob) FillDefaultStatus() { + var msg string + + if skipJob.Status.Summary.Status == "" { + msg = "Default SKIPJob status, it has not initialized yet" + } else { + msg = "SKIPJob is trying to reconcile" + } + + skipJob.Status.Summary = Status{ + Status: PENDING, + Message: msg, + TimeStamp: time.Now().String(), + } + + if skipJob.Status.SubResources == nil { + skipJob.Status.SubResources = make(map[string]Status) + } + + if len(skipJob.Status.Conditions) == 0 { + skipJob.Status.Conditions = []metav1.Condition{ + { + Type: ConditionRunning, + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: "NotReconciled", + Message: "SKIPJob has not been reconciled yet", + }, + { + Type: ConditionFinished, + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: "NotReconciled", + Message: "SKIPJob has not been reconciled yet", + }, + { + Type: ConditionFailed, + Status: metav1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: "NotReconciled", + Message: "SKIPJob has not been reconciled yet", + }, + } + } +} + +func (skipJob *SKIPJob) GetDefaultLabels() map[string]string { + return map[string]string{ + "app.kubernetes.io/managed-by": "skiperator", + "skiperator.kartverket.no/controller": "skipjob", + // Used by hahaha to know that the Pod should be watched for killing sidecars + IsSKIPJobKey: "true", + // Added to be able to add the SKIPJob to a reconcile queue when Watched Jobs are queued + SKIPJobReferenceLabelKey: skipJob.Name, + } +} + +func (skipJob *SKIPJob) GetCommonSpec() *CommonSpec { + return &CommonSpec{ + GCP: skipJob.Spec.Container.GCP, + AccessPolicy: skipJob.Spec.Container.AccessPolicy, + } } diff --git a/api/v1alpha1/skipns_types.go b/api/v1alpha1/skipns_types.go new file mode 100644 index 00000000..f035582d --- /dev/null +++ b/api/v1alpha1/skipns_types.go @@ -0,0 +1,28 @@ +package v1alpha1 + +import corev1 "k8s.io/api/core/v1" + +/* + * SKIPNamespace is a wrapper for the kubernetes namespace resource, so we can utilize the SKIPObject interface + */ + +type SKIPNamespace struct { + *corev1.Namespace +} + +func (n SKIPNamespace) GetStatus() *SkiperatorStatus { + return &SkiperatorStatus{} +} + +func (n SKIPNamespace) SetStatus(status SkiperatorStatus) {} + +func (n SKIPNamespace) GetDefaultLabels() map[string]string { + return map[string]string{ + "app.kubernetes.io/managed-by": "skiperator", + "skiperator.kartverket.no/controller": "namespace", + } +} + +func (n SKIPNamespace) GetCommonSpec() *CommonSpec { + panic("common spec not available for namespace resource type") +} diff --git 
a/api/v1alpha1/skipobj_interfaces.go b/api/v1alpha1/skipobj_interfaces.go new file mode 100644 index 00000000..468aa145 --- /dev/null +++ b/api/v1alpha1/skipobj_interfaces.go @@ -0,0 +1,23 @@ +package v1alpha1 + +import ( + "fmt" + "github.com/kartverket/skiperator/api/v1alpha1/podtypes" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type SKIPObject interface { + client.Object + GetStatus() *SkiperatorStatus + SetStatus(status SkiperatorStatus) + GetDefaultLabels() map[string]string + GetCommonSpec() *CommonSpec +} + +var ErrNoGVK = fmt.Errorf("no GroupVersionKind found in the resources, cannot process resources") + +// CommonSpec TODO: This needs some more thought. We should probably try to expand on it. v1Alpha2? +type CommonSpec struct { + AccessPolicy *podtypes.AccessPolicy + GCP *podtypes.GCP +} diff --git a/api/v1alpha1/status_types.go b/api/v1alpha1/status_types.go new file mode 100644 index 00000000..b3353bfe --- /dev/null +++ b/api/v1alpha1/status_types.go @@ -0,0 +1,88 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ApplicationStatus +// +// A status field shown on a Skiperator resource which contains information regarding deployment of the resource. +// +kubebuilder:object:generate=true +type SkiperatorStatus struct { + Summary Status `json:"summary"` + SubResources map[string]Status `json:"subresources"` + Conditions []metav1.Condition `json:"conditions"` +} + +// Status +// +// +kubebuilder:object:generate=true +type Status struct { + // +kubebuilder:default="Synced" + Status StatusNames `json:"status"` + // +kubebuilder:default="hello" + Message string `json:"message"` + // +kubebuilder:default="hello" + TimeStamp string `json:"timestamp"` +} + +type StatusNames string + +const ( + SYNCED StatusNames = "Synced" + PROGRESSING StatusNames = "Progressing" + ERROR StatusNames = "Error" + PENDING StatusNames = "Pending" +) + +func (s *SkiperatorStatus) SetSummaryPending() { + s.Summary.Status = PENDING + s.Summary.Message = "Awaiting first reconcile" + s.Summary.TimeStamp = metav1.Now().String() + if s.Conditions == nil { + s.Conditions = make([]metav1.Condition, 0) + } +} + +func (s *SkiperatorStatus) SetSummarySynced() { + s.Summary.Status = SYNCED + s.Summary.Message = "All subresources synced" + s.Summary.TimeStamp = metav1.Now().String() + if s.Conditions == nil { + s.Conditions = make([]metav1.Condition, 0) + } +} + +func (s *SkiperatorStatus) SetSummaryProgressing() { + s.Summary.Status = PROGRESSING + s.Summary.Message = "Resource is progressing" + s.Summary.TimeStamp = metav1.Now().String() + if s.Conditions == nil { + s.Conditions = make([]metav1.Condition, 0) + } + s.SubResources = make(map[string]Status) +} + +func (s *SkiperatorStatus) SetSummaryError(errorMsg string) { + s.Summary.Status = ERROR + s.Summary.Message = errorMsg + s.Summary.TimeStamp = metav1.Now().String() + if s.Conditions == nil { + s.Conditions = make([]metav1.Condition, 0) + } +} + +func (s *SkiperatorStatus) AddSubResourceStatus(object client.Object, message string, status StatusNames) { + if s.SubResources == nil { + s.SubResources = map[string]Status{} + } + kind := object.GetObjectKind().GroupVersionKind().Kind + key := kind + "[" + object.GetName() + "]" + s.SubResources[key] = Status{ + Status: status, + Message: kind + " " + message, + TimeStamp: metav1.Now().String(), + } + +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 875d3762..8ea0c699 100644 
--- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -215,29 +215,6 @@ func (in *ApplicationSpec) DeepCopy() *ApplicationSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { - *out = *in - out.ApplicationStatus = in.ApplicationStatus - if in.ControllersStatus != nil { - in, out := &in.ControllersStatus, &out.ControllersStatus - *out = make(map[string]Status, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationStatus. -func (in *ApplicationStatus) DeepCopy() *ApplicationStatus { - if in == nil { - return nil - } - out := new(ApplicationStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthorizationSettings) DeepCopyInto(out *AuthorizationSettings) { *out = *in @@ -664,6 +641,36 @@ func (in *SKIPJobStatus) DeepCopy() *SKIPJobStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SkiperatorStatus) DeepCopyInto(out *SkiperatorStatus) { + *out = *in + out.Summary = in.Summary + if in.SubResources != nil { + in, out := &in.SubResources, &out.SubResources + *out = make(map[string]Status, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SkiperatorStatus. +func (in *SkiperatorStatus) DeepCopy() *SkiperatorStatus { + if in == nil { + return nil + } + out := new(SkiperatorStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Status) DeepCopyInto(out *Status) { *out = *in diff --git a/cmd/skiperator/main.go b/cmd/skiperator/main.go index 84a6c6ea..d66a3475 100644 --- a/cmd/skiperator/main.go +++ b/cmd/skiperator/main.go @@ -3,40 +3,26 @@ package main import ( "flag" "fmt" + "github.com/kartverket/skiperator/internal/controllers" + "github.com/kartverket/skiperator/internal/controllers/common" "github.com/kartverket/skiperator/pkg/flags" "github.com/kartverket/skiperator/pkg/k8sfeatures" - "github.com/kartverket/skiperator/pkg/util" + "github.com/kartverket/skiperator/pkg/resourceschemas" "k8s.io/client-go/discovery" "k8s.io/client-go/rest" "os" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "strings" - certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" - nais_io_v1 "github.com/nais/liberator/pkg/apis/nais.io/v1" - pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "go.uber.org/zap/zapcore" - autoscalingv2 "k8s.io/api/autoscaling/v2" - policyv1 "k8s.io/api/policy/v1" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - applicationcontroller "github.com/kartverket/skiperator/controllers/application" - namespacecontroller "github.com/kartverket/skiperator/controllers/namespace" - routingcontroller "github.com/kartverket/skiperator/controllers/routing" - skipjobcontroller "github.com/kartverket/skiperator/controllers/skipjob" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" ) //+kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;create;update @@ -51,15 +37,7 @@ var ( ) func init() { - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(skiperatorv1alpha1.AddToScheme(scheme)) - utilruntime.Must(autoscalingv2.AddToScheme(scheme)) - utilruntime.Must(securityv1beta1.AddToScheme(scheme)) - utilruntime.Must(networkingv1beta1.AddToScheme(scheme)) - utilruntime.Must(certmanagerv1.AddToScheme(scheme)) - utilruntime.Must(policyv1.AddToScheme(scheme)) - utilruntime.Must(pov1.AddToScheme(scheme)) - utilruntime.Must(nais_io_v1.AddToScheme(scheme)) + resourceschemas.AddSchemas(scheme) } func main() { @@ -72,6 +50,7 @@ func main() { parsedLogLevel, _ := zapcore.ParseLevel(*logLevel) + //TODO use zap directly so we get more loglevels ctrl.SetLogger(zap.New(zap.UseFlagOptions(&zap.Options{ Development: !*isDeployment, Level: parsedLogLevel, @@ -103,32 +82,32 @@ func main() { os.Exit(1) } - err = (&applicationcontroller.ApplicationReconciler{ - ReconcilerBase: util.NewFromManager(mgr, mgr.GetEventRecorderFor("application-controller")), + err = (&controllers.ApplicationReconciler{ + ReconcilerBase: common.NewFromManager(mgr, mgr.GetEventRecorderFor("application-controller"), resourceschemas.GetApplicationSchemas(mgr.GetScheme())), }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "Application") os.Exit(1) } - err = (&skipjobcontroller.SKIPJobReconciler{ - ReconcilerBase: util.NewFromManager(mgr, mgr.GetEventRecorderFor("skipjob-controller")), + err = (&controllers.SKIPJobReconciler{ + ReconcilerBase: common.NewFromManager(mgr, mgr.GetEventRecorderFor("skipjob-controller"), resourceschemas.GetJobSchemas(mgr.GetScheme())), }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "SKIPJob") os.Exit(1) } - err = (&routingcontroller.RoutingReconciler{ - ReconcilerBase: util.NewFromManager(mgr, mgr.GetEventRecorderFor("routing-controller")), + err = (&controllers.RoutingReconciler{ + ReconcilerBase: common.NewFromManager(mgr, mgr.GetEventRecorderFor("routing-controller"), resourceschemas.GetRoutingSchemas(mgr.GetScheme())), }).SetupWithManager(mgr) if err != nil { setupLog.Error(err, "unable to create controller", "controller", "Routing") os.Exit(1) } - err = (&namespacecontroller.NamespaceReconciler{ - ReconcilerBase: util.NewFromManager(mgr, mgr.GetEventRecorderFor("namespace-controller")), + err = (&controllers.NamespaceReconciler{ + ReconcilerBase: common.NewFromManager(mgr, mgr.GetEventRecorderFor("namespace-controller"), resourceschemas.GetNamespaceSchemas(mgr.GetScheme())), Registry: "ghcr.io", Token: *imagePullToken, 
}).SetupWithManager(mgr) diff --git a/config/crd/skiperator.kartverket.no_applications.yaml b/config/crd/skiperator.kartverket.no_applications.yaml index 064e3fc0..1368578d 100644 --- a/config/crd/skiperator.kartverket.no_applications.yaml +++ b/config/crd/skiperator.kartverket.no_applications.yaml @@ -17,7 +17,7 @@ spec: scope: Namespaced versions: - additionalPrinterColumns: - - jsonPath: .status.application.status + - jsonPath: .status.summary.status name: Status type: string name: v1alpha1 @@ -89,6 +89,38 @@ spec: If both namespace and namespacesByLabel are set, namespace takes precedence and namespacesByLabel is omitted. type: object + ports: + description: The ports to allow for the above application. + items: + description: NetworkPolicyPort describes a port to + allow traffic on + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be allowed by the policy. This field cannot be defined if the port field + is not defined or if the port field is defined as a named (string) port. + The endPort must be equal or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: |- + port represents the port on the given protocol. This can either be a numerical or named + port on a pod. If this field is not provided, this matches all port names and + numbers. + If present, only traffic on the specified protocol AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + default: TCP + description: |- + protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + If not specified, this field defaults to TCP. + type: string + type: object + type: array required: - application type: object @@ -190,6 +222,38 @@ spec: If both namespace and namespacesByLabel are set, namespace takes precedence and namespacesByLabel is omitted. type: object + ports: + description: The ports to allow for the above application. + items: + description: NetworkPolicyPort describes a port to + allow traffic on + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be allowed by the policy. This field cannot be defined if the port field + is not defined or if the port field is defined as a named (string) port. + The endPort must be equal or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: |- + port represents the port on the given protocol. This can either be a numerical or named + port on a pod. If this field is not provided, this matches all port names and + numbers. + If present, only traffic on the specified protocol AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + default: TCP + description: |- + protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + If not specified, this field defaults to TCP. + type: string + type: object + type: array required: - application type: object @@ -1059,27 +1123,78 @@ spec: ApplicationStatus - A status field shown on the Application resource which contains information regarding all controllers present on the Application. - Will for example show errors on the Deployment field when something went wrong when attempting to create a Deployment. + A status field shown on a Skiperator resource which contains information regarding deployment of the resource. 
properties: - application: - description: Status - properties: - message: - default: hello - type: string - status: - default: Synced - type: string - timestamp: - default: hello - type: string - required: - - message - - status - - timestamp - type: object - controllers: + conditions: + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. 
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + subresources: additionalProperties: description: Status properties: @@ -1098,9 +1213,27 @@ spec: - timestamp type: object type: object + summary: + description: Status + properties: + message: + default: hello + type: string + status: + default: Synced + type: string + timestamp: + default: hello + type: string + required: + - message + - status + - timestamp + type: object required: - - application - - controllers + - conditions + - subresources + - summary type: object type: object served: true diff --git a/config/crd/skiperator.kartverket.no_routings.yaml b/config/crd/skiperator.kartverket.no_routings.yaml index 99565c8d..eb7e6a01 100644 --- a/config/crd/skiperator.kartverket.no_routings.yaml +++ b/config/crd/skiperator.kartverket.no_routings.yaml @@ -16,7 +16,11 @@ spec: singular: routing scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.summary.status + name: Status + type: string + name: v1alpha1 schema: openAPIV3Schema: properties: @@ -49,6 +53,9 @@ spec: properties: pathPrefix: type: string + port: + format: int32 + type: integer rewriteUri: default: false type: boolean @@ -64,6 +71,11 @@ spec: - routes type: object status: + description: |- + ApplicationStatus + + + A status field shown on a Skiperator resource which contains information regarding deployment of the resource. properties: conditions: items: @@ -134,6 +146,46 @@ spec: - type type: object type: array + subresources: + additionalProperties: + description: Status + properties: + message: + default: hello + type: string + status: + default: Synced + type: string + timestamp: + default: hello + type: string + required: + - message + - status + - timestamp + type: object + type: object + summary: + description: Status + properties: + message: + default: hello + type: string + status: + default: Synced + type: string + timestamp: + default: hello + type: string + required: + - message + - status + - timestamp + type: object + required: + - conditions + - subresources + - summary type: object type: object served: true diff --git a/config/crd/skiperator.kartverket.no_skipjobs.yaml b/config/crd/skiperator.kartverket.no_skipjobs.yaml index 804d5426..cf72e1e0 100644 --- a/config/crd/skiperator.kartverket.no_skipjobs.yaml +++ b/config/crd/skiperator.kartverket.no_skipjobs.yaml @@ -14,10 +14,17 @@ spec: singular: skipjob scope: Namespaced versions: - - name: v1alpha1 + - additionalPrinterColumns: + - jsonPath: .status.summary.status + name: Status + type: string + name: v1alpha1 schema: openAPIV3Schema: - description: SKIPJob is the Schema for the skipjobs API + description: |- + A SKIPJob is either defined as a one-off or a scheduled job. If the Cron field is set for SKIPJob, it may not be removed. If the Cron field is unset, it may not be added. + The Container field of a SKIPJob is only mutable if the Cron field is set. If unset, you must delete your SKIPJob to change container settings. 
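The CEL rules enforcing this cron contract are relocated further down in this file, from the spec level to the object root so they can reference both spec and status. For readers unfamiliar with CEL transition rules, a small standalone harness like the following can be used to check the cron rule's behavior; it uses cel-go and is purely illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// self/oldSelf stand in for the new and old objects, as in the CRD rule.
	env, err := cel.NewEnv(
		cel.Variable("self", cel.DynType),
		cel.Variable("oldSelf", cel.DynType),
	)
	if err != nil {
		panic(err)
	}

	// The relocated rule: cron must be present in both old and new object,
	// or absent in both.
	ast, iss := env.Compile(`(has(oldSelf.spec.cron) && has(self.spec.cron)) || (!has(oldSelf.spec.cron) && !has(self.spec.cron))`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}

	// Removing cron on update should evaluate to false, i.e. be rejected.
	out, _, err := prg.Eval(map[string]any{
		"oldSelf": map[string]any{"spec": map[string]any{"cron": "0 * * * *"}},
		"self":    map[string]any{"spec": map[string]any{}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // false
}
```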
+ SKIPJob is the Schema for the skipjobs API properties: apiVersion: description: |- @@ -37,12 +44,7 @@ spec: metadata: type: object spec: - description: |- - SKIPJobSpec defines the desired state of SKIPJob - - - A SKIPJob is either defined as a one-off or a scheduled job. If the Cron field is set for SKIPJob, it may not be removed. If the Cron field is unset, it may not be added. - The Container field of a SKIPJob is only mutable if the Cron field is set. If unset, you must delete your SKIPJob to change container settings. + description: SKIPJobSpec defines the desired state of SKIPJob properties: container: description: |- @@ -97,6 +99,38 @@ spec: are set, namespace takes precedence and namespacesByLabel is omitted. type: object + ports: + description: The ports to allow for the above application. + items: + description: NetworkPolicyPort describes a port + to allow traffic on + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be allowed by the policy. This field cannot be defined if the port field + is not defined or if the port field is defined as a named (string) port. + The endPort must be equal or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: |- + port represents the port on the given protocol. This can either be a numerical or named + port on a pod. If this field is not provided, this matches all port names and + numbers. + If present, only traffic on the specified protocol AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + default: TCP + description: |- + protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + If not specified, this field defaults to TCP. + type: string + type: object + type: array required: - application type: object @@ -201,6 +235,38 @@ spec: are set, namespace takes precedence and namespacesByLabel is omitted. type: object + ports: + description: The ports to allow for the above application. + items: + description: NetworkPolicyPort describes a port + to allow traffic on + properties: + endPort: + description: |- + endPort indicates that the range of ports from port to endPort if set, inclusive, + should be allowed by the policy. This field cannot be defined if the port field + is not defined or if the port field is defined as a named (string) port. + The endPort must be equal or greater than port. + format: int32 + type: integer + port: + anyOf: + - type: integer + - type: string + description: |- + port represents the port on the given protocol. This can either be a numerical or named + port on a pod. If this field is not provided, this matches all port names and + numbers. + If present, only traffic on the specified protocol AND port will be matched. + x-kubernetes-int-or-string: true + protocol: + default: TCP + description: |- + protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. + If not specified, this field defaults to TCP. + type: string + type: object + type: array required: - application type: object @@ -787,19 +853,12 @@ spec: required: - container type: object - x-kubernetes-validations: - - message: After creation of a SKIPJob you may not remove the Cron field - if it was previously present, or add it if it was previously omitted. - Please delete the SKIPJob to change its nature from a one-off/scheduled - job. 
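The ports list added to the access policy rules above (here and in the Application CRD alike) embeds the upstream NetworkPolicyPort type verbatim, so values carry straight into generated NetworkPolicy rules. A small self-contained construction with placeholder values:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	tcp := corev1.ProtocolTCP
	port := intstr.FromInt(8080)
	endPort := int32(8090)

	// Allows 8080-8090/TCP; endPort is inclusive and requires a numeric
	// (not named) port, exactly as the schema text above states.
	rulePorts := []networkingv1.NetworkPolicyPort{{
		Protocol: &tcp,
		Port:     &port,
		EndPort:  &endPort,
	}}

	fmt.Printf("%+v\n", rulePorts)
}
```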
- rule: (has(oldSelf.cron) && has(self.cron)) || (!has(oldSelf.cron) && - !has(self.cron)) - - message: The field Container is immutable for one-off jobs. Please delete - your SKIPJob to change the containers settings. - rule: ((!has(self.cron) && (oldSelf.container == self.container)) || - has(self.cron)) status: - description: SKIPJobStatus defines the observed state of SKIPJob + description: |- + ApplicationStatus + + + A status field shown on a Skiperator resource which contains information regarding deployment of the resource. properties: conditions: items: @@ -870,10 +929,60 @@ spec: - type type: object type: array + subresources: + additionalProperties: + description: Status + properties: + message: + default: hello + type: string + status: + default: Synced + type: string + timestamp: + default: hello + type: string + required: + - message + - status + - timestamp + type: object + type: object + summary: + description: Status + properties: + message: + default: hello + type: string + status: + default: Synced + type: string + timestamp: + default: hello + type: string + required: + - message + - status + - timestamp + type: object + required: + - conditions + - subresources + - summary type: object required: - spec type: object + x-kubernetes-validations: + - message: After creation of a SKIPJob you may not remove the Cron field if + it was previously present, or add it if it was previously omitted. Please + delete the SKIPJob to change its nature from a one-off/scheduled job. + rule: (has(oldSelf.spec.cron) && has(self.spec.cron)) || (!has(oldSelf.spec.cron) + && !has(self.spec.cron)) + - message: The field Container is immutable for one-off jobs. Please delete + your SKIPJob to change the containers settings. + rule: (!has(self.status) || ((!has(self.spec.cron) && (oldSelf.spec.container + == self.spec.container)) || has(self.spec.cron))) served: true storage: true subresources: diff --git a/controllers/application/authorization_policy.go b/controllers/application/authorization_policy.go deleted file mode 100644 index 59f93361..00000000 --- a/controllers/application/authorization_policy.go +++ /dev/null @@ -1,112 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - securityv1beta1api "istio.io/api/security/v1beta1" - typev1beta1 "istio.io/api/type/v1beta1" - securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileAuthorizationPolicy(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "AuthorizationPolicy" - r.SetControllerProgressing(ctx, application, controllerName) - - defaultDenyPaths := []string{ - "/actuator*", - } - defaultDenyAuthPolicy := getDefaultDenyPolicy(application, defaultDenyPaths) - - shouldReconcile, err := r.ShouldReconcile(ctx, &defaultDenyAuthPolicy) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if application.Spec.AuthorizationSettings != nil { - if application.Spec.AuthorizationSettings.AllowAll == true { - err := r.GetClient().Delete(ctx, &defaultDenyAuthPolicy) - err = client.IgnoreNotFound(err) - 
if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, nil) - return util.DoNotRequeue() - } - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &defaultDenyAuthPolicy, func() error { - err := ctrlutil.SetControllerReference(application, &defaultDenyAuthPolicy, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&defaultDenyAuthPolicy, *application) - util.SetCommonAnnotations(&defaultDenyAuthPolicy) - - if application.Spec.AuthorizationSettings != nil { - - // As of now we only use one rule and one operation for all default denies. No need to loop over them all - defaultDenyToOperation := defaultDenyAuthPolicy.Spec.Rules[0].To[0].Operation - defaultDenyToOperation.NotPaths = nil - - if len(application.Spec.AuthorizationSettings.AllowList) > 0 { - for _, endpoint := range application.Spec.AuthorizationSettings.AllowList { - defaultDenyToOperation.NotPaths = append(defaultDenyToOperation.NotPaths, endpoint) - } - } - } - - // update defaultDenyAuthPolicy rules and action - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -func getGeneralFromRule() []*securityv1beta1api.Rule_From { - return []*securityv1beta1api.Rule_From{ - { - Source: &securityv1beta1api.Source{ - Namespaces: []string{"istio-gateways"}, - }, - }, - } -} - -func getDefaultDenyPolicy(application *skiperatorv1alpha1.Application, denyPaths []string) securityv1beta1.AuthorizationPolicy { - return securityv1beta1.AuthorizationPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: application.Namespace, - Name: application.Name + "-deny", - }, - Spec: securityv1beta1api.AuthorizationPolicy{ - Action: securityv1beta1api.AuthorizationPolicy_DENY, - Rules: []*securityv1beta1api.Rule{ - { - To: []*securityv1beta1api.Rule_To{ - { - Operation: &securityv1beta1api.Operation{ - Paths: denyPaths, - }, - }, - }, - From: getGeneralFromRule(), - }, - }, - Selector: &typev1beta1.WorkloadSelector{ - MatchLabels: util.GetPodAppSelector(application.Name), - }, - }, - } -} diff --git a/controllers/application/certificate.go b/controllers/application/certificate.go deleted file mode 100644 index bd0117f4..00000000 --- a/controllers/application/certificate.go +++ /dev/null @@ -1,191 +0,0 @@ -package applicationcontroller - -import ( - "context" - "fmt" - "regexp" - - certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" - v1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - "golang.org/x/exp/slices" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) SkiperatorOwnedCertRequests(_ context.Context, obj client.Object) []reconcile.Request { - certificate, isCert := obj.(*certmanagerv1.Certificate) - - if !isCert { - return nil - } - - isSkiperatorOwned := certificate.Labels["app.kubernetes.io/managed-by"] == "skiperator" && - certificate.Labels["skiperator.skiperator.no/controller"] == "application" - - requests := 
make([]reconcile.Request, 0) - - if isSkiperatorOwned { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: certificate.Labels["application.skiperator.no/app-namespace"], - Name: certificate.Labels["application.skiperator.no/app-name"], - }, - }) - } - - return requests -} - -func (r *ApplicationReconciler) reconcileCertificate(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - - controllerName := "Certificate" - r.SetControllerProgressing(ctx, application, controllerName) - - // Generate separate gateway for each ingress - hosts, err := application.Spec.Hosts() - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.DoNotRequeue() - } - - for _, h := range hosts { - certificateName := fmt.Sprintf("%s-%s-ingress-%x", application.Namespace, application.Name, util.GenerateHashFromName(h.Hostname)) - - certificate := certmanagerv1.Certificate{ObjectMeta: metav1.ObjectMeta{Namespace: "istio-gateways", Name: certificateName}} - - shouldReconcile, err := r.ShouldReconcile(ctx, &certificate) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - if !h.UsesCustomCert() { - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &certificate, func() error { - r.SetLabelsFromApplication(&certificate, *application) - - certificate.Spec = certmanagerv1.CertificateSpec{ - IssuerRef: v1.ObjectReference{ - Kind: "ClusterIssuer", - Name: "cluster-issuer", // Name defined in https://github.com/kartverket/certificate-management/blob/main/clusterissuer.tf - }, - DNSNames: []string{h.Hostname}, - SecretName: certificateName, - } - - certificate.Labels = getLabels(certificate, application) - - return nil - }) - } else { - secret, err := util.GetSecret(r.GetClient(), ctx, types.NamespacedName{Namespace: "istio-gateways", Name: *h.CustomCertificateSecret}) - if err != nil { - fmt.Errorf("Failed to get secret %s", *h.CustomCertificateSecret) - r.SetControllerError(ctx, application, controllerName, err) - return util.DoNotRequeue() - } - if secret.Type != corev1.SecretTypeTLS { - err = fmt.Errorf("Secret %s is not of type TLS", *h.CustomCertificateSecret) - r.SetControllerError(ctx, application, controllerName, err) - return util.DoNotRequeue() - } - } - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - // Clear out unused certs - certificates, err := r.GetSkiperatorOwnedCertificates(ctx) - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - // Could we get in trouble with shouldReconcile here? 
I'm not entirely sure - for _, certificate := range certificates.Items { - - shouldReconcile, err := r.ShouldReconcile(ctx, &certificate) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - certificateInApplicationSpecIndex := slices.IndexFunc(hosts, func(h skiperatorv1alpha1.Host) bool { - if h.UsesCustomCert() { - return false - } - certificateName := fmt.Sprintf("%s-%s-ingress-%x", application.Namespace, application.Name, util.GenerateHashFromName(h.Hostname)) - return certificate.Name == certificateName - }) - certificateInApplicationSpec := certificateInApplicationSpecIndex != -1 - if certificateInApplicationSpec { - continue - } - - // We want to delete certificate which are not in the spec, but still "owned" by the application. - // This should be the case for any certificate not in spec from the earlier continue, if the name still matches --ingress-* - if !r.IsApplicationsCertificate(ctx, *application, certificate) { - continue - } - - // Delete the rest - err = r.GetClient().Delete(ctx, &certificate) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -func getLabels(certificate certmanagerv1.Certificate, application *skiperatorv1alpha1.Application) map[string]string { - certLabels := certificate.Labels - if len(certLabels) == 0 { - certLabels = make(map[string]string) - } - certLabels["app.kubernetes.io/managed-by"] = "skiperator" - - // TODO Find better label names here - certLabels["skiperator.skiperator.no/controller"] = "application" - certLabels["application.skiperator.no/app-name"] = application.Name - certLabels["application.skiperator.no/app-namespace"] = application.Namespace - - return certLabels -} - -func (r *ApplicationReconciler) GetSkiperatorOwnedCertificates(context context.Context) (certmanagerv1.CertificateList, error) { - certificates := certmanagerv1.CertificateList{} - err := r.GetClient().List(context, &certificates, client.MatchingLabels{ - "app.kubernetes.io/managed-by": "skiperator", - }) - - return certificates, err -} - -func (r *ApplicationReconciler) IsApplicationsCertificate(context context.Context, application skiperatorv1alpha1.Application, certificate certmanagerv1.Certificate) bool { - applicationNamespacedName := application.Namespace + "-" + application.Name - certNameMatchesApplicationNamespacedName, _ := regexp.MatchString("^"+applicationNamespacedName+"-ingress-.+$", certificate.Name) - - return certNameMatchesApplicationNamespacedName -} diff --git a/controllers/application/configmap.go b/controllers/application/configmap.go deleted file mode 100644 index 3bfd9b29..00000000 --- a/controllers/application/configmap.go +++ /dev/null @@ -1,108 +0,0 @@ -package applicationcontroller - -import ( - "context" - "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type Config struct { - Type string 
`json:"type"` - Audience string `json:"audience"` - ServiceAccountImpersonationUrl string `json:"service_account_impersonation_url"` - SubjectTokenType string `json:"subject_token_type"` - TokenUrl string `json:"token_url"` - CredentialSource CredentialSource `json:"credential_source"` -} -type CredentialSource struct { - File string `json:"file"` -} - -var controllerName = "ConfigMap" - -func (r *ApplicationReconciler) reconcileConfigMap(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - r.SetControllerProgressing(ctx, application, controllerName) - - if util.IsGCPAuthEnabled(application.Spec.GCP) { - gcpIdentityConfigMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "gcp-identity-config"} - gcpIdentityConfigMap, err := util.GetConfigMap(r.GetClient(), ctx, gcpIdentityConfigMapNamespacedName) - - if !util.ErrIsMissingOrNil( - r.GetRecorder(), - err, - "Cannot find configmap named "+gcpIdentityConfigMapNamespacedName.Name+" in namespace "+gcpIdentityConfigMapNamespacedName.Namespace, - application, - ) { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - err = r.setupGCPAuthConfigMap(ctx, gcpIdentityConfigMap, application) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } else { - gcpAuthConfigMap := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: application.Namespace, - Name: gcp.GetGCPConfigMapName(application.Name), - }, - } - err := client.IgnoreNotFound(r.GetClient().Delete(ctx, &gcpAuthConfigMap)) - if err != nil { - return util.RequeueWithError(err) - } - - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, nil) - - return util.DoNotRequeue() - -} - -func (r *ApplicationReconciler) setupGCPAuthConfigMap(ctx context.Context, gcpIdentityConfigMap corev1.ConfigMap, application *skiperatorv1alpha1.Application) error { - - gcpAuthConfigMapName := gcp.GetGCPConfigMapName(application.Name) - gcpAuthConfigMap, err := gcp.GetGoogleServiceAccountCredentialsConfigMap( - ctx, - application.Namespace, - gcpAuthConfigMapName, - application.Spec.GCP.Auth.ServiceAccount, - gcpIdentityConfigMap, - ) - if err != nil { - return err - } - - credentialsBytes := gcpAuthConfigMap.Data["config"] - - shouldReconcile, err := r.ShouldReconcile(ctx, &gcpAuthConfigMap) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return err - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &gcpAuthConfigMap, func() error { - // Set application as owner of the configmap - err := ctrlutil.SetControllerReference(application, &gcpAuthConfigMap, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - r.SetLabelsFromApplication(&gcpAuthConfigMap, *application) - gcpAuthConfigMap.Data["config"] = credentialsBytes - return nil - }) - - return err -} diff --git a/controllers/application/controller.go b/controllers/application/controller.go deleted file mode 100644 index c8a60deb..00000000 --- a/controllers/application/controller.go +++ /dev/null @@ -1,426 +0,0 @@ -package applicationcontroller - -import ( - "context" - "fmt" - "k8s.io/apimachinery/pkg/types" - "regexp" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - policyv1 "k8s.io/api/policy/v1" - - certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" - skiperatorv1alpha1 
"github.com/kartverket/skiperator/api/v1alpha1" - nais_io_v1 "github.com/nais/liberator/pkg/apis/nais.io/v1" - "golang.org/x/exp/maps" - - "github.com/kartverket/skiperator/pkg/util" - pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" - appsv1 "k8s.io/api/apps/v1" - autoscalingv2 "k8s.io/api/autoscaling/v2" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/validation/field" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=applications;applications/status,verbs=get;list;watch;update -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch -// +kubebuilder:rbac:groups=core,resources=services;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=networking.istio.io,resources=gateways;serviceentries;virtualservices,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=security.istio.io,resources=peerauthentications;authorizationpolicies,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=podmonitors,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=nais.io,resources=maskinportenclients;idportenclients,verbs=get;list;watch;create;update;patch;delete - -type ApplicationReconciler struct { - util.ReconcilerBase -} - -const applicationFinalizer = "skip.statkart.no/finalizer" - -var hostMatchExpression = regexp.MustCompile(`^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$`) - -func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&skiperatorv1alpha1.Application{}). - Owns(&appsv1.Deployment{}). - Owns(&corev1.Service{}). - Owns(&corev1.ConfigMap{}). - Owns(&networkingv1beta1.ServiceEntry{}). - Owns(&networkingv1beta1.Gateway{}, builder.WithPredicates( - util.MatchesPredicate[*networkingv1beta1.Gateway](isIngressGateway), - )). - Owns(&autoscalingv2.HorizontalPodAutoscaler{}). - Owns(&networkingv1beta1.VirtualService{}). - Owns(&securityv1beta1.PeerAuthentication{}). - Owns(&corev1.ServiceAccount{}). - Owns(&policyv1.PodDisruptionBudget{}). 
- Owns(&networkingv1.NetworkPolicy{}). - Owns(&securityv1beta1.AuthorizationPolicy{}). - Owns(&nais_io_v1.MaskinportenClient{}). - Owns(&nais_io_v1.IDPortenClient{}). - Owns(&pov1.ServiceMonitor{}). - Watches(&certmanagerv1.Certificate{}, handler.EnqueueRequestsFromMapFunc(r.SkiperatorOwnedCertRequests)). - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). - Complete(r) -} - -func (r *ApplicationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - application := &skiperatorv1alpha1.Application{} - err := r.GetClient().Get(ctx, req.NamespacedName, application) - - if errors.IsNotFound(err) { - return util.DoNotRequeue() - } else if err != nil { - r.EmitWarningEvent(application, "ReconcileStartFail", "something went wrong fetching the application, it might have been deleted") - return util.RequeueWithError(err) - } - - isApplicationMarkedToBeDeleted := application.GetDeletionTimestamp() != nil - if isApplicationMarkedToBeDeleted { - if ctrlutil.ContainsFinalizer(application, applicationFinalizer) { - if err := r.finalizeApplication(ctx, application); err != nil { - return ctrl.Result{}, err - } - - ctrlutil.RemoveFinalizer(application, applicationFinalizer) - err := r.GetClient().Update(ctx, application) - if err != nil { - return ctrl.Result{}, err - } - } - } - - err = r.validateApplicationSpec(application) - if err != nil { - r.EmitNormalEvent(application, "InvalidApplication", fmt.Sprintf("Application %v failed validation and was rejected, error: %s", application.Name, err.Error())) - return util.RequeueWithError(err) - } - - tmpApplication := application.DeepCopy() - application.FillDefaultsSpec() - if !ctrlutil.ContainsFinalizer(application, applicationFinalizer) { - ctrlutil.AddFinalizer(application, applicationFinalizer) - } - - if len(application.Labels) == 0 { - application.Labels = application.Spec.Labels - } else { - aggregateLabels := application.Labels - maps.Copy(aggregateLabels, application.Spec.Labels) - application.Labels = aggregateLabels - } - - // Add team label - if len(application.Spec.Team) == 0 { - if name, err := r.teamNameForNamespace(ctx, application); err == nil { - application.Spec.Team = name - } - } - - application.FillDefaultsStatus() - - specDiff, err := util.GetObjectDiff(tmpApplication.Spec, application.Spec) - if err != nil { - return util.RequeueWithError(err) - } - - statusDiff, err := util.GetObjectDiff(tmpApplication.Status, application.Status) - if err != nil { - return util.RequeueWithError(err) - } - - // If we update the Application initially on applied defaults before starting reconciling resources we allow all - // updates to be visible even though the controllerDuties may take some time. 
- if len(statusDiff) > 0 { - err := r.GetClient().Status().Update(ctx, application) - return reconcile.Result{Requeue: true}, err - } - - // Finalizer check is due to a bug when updating using controller-runtime - // See https://github.com/kubernetes-sigs/controller-runtime/issues/2453 - if len(specDiff) > 0 || (!ctrlutil.ContainsFinalizer(tmpApplication, applicationFinalizer) && ctrlutil.ContainsFinalizer(application, applicationFinalizer)) { - err := r.GetClient().Update(ctx, application) - return reconcile.Result{Requeue: true}, err - } - - r.EmitNormalEvent(application, "ReconcileStart", fmt.Sprintf("Application %v has started reconciliation loop", application.Name)) - - controllerDuties := []func(context.Context, *skiperatorv1alpha1.Application) (reconcile.Result, error){ - r.reconcileCertificate, - r.reconcileService, - r.reconcileConfigMap, - r.reconcileEgressServiceEntry, - r.reconcileIngressGateway, - r.reconcileIngressVirtualService, - r.reconcileHorizontalPodAutoscaler, - r.reconcilePeerAuthentication, - r.reconcileServiceAccount, - r.reconcileNetworkPolicy, - r.reconcileAuthorizationPolicy, - r.reconcilePodDisruptionBudget, - r.reconcileServiceMonitor, - r.reconcileIDPorten, - r.reconcileMaskinporten, - r.reconcileDeployment, - } - - for _, fn := range controllerDuties { - res, err := fn(ctx, application) - if err != nil { - r.GetClient().Status().Update(ctx, application) - return res, err - } else if res.RequeueAfter > 0 || res.Requeue { - r.GetClient().Status().Update(ctx, application) - return res, nil - } - } - r.GetClient().Status().Update(ctx, application) - r.EmitNormalEvent(application, "ReconcileEnd", fmt.Sprintf("Application %v has finished reconciliation loop", application.Name)) - - return util.RequeueWithError(err) -} - -func (r *ApplicationReconciler) teamNameForNamespace(ctx context.Context, app *skiperatorv1alpha1.Application) (string, error) { - ns := &corev1.Namespace{} - if err := r.GetClient().Get(ctx, types.NamespacedName{Name: app.Namespace}, ns); err != nil { - return "", err - } - - teamValue := ns.Labels["team"] - if len(teamValue) > 0 { - return teamValue, nil - } - - return "", fmt.Errorf("missing value for team label") -} - -func (r *ApplicationReconciler) finalizeApplication(ctx context.Context, application *skiperatorv1alpha1.Application) error { - certificates, err := r.GetSkiperatorOwnedCertificates(ctx) - if err != nil { - return err - } - - for _, certificate := range certificates.Items { - if r.IsApplicationsCertificate(ctx, *application, certificate) { - err = r.GetClient().Delete(ctx, &certificate) - err = client.IgnoreNotFound(err) - if err != nil { - return err - } - } - - } - return err -} - -func (r *ApplicationReconciler) validateApplicationSpec(application *skiperatorv1alpha1.Application) error { - validationFunctions := []func(application *skiperatorv1alpha1.Application) error{ - ValidateIngresses, - } - - for _, validationFunction := range validationFunctions { - err := validationFunction(application) - - if err != nil { - return err - } - } - - return nil -} - -// Name in the form of "servicemonitors.monitoring.coreos.com". 
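The deleted ValidateIngresses below leans entirely on the hostMatchExpression regex declared near the top of this file. A standalone illustration of what that pattern accepts and rejects (the harness itself is illustrative; the regex is copied verbatim from the deleted controller.go):

```go
package main

import (
	"fmt"
	"regexp"
)

var hostMatchExpression = regexp.MustCompile(`^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$`)

func main() {
	for _, h := range []string{
		"example.com",       // valid
		"my-app.example.no", // valid: hyphenated labels allowed
		"Example.com",       // invalid: upper case
		"example",           // invalid: no domain separator
	} {
		fmt.Println(h, hostMatchExpression.MatchString(h))
	}
}
```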
-func (r *ApplicationReconciler) isCrdPresent(ctx context.Context, name string) bool { - result, err := r.GetApiExtensionsClient().ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{}) - if err != nil || result == nil { - return false - } - - return true -} - -func ValidateIngresses(application *skiperatorv1alpha1.Application) error { - var err error - hosts, err := application.Spec.Hosts() - if err != nil { - return err - } - - // TODO: Remove/rewrite? - for _, h := range hosts { - if !hostMatchExpression.MatchString(h.Hostname) { - errMessage := fmt.Sprintf("ingress with value '%s' was not valid. ingress must be lower case, contain no spaces, be a non-empty string, and have a hostname/domain separated by a period", h.Hostname) - return errors.NewInvalid(application.GroupVersionKind().GroupKind(), application.Name, field.ErrorList{ - field.Invalid(field.NewPath("application").Child("spec").Child("ingresses"), application.Spec.Ingresses, errMessage), - }) - } - } - - return nil -} - -func (r *ApplicationReconciler) manageControllerStatus(context context.Context, app *skiperatorv1alpha1.Application, controller string, statusName skiperatorv1alpha1.StatusNames, message string) (reconcile.Result, error) { - app.UpdateControllerStatus(controller, message, statusName) - return util.DoNotRequeue() -} - -func (r *ApplicationReconciler) manageControllerStatusError(context context.Context, app *skiperatorv1alpha1.Application, controller string, issue error) (reconcile.Result, error) { - app.UpdateControllerStatus(controller, issue.Error(), skiperatorv1alpha1.ERROR) - r.EmitWarningEvent(app, "ControllerFault", fmt.Sprintf("%v controller experienced an error: %v", controller, issue.Error())) - return util.RequeueWithError(issue) -} - -func (r *ApplicationReconciler) SetControllerPending(context context.Context, app *skiperatorv1alpha1.Application, controller string) (reconcile.Result, error) { - message := controller + " has been initialized and is pending Skiperator startup" - - return r.manageControllerStatus(context, app, controller, skiperatorv1alpha1.PENDING, message) -} - -func (r *ApplicationReconciler) SetControllerProgressing(context context.Context, app *skiperatorv1alpha1.Application, controller string) (reconcile.Result, error) { - message := controller + " has started sync" - - return r.manageControllerStatus(context, app, controller, skiperatorv1alpha1.PROGRESSING, message) -} - -func (r *ApplicationReconciler) SetControllerSynced(context context.Context, app *skiperatorv1alpha1.Application, controller string) (reconcile.Result, error) { - message := controller + " has finished synchronizing" - - return r.manageControllerStatus(context, app, controller, skiperatorv1alpha1.SYNCED, message) -} - -func (r *ApplicationReconciler) SetControllerError(context context.Context, app *skiperatorv1alpha1.Application, controller string, issue error) (reconcile.Result, error) { - return r.manageControllerStatusError(context, app, controller, issue) -} - -func (r *ApplicationReconciler) SetControllerFinishedOutcome(context context.Context, app *skiperatorv1alpha1.Application, controllerName string, issue error) (reconcile.Result, error) { - if issue != nil { - return r.manageControllerStatusError(context, app, controllerName, issue) - } - - return r.SetControllerSynced(context, app, controllerName) -} - -type ControllerResources string - -const ( - DEPLOYMENT ControllerResources = "Deployment" - POD ControllerResources = "Pod" - SERVICE ControllerResources = "Service" - 
SERVICEACCOUNT ControllerResources = "ServiceAccount" - CONFIGMAP ControllerResources = "ConfigMap" - NETWORKPOLICY ControllerResources = "NetworkPolicy" - GATEWAY ControllerResources = "Gateway" - SERVICEENTRY ControllerResources = "ServiceEntry" - VIRTUALSERVICE ControllerResources = "VirtualService" - PEERAUTHENTICATION ControllerResources = "PeerAuthentication" - HORIZONTALPODAUTOSCALER ControllerResources = "HorizontalPodAutoscaler" - CERTIFICATE ControllerResources = "Certificate" - AUTHORIZATIONPOLICY ControllerResources = "AuthorizationPolicy" -) - -var GroupKindFromControllerResource = map[string]metav1.GroupKind{ - "deployment": { - Group: "apps", - Kind: string(DEPLOYMENT), - }, - "pod": { - Group: "", - Kind: string(POD), - }, - "service": { - Group: "", - Kind: string(SERVICE), - }, - "serviceaccount": { - Group: "", - Kind: string(SERVICEACCOUNT), - }, - "configmaps": { - Group: "", - Kind: string(CONFIGMAP), - }, - "networkpolicy": { - Group: "networking.k8s.io", - Kind: string(NETWORKPOLICY), - }, - "gateway": { - Group: "networking.istio.io", - Kind: string(GATEWAY), - }, - "serviceentry": { - Group: "networking.istio.io", - Kind: string(SERVICEENTRY), - }, - "virtualservice": { - Group: "networking.istio.io", - Kind: string(VIRTUALSERVICE), - }, - "peerauthentication": { - Group: "security.istio.io", - Kind: string(PEERAUTHENTICATION), - }, - "horizontalpodautoscaler": { - Group: "autoscaling", - Kind: string(HORIZONTALPODAUTOSCALER), - }, - "certificate": { - Group: "cert-manager.io", - Kind: string(CERTIFICATE), - }, - "authorizationpolicy": { - Group: "security.istio.io", - Kind: string(AUTHORIZATIONPOLICY), - }, -} - -func (r *ApplicationReconciler) setResourceLabelsIfApplies(obj client.Object, app skiperatorv1alpha1.Application) { - objectGroupVersionKind := obj.GetObjectKind().GroupVersionKind() - - for controllerResource, resourceLabels := range app.Spec.ResourceLabels { - resourceLabelGroupKind, present := GroupKindFromControllerResource[strings.ToLower(controllerResource)] - if present { - if strings.EqualFold(objectGroupVersionKind.Group, resourceLabelGroupKind.Group) && strings.EqualFold(objectGroupVersionKind.Kind, resourceLabelGroupKind.Kind) { - objectLabels := obj.GetLabels() - if len(objectLabels) == 0 { - objectLabels = make(map[string]string) - } - maps.Copy(objectLabels, resourceLabels) - obj.SetLabels(objectLabels) - } - } else { - r.EmitWarningEvent(&app, "MistypedLabel", fmt.Sprintf("could not find according Kind for Resource %v, make sure your resource is spelled correctly", controllerResource)) - } - } -} - -func (r *ApplicationReconciler) SetLabelsFromApplication(object client.Object, app skiperatorv1alpha1.Application) { - labels := object.GetLabels() - if len(labels) == 0 { - labels = make(map[string]string) - } - if app.Spec.Labels != nil { - maps.Copy(labels, app.Spec.Labels) - object.SetLabels(labels) - } - - r.setResourceLabelsIfApplies(object, app) -} diff --git a/controllers/application/egress_service_entry.go b/controllers/application/egress_service_entry.go deleted file mode 100644 index 3bc94213..00000000 --- a/controllers/application/egress_service_entry.go +++ /dev/null @@ -1,92 +0,0 @@ -package applicationcontroller - -import ( - "context" - "fmt" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/resourcegenerator/istio" - "github.com/kartverket/skiperator/pkg/util" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - 
"sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileEgressServiceEntry(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "EgressServiceEntry" - r.SetControllerProgressing(ctx, application, controllerName) - - serviceEntries, err := istio.GetServiceEntries(application.Spec.AccessPolicy, application) - if err != nil { - r.EmitWarningEvent(application, "ServiceEntryError", fmt.Sprintf("something went wrong when fetching service entries: %v", err.Error())) - - return util.RequeueWithError(err) - } - - for _, serviceEntry := range serviceEntries { - // CreateOrPatch gets the object (from cache) before the mutating function is run, masquerading actual changes - // Restoring the Spec from a copy within the mutating func fixes this - desiredServiceEntry := serviceEntry.DeepCopy() - - shouldReconcile, err := r.ShouldReconcile(ctx, &serviceEntry) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &serviceEntry, func() error { - serviceEntry.Spec = desiredServiceEntry.Spec - // Set application as owner of the service entry - err := ctrlutil.SetControllerReference(application, &serviceEntry, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - r.SetLabelsFromApplication(&serviceEntry, *application) - util.SetCommonAnnotations(&serviceEntry) - - return nil - }) - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - serviceEntriesInNamespace := networkingv1beta1.ServiceEntryList{} - err = r.GetClient().List(ctx, &serviceEntriesInNamespace, client.InNamespace(application.Namespace)) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - serviceEntriesToDelete := istio.GetServiceEntriesToDelete(serviceEntriesInNamespace.Items, application.Name, serviceEntries) - for _, serviceEntry := range serviceEntriesToDelete { - shouldReconcile, err := r.ShouldReconcile(ctx, &serviceEntry) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - err = r.GetClient().Delete(ctx, &serviceEntry) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} diff --git a/controllers/application/horizontal_pod_autoscaler.go b/controllers/application/horizontal_pod_autoscaler.go deleted file mode 100644 index 09471b64..00000000 --- a/controllers/application/horizontal_pod_autoscaler.go +++ /dev/null @@ -1,81 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - autoscalingv2 "k8s.io/api/autoscaling/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - 
"sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileHorizontalPodAutoscaler(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "HorizontalPodAutoScaler" - r.SetControllerProgressing(ctx, application, controllerName) - - horizontalPodAutoscaler := autoscalingv2.HorizontalPodAutoscaler{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} - shouldReconcile, err := r.ShouldReconcile(ctx, &horizontalPodAutoscaler) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if shouldScaleToZero(application.Spec.Replicas) || !skiperatorv1alpha1.IsHPAEnabled(application.Spec.Replicas) { - err := r.GetClient().Delete(ctx, &horizontalPodAutoscaler) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - r.SetControllerFinishedOutcome(ctx, application, controllerName, nil) - return util.DoNotRequeue() - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &horizontalPodAutoscaler, func() error { - // Set application as owner of the horizontal pod autoscaler - err := ctrlutil.SetControllerReference(application, &horizontalPodAutoscaler, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&horizontalPodAutoscaler, *application) - util.SetCommonAnnotations(&horizontalPodAutoscaler) - - replicas, err := skiperatorv1alpha1.GetScalingReplicas(application.Spec.Replicas) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - horizontalPodAutoscaler.Spec = autoscalingv2.HorizontalPodAutoscalerSpec{ - ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: application.Name, - }, - MinReplicas: util.PointTo(int32(replicas.Min)), - MaxReplicas: int32(replicas.Max), - Metrics: []autoscalingv2.MetricSpec{ - { - Type: autoscalingv2.ResourceMetricSourceType, - Resource: &autoscalingv2.ResourceMetricSource{ - Name: "cpu", - Target: autoscalingv2.MetricTarget{ - Type: autoscalingv2.UtilizationMetricType, - AverageUtilization: util.PointTo(int32(replicas.TargetCpuUtilization)), - }, - }, - }, - }, - } - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} diff --git a/controllers/application/ingress_gateway.go b/controllers/application/ingress_gateway.go deleted file mode 100644 index 9c19b86a..00000000 --- a/controllers/application/ingress_gateway.go +++ /dev/null @@ -1,161 +0,0 @@ -package applicationcontroller - -import ( - "context" - "fmt" - "regexp" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - "golang.org/x/exp/slices" - networkingv1beta1api "istio.io/api/networking/v1beta1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileIngressGateway(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := 
"IngressGateway" - r.SetControllerProgressing(ctx, application, controllerName) - - // Generate separate gateway for each ingress - hosts, err := application.Spec.Hosts() - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.DoNotRequeue() - } - - // Generate separate gateway for each ingress - for _, h := range hosts { - - name := fmt.Sprintf("%s-ingress-%x", application.Name, util.GenerateHashFromName(h.Hostname)) - - gateway := networkingv1beta1.Gateway{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: name}} - shouldReconcile, err := r.ShouldReconcile(ctx, &gateway) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &gateway, func() error { - // Set application as owner of the gateway - err := ctrlutil.SetControllerReference(application, &gateway, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&gateway, *application) - util.SetCommonAnnotations(&gateway) - - gateway.Spec.Selector = util.GetIstioGatewayLabelSelector(h.Hostname) - - gatewayServersToAdd := []*networkingv1beta1api.Server{} - - baseHttpGatewayServer := &networkingv1beta1api.Server{ - Hosts: []string{h.Hostname}, - Port: &networkingv1beta1api.Port{ - Number: 80, - Name: "http", - Protocol: "HTTP", - }, - } - - determinedCredentialName := application.Namespace + "-" + name - if h.UsesCustomCert() { - determinedCredentialName = *h.CustomCertificateSecret - } - - httpsGatewayServer := &networkingv1beta1api.Server{ - Hosts: []string{h.Hostname}, - Port: &networkingv1beta1api.Port{ - Number: 443, - Name: "https", - Protocol: "HTTPS", - }, - Tls: &networkingv1beta1api.ServerTLSSettings{ - Mode: networkingv1beta1api.ServerTLSSettings_SIMPLE, - CredentialName: determinedCredentialName, - }, - } - - gatewayServersToAdd = append(gatewayServersToAdd, baseHttpGatewayServer, httpsGatewayServer) - - gateway.Spec.Servers = gatewayServersToAdd - - return nil - }) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - // Clear out unused gateways - gateways := networkingv1beta1.GatewayList{} - err = r.GetClient().List(ctx, &gateways, client.InNamespace(application.Namespace)) - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - for _, gateway := range gateways.Items { - shouldReconcile, err := r.ShouldReconcile(ctx, gateway) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !shouldReconcile { - continue - } - - // Skip unrelated gateways - if !isIngressGateway(gateway) { - continue - } - - applicationOwnerIndex := slices.IndexFunc(gateway.GetOwnerReferences(), func(ownerReference metav1.OwnerReference) bool { - return ownerReference.Name == application.Name - }) - gatewayOwnedByThisApplication := applicationOwnerIndex != -1 - if !gatewayOwnedByThisApplication { - continue - } - - ingressGatewayInApplicationSpecIndex := slices.IndexFunc(hosts, func(h skiperatorv1alpha1.Host) bool { - ingressName := fmt.Sprintf("%s-ingress-%x", application.Name, util.GenerateHashFromName(h.Hostname)) - return gateway.Name == ingressName - }) - ingressGatewayInApplicationSpec := 
ingressGatewayInApplicationSpecIndex != -1 - if ingressGatewayInApplicationSpec { - continue - } - - // Delete the rest - err = r.GetClient().Delete(ctx, gateway) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -// Filter for gateways named like *-ingress-* -func isIngressGateway(gateway *networkingv1beta1.Gateway) bool { - match, _ := regexp.MatchString("^.*-ingress-.*$", gateway.Name) - - return match -} diff --git a/controllers/application/ingress_virtual_service.go b/controllers/application/ingress_virtual_service.go deleted file mode 100644 index 0c2ab0d1..00000000 --- a/controllers/application/ingress_virtual_service.go +++ /dev/null @@ -1,121 +0,0 @@ -package applicationcontroller - -import ( - "context" - "fmt" - "github.com/kartverket/skiperator/pkg/util" - "hash/fnv" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - networkingv1beta1api "istio.io/api/networking/v1beta1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileIngressVirtualService(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "IngressVirtualService" - r.SetControllerProgressing(ctx, application, controllerName) - - virtualService := networkingv1beta1.VirtualService{ - ObjectMeta: v1.ObjectMeta{ - Name: application.Name + "-ingress", - Namespace: application.Namespace, - }, - } - - var err error - - if len(application.Spec.Ingresses) > 0 { - shouldReconcile, err := r.ShouldReconcile(ctx, &virtualService) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &virtualService, func() error { - - err := ctrlutil.SetControllerReference(application, &virtualService, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - virtualService.Spec = networkingv1beta1api.VirtualService{ - ExportTo: []string{".", "istio-system", "istio-gateways"}, - Gateways: r.getGatewaysFromApplication(application), - Hosts: application.Spec.Ingresses, - Http: []*networkingv1beta1api.HTTPRoute{}, - } - - if application.Spec.RedirectToHTTPS != nil && *application.Spec.RedirectToHTTPS { - virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{ - Name: "redirect-to-https", - Match: []*networkingv1beta1api.HTTPMatchRequest{ - { - WithoutHeaders: map[string]*networkingv1beta1api.StringMatch{ - ":path": { - MatchType: &networkingv1beta1api.StringMatch_Prefix{ - Prefix: "/.well-known/acme-challenge/", - }, - }, - }, - Port: 80, - }, - }, - Redirect: &networkingv1beta1api.HTTPRedirect{ - Scheme: "https", - RedirectCode: 308, - }, - }) - } - - virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{ - Name: "default-app-route", - Route: []*networkingv1beta1api.HTTPRouteDestination{ - { - Destination: &networkingv1beta1api.Destination{ - Host: application.Name, - Port: 
&networkingv1beta1api.PortSelector{ - Number: uint32(application.Spec.Port), - }, - }, - }, - }, - }) - - return nil - }) - - } else { - err = r.GetClient().Delete(ctx, &virtualService) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -func (r *ApplicationReconciler) getGatewaysFromApplication(application *skiperatorv1alpha1.Application) []string { - gateways := make([]string, 0, len(application.Spec.Ingresses)) - for _, hostname := range application.Spec.Ingresses { - // Generate gateway name - hash := fnv.New64() - _, _ = hash.Write([]byte(hostname)) - name := fmt.Sprintf("%s-ingress-%x", application.Name, hash.Sum64()) - gateways = append(gateways, name) - } - - return gateways -} diff --git a/controllers/application/maskinporten.go b/controllers/application/maskinporten.go deleted file mode 100644 index 55ee1891..00000000 --- a/controllers/application/maskinporten.go +++ /dev/null @@ -1,97 +0,0 @@ -package applicationcontroller - -import ( - "context" - "github.com/kartverket/skiperator/api/v1alpha1/digdirator" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - naisiov1 "github.com/nais/liberator/pkg/apis/nais.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileMaskinporten(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "Maskinporten" - r.SetControllerProgressing(ctx, application, controllerName) - - var err error - - maskinporten := naisiov1.MaskinportenClient{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "nais.io/v1", - Kind: "MaskinportenClient", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: application.Namespace, - Name: application.Name, - }, - } - - if maskinportenSpecifiedInSpec(application.Spec.Maskinporten) { - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &maskinporten, func() error { - err := ctrlutil.SetControllerReference(application, &maskinporten, r.GetScheme()) - if err != nil { - return err - } - - r.SetLabelsFromApplication(&maskinporten, *application) - util.SetCommonAnnotations(&maskinporten) - - maskinporten.Spec, err = getMaskinportenSpec(application) - return err - }) - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return reconcile.Result{}, err - } - } else { - err = r.GetClient().Delete(ctx, &maskinporten) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return reconcile.Result{}, err - } - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return reconcile.Result{}, err -} - -func getMaskinportenSpec(application *skiperatorv1alpha1.Application) (naisiov1.MaskinportenClientSpec, error) { - secretName, err := getMaskinportenSecretName(application.Name) - if err != nil { - return naisiov1.MaskinportenClientSpec{}, err - } - - scopes := naisiov1.MaskinportenScope{} - if application.Spec.Maskinporten.Scopes != nil { - scopes = 
*application.Spec.Maskinporten.Scopes - } - - return naisiov1.MaskinportenClientSpec{ - ClientName: getClientNameMaskinporten(application.Name, application.Spec.Maskinporten), - SecretName: secretName, - Scopes: scopes, - }, nil -} - -func getClientNameMaskinporten(applicationName string, maskinportenSettings *digdirator.Maskinporten) string { - if maskinportenSettings.ClientName != nil { - return *maskinportenSettings.ClientName - } - - return applicationName -} - -func maskinportenSpecifiedInSpec(maskinportenSettings *digdirator.Maskinporten) bool { - return maskinportenSettings != nil && maskinportenSettings.Enabled -} - -func getMaskinportenSecretName(name string) (string, error) { - return util.GetSecretName("maskinporten", name) -} diff --git a/controllers/application/network_policy.go b/controllers/application/network_policy.go deleted file mode 100644 index cf66939d..00000000 --- a/controllers/application/network_policy.go +++ /dev/null @@ -1,85 +0,0 @@ -package applicationcontroller - -import ( - "context" - - "github.com/kartverket/skiperator/pkg/resourcegenerator/networking" - "sigs.k8s.io/controller-runtime/pkg/client" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileNetworkPolicy(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "NetworkPolicy" - r.SetControllerProgressing(ctx, application, controllerName) - - egressServices, err := r.GetEgressServices(ctx, application, application.Spec.AccessPolicy) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - namespaces, err := r.GetNamespaces(ctx, application) - if err != nil { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - networkPolicy := networkingv1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: application.Namespace, - Name: application.Name, - }, - } - - shouldReconcile, err := r.ShouldReconcile(ctx, &networkPolicy) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - netpolSpec := networking.CreateNetPolSpec( - networking.NetPolOpts{ - AccessPolicy: application.Spec.AccessPolicy, - Ingresses: &application.Spec.Ingresses, - Port: &application.Spec.Port, - Namespace: application.Namespace, - Namespaces: &namespaces, - Name: application.Name, - RelatedServices: &egressServices, - PrometheusConfig: application.Spec.Prometheus, - IstioEnabled: r.IsIstioEnabledForNamespace(ctx, application.Namespace), - }, - ) - - if netpolSpec == nil { - err = client.IgnoreNotFound(r.GetClient().Delete(ctx, &networkPolicy)) - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &networkPolicy, func() error { - // Set application as owner of the network policy - err := ctrlutil.SetControllerReference(application, &networkPolicy, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&networkPolicy, 
*application) - util.SetCommonAnnotations(&networkPolicy) - - networkPolicy.Spec = *netpolSpec - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} diff --git a/controllers/application/peer_authentication.go b/controllers/application/peer_authentication.go deleted file mode 100644 index ca553a11..00000000 --- a/controllers/application/peer_authentication.go +++ /dev/null @@ -1,45 +0,0 @@ -package applicationcontroller - -import ( - "context" - "github.com/kartverket/skiperator/pkg/resourcegenerator/istio" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcilePeerAuthentication(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "PeerAuthentication" - r.SetControllerProgressing(ctx, application, controllerName) - - peerAuthentication := securityv1beta1.PeerAuthentication{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} - shouldReconcile, err := r.ShouldReconcile(ctx, &peerAuthentication) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &peerAuthentication, func() error { - // Set application as owner of the peer authentication - err := ctrlutil.SetControllerReference(application, &peerAuthentication, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - peerAuthentication.Spec = istio.GetPeerAuthentication(application.Name) - - r.SetLabelsFromApplication(&peerAuthentication, *application) - util.SetCommonAnnotations(&peerAuthentication) - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} diff --git a/controllers/application/pod_disruption_budget.go b/controllers/application/pod_disruption_budget.go deleted file mode 100644 index 077a2015..00000000 --- a/controllers/application/pod_disruption_budget.go +++ /dev/null @@ -1,92 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/k8sfeatures" - "github.com/kartverket/skiperator/pkg/util" - policyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcilePodDisruptionBudget(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "PodDisruptionBudget" - _, _ = r.SetControllerProgressing(ctx, application, controllerName) - - pdb := policyv1.PodDisruptionBudget{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} - shouldReconcile, err := r.ShouldReconcile(ctx, &pdb) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, 
controllerName, err) - return util.RequeueWithError(err) - } - - if *application.Spec.EnablePDB { - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &pdb, func() error { - // Set application as owner of the PDB - err := ctrlutil.SetControllerReference(application, &pdb, r.GetScheme()) - if err != nil { - _, _ = r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&pdb, *application) - util.SetCommonAnnotations(&pdb) - var minReplicas uint - - replicas, err := skiperatorv1alpha1.GetStaticReplicas(application.Spec.Replicas) - if err != nil { - replicasStruct, err := skiperatorv1alpha1.GetScalingReplicas(application.Spec.Replicas) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } else { - minReplicas = replicasStruct.Min - } - } else { - minReplicas = replicas - } - - pdb.Spec = policyv1.PodDisruptionBudgetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: util.GetPodAppSelector(application.Name), - }, - MinAvailable: determineMinAvailable(minReplicas), - } - - if k8sfeatures.EnhancedPDBAvailable() { - pdb.Spec.UnhealthyPodEvictionPolicy = util.PointTo(policyv1.AlwaysAllow) - } - - return nil - }) - - _, _ = r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } else { - err := r.GetClient().Delete(ctx, &pdb) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, _ = r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.DoNotRequeue() - } -} - -func determineMinAvailable(replicasAvailable uint) *intstr.IntOrString { - var value intstr.IntOrString - - if replicasAvailable > 1 { - value = intstr.FromString("50%") - } else { - value = intstr.FromInt(0) - } - - return &value -} diff --git a/controllers/application/service.go b/controllers/application/service.go deleted file mode 100644 index bc7bbc58..00000000 --- a/controllers/application/service.go +++ /dev/null @@ -1,110 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/api/v1alpha1/podtypes" - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "strings" -) - -const defaultPortName = "http" - -var defaultPrometheusPort = corev1.ServicePort{ - Name: util.IstioMetricsPortName.StrVal, - Protocol: corev1.ProtocolTCP, - Port: util.IstioMetricsPortNumber.IntVal, - TargetPort: util.IstioMetricsPortNumber, -} - -func (r *ApplicationReconciler) reconcileService(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "Service" - r.SetControllerProgressing(ctx, application, controllerName) - - service := corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} - shouldReconcile, err := r.ShouldReconcile(ctx, &service) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &service, func() error { - // Set application as owner of the service - err := 
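// A quick sanity check of determineMinAvailable above — a hypothetical
// table-driven test, not part of the original patch (requires "testing"):
func TestDetermineMinAvailable(t *testing.T) {
	// More than one replica: tolerate losing up to half of them.
	if got := determineMinAvailable(3); got.StrVal != "50%" {
		t.Errorf(`want "50%%" with several replicas, got %v`, got)
	}
	// Zero or one replica: requiring availability would block node drains.
	if got := determineMinAvailable(1); got.IntVal != 0 {
		t.Errorf("want 0 for a single replica, got %v", got)
	}
}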
ctrlutil.SetControllerReference(application, &service, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&service, *application) - util.SetCommonAnnotations(&service) - - // ServiceMonitor requires labels to be set on service to select it - labels := service.GetLabels() - if len(labels) == 0 { - labels = make(map[string]string) - } - labels["app"] = application.Name - service.SetLabels(labels) - - ports := append(getAdditionalPorts(application.Spec.AdditionalPorts), getServicePort(application.Spec.Port, application.Spec.AppProtocol)) - if r.IsIstioEnabledForNamespace(ctx, application.Namespace) { - ports = append(ports, defaultPrometheusPort) - } - - service.Spec = corev1.ServiceSpec{ - Selector: util.GetPodAppSelector(application.Name), - Type: corev1.ServiceTypeClusterIP, - Ports: ports, - } - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -func getAdditionalPorts(additionalPorts []podtypes.InternalPort) []corev1.ServicePort { - var ports []corev1.ServicePort - - for _, p := range additionalPorts { - ports = append(ports, corev1.ServicePort{ - Name: p.Name, - Port: p.Port, - Protocol: p.Protocol, - TargetPort: intstr.FromInt(int(p.Port)), - }) - } - - return ports -} - -func getServicePort(port int, appProtocol string) corev1.ServicePort { - var resolvedProtocol = corev1.ProtocolTCP - if strings.ToLower(appProtocol) == "udp" { - resolvedProtocol = corev1.ProtocolUDP - } - - var resolvedAppProtocol = appProtocol - if len(resolvedAppProtocol) == 0 { - resolvedAppProtocol = "http" - } else if port == 5432 { - // Legacy postgres hack - resolvedAppProtocol = "tcp" - } - - return corev1.ServicePort{ - Name: defaultPortName, - Protocol: resolvedProtocol, - AppProtocol: &resolvedAppProtocol, - Port: int32(port), - TargetPort: intstr.FromInt(port), - } -} diff --git a/controllers/application/service_account.go b/controllers/application/service_account.go deleted file mode 100644 index 282ce85f..00000000 --- a/controllers/application/service_account.go +++ /dev/null @@ -1,58 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - "golang.org/x/exp/maps" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ApplicationReconciler) reconcileServiceAccount(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "ServiceAccount" - r.SetControllerProgressing(ctx, application, controllerName) - - serviceAccount := corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} - - shouldReconcile, err := r.ShouldReconcile(ctx, &serviceAccount) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &serviceAccount, func() error { - // Set application as owner of the sidecar - err := ctrlutil.SetControllerReference(application, &serviceAccount, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - if 
util.IsCloudSqlProxyEnabled(application.Spec.GCP) { - setCloudSqlAnnotations(&serviceAccount, application) - } - - r.SetLabelsFromApplication(&serviceAccount, *application) - util.SetCommonAnnotations(&serviceAccount) - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} - -func setCloudSqlAnnotations(serviceAccount *corev1.ServiceAccount, application *skiperatorv1alpha1.Application) { - annotations := serviceAccount.GetAnnotations() - if len(annotations) == 0 { - annotations = make(map[string]string) - } - maps.Copy(annotations, map[string]string{ - "iam.gke.io/gcp-service-account": application.Spec.GCP.CloudSQLProxy.ServiceAccount, - }) - serviceAccount.SetAnnotations(annotations) -} diff --git a/controllers/application/service_monitor.go b/controllers/application/service_monitor.go deleted file mode 100644 index 764e1949..00000000 --- a/controllers/application/service_monitor.go +++ /dev/null @@ -1,91 +0,0 @@ -package applicationcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "strings" -) - -func (r *ApplicationReconciler) reconcileServiceMonitor(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "ServiceMonitor" - r.SetControllerProgressing(ctx, application, controllerName) - - if !r.isCrdPresent(ctx, "servicemonitors.monitoring.coreos.com") { - r.SetControllerFinishedOutcome(ctx, application, controllerName, nil) - return util.DoNotRequeue() - } - - serviceMonitor := pov1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{ - Namespace: application.Namespace, - Name: application.Name, - Labels: map[string]string{"instance": "primary"}, - }} - - shouldReconcile, err := r.ShouldReconcile(ctx, &serviceMonitor) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - if !r.IsIstioEnabledForNamespace(ctx, application.Namespace) { - err := client.IgnoreNotFound(r.GetClient().Delete(ctx, &serviceMonitor)) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - - r.SetControllerFinishedOutcome(ctx, application, controllerName, nil) - return util.DoNotRequeue() - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &serviceMonitor, func() error { - // Set application as owner of the service - err := ctrlutil.SetControllerReference(application, &serviceMonitor, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&serviceMonitor, *application) - util.SetCommonAnnotations(&serviceMonitor) - - serviceMonitor.Spec = pov1.ServiceMonitorSpec{ - Selector: metav1.LabelSelector{ - MatchLabels: util.GetPodAppSelector(application.Name), - }, - NamespaceSelector: pov1.NamespaceSelector{ - MatchNames: []string{application.Namespace}, - }, - Endpoints: []pov1.Endpoint{ - { - Path: util.IstioMetricsPath, - TargetPort: &util.IstioMetricsPortName, - MetricRelabelConfigs: []pov1.RelabelConfig{ - { - Action: "drop", - Regex: 
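// The annotation written by setCloudSqlAnnotations above is the one GKE
// Workload Identity keys on; the resulting ServiceAccount looks roughly like
// this (the GCP service account value is illustrative):
//
//	metadata:
//	  annotations:
//	    iam.gke.io/gcp-service-account: my-app@my-project.iam.gserviceaccount.com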
strings.Join(util.DefaultMetricDropList, "|"), - SourceLabels: []pov1.LabelName{"__name__"}, - }, - }, - }, - }, - } - - // Remove MetricRelabelConfigs if AllowAllMetrics is set to true - if application.Spec.Prometheus != nil && application.Spec.Prometheus.AllowAllMetrics { - serviceMonitor.Spec.Endpoints[0].MetricRelabelConfigs = nil - } - - return nil - }) - - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - - return util.RequeueWithError(err) -} diff --git a/controllers/namespace/controller.go b/controllers/namespace/controller.go deleted file mode 100644 index 15e518ba..00000000 --- a/controllers/namespace/controller.go +++ /dev/null @@ -1,85 +0,0 @@ -package namespacecontroller - -import ( - "context" - "fmt" - "github.com/kartverket/skiperator/pkg/util" - istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -type NamespaceReconciler struct { - util.ReconcilerBase - Token string - Registry string -} - -func (r *NamespaceReconciler) isExcludedNamespace(ctx context.Context, namespace string) bool { - configMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "namespace-exclusions"} - - namespaceExclusionCMap, err := util.GetConfigMap(r.GetClient(), ctx, configMapNamespacedName) - if err != nil { - util.ErrDoPanic(err, "Something went wrong getting namespace-exclusion config map: %v") - } - - nameSpacesToExclude := namespaceExclusionCMap.Data - - exclusion, keyExists := nameSpacesToExclude[namespace] - - return (keyExists && exclusion == "true") -} - -//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch -//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=networking.istio.io,resources=sidecars,verbs=get;list;watch;create;update;patch;delete - -func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&corev1.Namespace{}). - Owns(&networkingv1.NetworkPolicy{}). - Owns(&istionetworkingv1beta1.Sidecar{}). - Owns(&corev1.Secret{}, builder.WithPredicates( - util.MatchesPredicate[*corev1.Secret](isImagePullSecret), - )). 
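// Namespace exclusion above is driven entirely by the namespace-exclusions
// ConfigMap in skiperator-system, keyed by namespace name; for example
// (entries are illustrative):
//
//	data:
//	  kube-system: "true"   # skipped by skiperator
//	  team-foo: "false"     # still reconciled
//
// Only the literal value "true" excludes a namespace; a missing key or any
// other value leaves it managed.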
- Complete(r) -} - -func (r *NamespaceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - namespace := &corev1.Namespace{} - err := r.GetClient().Get(ctx, req.NamespacedName, namespace) - if errors.IsNotFound(err) { - return util.DoNotRequeue() - } else if err != nil { - r.EmitWarningEvent(namespace, "ReconcileStartFail", "something went wrong fetching the namespace, it might have been deleted") - - return util.RequeueWithError(err) - } - - if r.isExcludedNamespace(ctx, namespace.Name) { - return util.RequeueWithError(err) - } - - r.EmitNormalEvent(namespace, "ReconcileStart", fmt.Sprintf("Namespace %v has started reconciliation loop", namespace.Name)) - - controllerDuties := []func(context.Context, *corev1.Namespace) (reconcile.Result, error){ - r.reconcileDefaultDenyNetworkPolicy, - r.reconcileImagePullSecret, - r.reconcileSidecar, - } - - for _, fn := range controllerDuties { - if _, err := fn(ctx, namespace); err != nil { - return util.RequeueWithError(err) - } - } - - r.EmitNormalEvent(namespace, "ReconcileEnd", fmt.Sprintf("Namespace %v has finished reconciliation loop", namespace.Name)) - - return util.RequeueWithError(err) -} diff --git a/controllers/namespace/default_deny_network_policy.go b/controllers/namespace/default_deny_network_policy.go deleted file mode 100644 index 24e05f74..00000000 --- a/controllers/namespace/default_deny_network_policy.go +++ /dev/null @@ -1,148 +0,0 @@ -package namespacecontroller - -import ( - "context" - - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *NamespaceReconciler) reconcileDefaultDenyNetworkPolicy(ctx context.Context, namespace *corev1.Namespace) (reconcile.Result, error) { - - networkPolicy := networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Namespace: namespace.Name, Name: "default-deny"}} - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &networkPolicy, func() error { - // Set namespace as owner of the network policy - err := ctrlutil.SetControllerReference(namespace, &networkPolicy, r.GetScheme()) - if err != nil { - return err - } - - networkPolicy.Spec = networkingv1.NetworkPolicySpec{ - PolicyTypes: []networkingv1.PolicyType{ - networkingv1.PolicyTypeIngress, - networkingv1.PolicyTypeEgress, - }, - Egress: []networkingv1.NetworkPolicyEgressRule{ - { - To: []networkingv1.NetworkPolicyPeer{ - // Egress rule for parts of internal server network - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "10.40.0.0/16", - }, - }, - // Egress rule for Internet - { - IPBlock: &networkingv1.IPBlock{ - CIDR: "0.0.0.0/0", - Except: []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"}, - }, - }, - }, - }, - // Egress rule for DNS - { - To: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"}, - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"k8s-app": "kube-dns"}, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - // DNS Ports - { - Protocol: util.PointTo(corev1.ProtocolTCP), - Port: util.PointTo(intstr.FromInt(53)), - }, - { - Protocol: util.PointTo(corev1.ProtocolUDP), - Port: util.PointTo(intstr.FromInt(53)), - }, - }, - }, - // Egress rule for Istio XDS - { - 
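// Taken together, the "default-deny" policy being assembled here is
// deny-by-default with an explicit egress allow-list: the empty pod selector
// matches every pod in the namespace, declaring both policy types while
// listing no ingress rules blocks all ingress, and egress is limited to the
// enumerated peers — part of the internal network (10.40.0.0/16), the public
// internet minus the RFC 1918 ranges, kube-dns on 53/TCP+UDP, istiod on
// 15012, and the grafana-agent/alloy OTLP collectors on 4317-4318 (below).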
To: []networkingv1.NetworkPolicyPeer{ - { - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "istiod"}, - }, - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "istio-system"}, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(intstr.FromInt(15012)), - }, - }, - }, - // Egress rule for grafana-agent - { - To: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "grafana-agent"}, - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/instance": "grafana-agent", - "app.kubernetes.io/name": "grafana-agent", - }, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Protocol: util.PointTo(corev1.ProtocolTCP), - Port: util.PointTo(intstr.FromInt(4317)), - }, - { - Protocol: util.PointTo(corev1.ProtocolTCP), - Port: util.PointTo(intstr.FromInt(4318)), - }, - }, - }, - // Egress rule for grafana-alloy - { - To: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "grafana-alloy"}, - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/instance": "alloy", - "app.kubernetes.io/name": "alloy", - }, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Protocol: util.PointTo(corev1.ProtocolTCP), - Port: util.PointTo(intstr.FromInt(4317)), - }, - { - Protocol: util.PointTo(corev1.ProtocolTCP), - Port: util.PointTo(intstr.FromInt(4318)), - }, - }, - }, - }, - } - - return nil - }) - return util.RequeueWithError(err) -} diff --git a/controllers/namespace/image_pull_secret.go b/controllers/namespace/image_pull_secret.go deleted file mode 100644 index 9b6ed3c9..00000000 --- a/controllers/namespace/image_pull_secret.go +++ /dev/null @@ -1,58 +0,0 @@ -package namespacecontroller - -import ( - "bytes" - "context" - "encoding/json" - "github.com/kartverket/skiperator/pkg/util" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *NamespaceReconciler) reconcileImagePullSecret(ctx context.Context, namespace *corev1.Namespace) (reconcile.Result, error) { - secret := corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace.Name, Name: "github-auth"}} - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &secret, func() error { - // Set namespace as owner of the sidecar - err := ctrlutil.SetControllerReference(namespace, &secret, r.GetScheme()) - if err != nil { - return err - } - - secret.Type = corev1.SecretTypeDockerConfigJson - - cfg := dockerConfigJson{} - cfg.Auths = make(map[string]dockerConfigAuth, 1) - auth := dockerConfigAuth{} - auth.Auth = r.Token - cfg.Auths[r.Registry] = auth - - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - err = enc.Encode(cfg) - if err != nil { - return err - } - - secret.Data = make(map[string][]byte, 1) - secret.Data[".dockerconfigjson"] = buf.Bytes() - - return nil - }) - return util.RequeueWithError(err) -} - -// Filter for secrets named github-auth -func isImagePullSecret(secret *corev1.Secret) bool { - return secret.Name == "github-auth" -} - -type dockerConfigJson struct { - Auths map[string]dockerConfigAuth `json:"auths"` -} - -type dockerConfigAuth struct { - Auth string `json:"auth"` -} diff 
--git a/controllers/namespace/sidecar.go b/controllers/namespace/sidecar.go deleted file mode 100644 index de3b8bbe..00000000 --- a/controllers/namespace/sidecar.go +++ /dev/null @@ -1,33 +0,0 @@ -package namespacecontroller - -import ( - "context" - "github.com/kartverket/skiperator/pkg/util" - - networkingv1beta1api "istio.io/api/networking/v1beta1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *NamespaceReconciler) reconcileSidecar(ctx context.Context, namespace *corev1.Namespace) (reconcile.Result, error) { - sidecar := networkingv1beta1.Sidecar{ObjectMeta: metav1.ObjectMeta{Namespace: namespace.Name, Name: "sidecar"}} - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &sidecar, func() error { - // Set namespace as owner of the sidecar - err := ctrlutil.SetControllerReference(namespace, &sidecar, r.GetScheme()) - if err != nil { - return err - } - - sidecar.Spec = networkingv1beta1api.Sidecar{ - OutboundTrafficPolicy: &networkingv1beta1api.OutboundTrafficPolicy{ - Mode: networkingv1beta1api.OutboundTrafficPolicy_REGISTRY_ONLY, - }, - } - - return nil - }) - return util.RequeueWithError(err) -} diff --git a/controllers/routing/certificate.go b/controllers/routing/certificate.go deleted file mode 100644 index 1bf04bfe..00000000 --- a/controllers/routing/certificate.go +++ /dev/null @@ -1,106 +0,0 @@ -package routingcontroller - -import ( - "context" - certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" - certmanagermetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -const IstioGatewayNamespace = "istio-gateways" - -func (r *RoutingReconciler) SkiperatorRoutingCertRequests(_ context.Context, obj client.Object) []reconcile.Request { - certificate, isCert := obj.(*certmanagerv1.Certificate) - - if !isCert { - return nil - } - - isSkiperatorRoutingOwned := certificate.Labels["app.kubernetes.io/managed-by"] == "skiperator" && - certificate.Labels["skiperator.kartverket.no/controller"] == "routing" - - requests := make([]reconcile.Request, 0) - - if isSkiperatorRoutingOwned { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: certificate.Labels["application.skiperator.no/app-namespace"], - Name: certificate.Labels["application.skiperator.no/app-name"], - }, - }) - } - - return requests -} - -func (r *RoutingReconciler) reconcileCertificate(ctx context.Context, routing *skiperatorv1alpha1.Routing) (reconcile.Result, error) { - h, err := routing.Spec.GetHost() - if err != nil { - err = r.setConditionCertificateSynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.DoNotRequeue() - } - - // Do not create a new certificate when a custom certificate secret is specified - if h.UsesCustomCert() { - err = r.setConditionCertificateSynced(ctx, routing, ConditionStatusTrue, ConditionMessageCertificateSkipped) - return util.RequeueWithError(err) - } - - certificateName, err := 
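// The namespace-wide Sidecar above is the other half of the egress story:
// OutboundTrafficPolicy REGISTRY_ONLY makes the Istio proxy drop traffic to
// destinations that are not in the mesh registry, which is why external hosts
// in an AccessPolicy must be materialised as ServiceEntries (see the
// ServiceEntry handling for SKIPJobs at the end of this patch).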
routing.GetCertificateName() - if err != nil { - err = r.setConditionCertificateSynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - certificate := certmanagerv1.Certificate{ObjectMeta: metav1.ObjectMeta{Namespace: IstioGatewayNamespace, Name: certificateName}} - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &certificate, func() error { - certificate.Spec = certmanagerv1.CertificateSpec{ - IssuerRef: certmanagermetav1.ObjectReference{ - Kind: "ClusterIssuer", - Name: "cluster-issuer", // Name defined in https://github.com/kartverket/certificate-management/blob/main/clusterissuer.tf - }, - DNSNames: []string{h.Hostname}, - SecretName: certificateName, - } - - certificate.Labels = getLabels(certificate, routing) - - return nil - }) - if err != nil { - err = r.setConditionCertificateSynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - - err = r.setConditionCertificateSynced(ctx, routing, ConditionStatusTrue, ConditionMessageCertificateSynced) - return util.RequeueWithError(err) -} - -func getLabels(certificate certmanagerv1.Certificate, routing *skiperatorv1alpha1.Routing) map[string]string { - certLabels := certificate.Labels - if len(certLabels) == 0 { - certLabels = make(map[string]string) - } - certLabels["app.kubernetes.io/managed-by"] = "skiperator" - - certLabels["skiperator.kartverket.no/controller"] = "routing" - certLabels["skiperator.kartverket.no/source-namespace"] = routing.Namespace - - return certLabels -} - -func (r *RoutingReconciler) GetSkiperatorRoutingCertificates(context context.Context) (certmanagerv1.CertificateList, error) { - certificates := certmanagerv1.CertificateList{} - err := r.GetClient().List(context, &certificates, client.MatchingLabels{ - "app.kubernetes.io/managed-by": "skiperator", - "skiperator.kartverket.no/controller": "routing", - }) - - return certificates, err -} diff --git a/controllers/routing/controller.go b/controllers/routing/controller.go deleted file mode 100644 index 714c5e33..00000000 --- a/controllers/routing/controller.go +++ /dev/null @@ -1,104 +0,0 @@ -package routingcontroller - -import ( - "context" - "fmt" - certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=routings;routings/status,verbs=get;list;watch;update -// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=applications;applications/status,verbs=get;list;watch -// +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=networking.istio.io,resources=gateways;virtualservices,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete - -type RoutingReconciler struct { - util.ReconcilerBase -} - -func (r *RoutingReconciler) SetupWithManager(mgr ctrl.Manager) error { - 
return ctrl.NewControllerManagedBy(mgr). - For(&skiperatorv1alpha1.Routing{}). - Owns(&istionetworkingv1beta1.Gateway{}). - Owns(&networkingv1.NetworkPolicy{}). - Owns(&istionetworkingv1beta1.VirtualService{}). - Watches(&certmanagerv1.Certificate{}, handler.EnqueueRequestsFromMapFunc(r.SkiperatorRoutingCertRequests)). - Watches( - &skiperatorv1alpha1.Application{}, - handler.EnqueueRequestsFromMapFunc(r.SkiperatorApplicationsChanges)). - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). - Complete(r) -} - -func (r *RoutingReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - routing := &skiperatorv1alpha1.Routing{} - err := r.GetClient().Get(ctx, req.NamespacedName, routing) - - if errors.IsNotFound(err) { - return util.DoNotRequeue() - } else if err != nil { - r.EmitWarningEvent(routing, "ReconcileStartFail", "something went wrong fetching the Routing, it might have been deleted") - return util.RequeueWithError(err) - } - - r.EmitNormalEvent(routing, "ReconcileStart", fmt.Sprintf("Routing %v has started reconciliation loop", routing.Name)) - - controllerDuties := []func(context.Context, *skiperatorv1alpha1.Routing) (reconcile.Result, error){ - r.reconcileNetworkPolicy, - r.reconcileVirtualService, - r.reconcileGateway, - r.reconcileCertificate, - } - - for _, fn := range controllerDuties { - res, err := fn(ctx, routing) - if err != nil { - return res, err - } else if res.RequeueAfter > 0 || res.Requeue { - return res, nil - } - } - - r.EmitNormalEvent(routing, "ReconcileEnd", fmt.Sprintf("Routing %v has finished reconciliation loop", routing.Name)) - - err = r.GetClient().Update(ctx, routing) - return util.RequeueWithError(err) -} - -func (r *RoutingReconciler) SkiperatorApplicationsChanges(context context.Context, obj client.Object) []reconcile.Request { - application, isApplication := obj.(*skiperatorv1alpha1.Application) - - if !isApplication { - return nil - } - - // List all routings in the same namespace as the application - routesList := &skiperatorv1alpha1.RoutingList{} - if err := r.GetClient().List(context, routesList, &client.ListOptions{Namespace: application.Namespace}); err != nil { - return nil - } - - // Create a list of reconcile.Requests for each Routing in the same namespace as the application - requests := make([]reconcile.Request, 0) - for _, route := range routesList.Items { - requests = append(requests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: route.Namespace, - Name: route.Name, - }, - }) - } - - return requests -} diff --git a/controllers/routing/gateway.go b/controllers/routing/gateway.go deleted file mode 100644 index 1ec0ca22..00000000 --- a/controllers/routing/gateway.go +++ /dev/null @@ -1,80 +0,0 @@ -package routingcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - networkingv1beta1api "istio.io/api/networking/v1beta1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *RoutingReconciler) reconcileGateway(ctx context.Context, routing *skiperatorv1alpha1.Routing) (reconcile.Result, error) { - h, err := routing.Spec.GetHost() - if err != nil { - err = r.setConditionGatewaySynced(ctx, routing, ConditionStatusFalse, err.Error()) - 
return util.DoNotRequeue() - } - - gateway := networkingv1beta1.Gateway{ObjectMeta: metav1.ObjectMeta{Namespace: routing.Namespace, Name: routing.GetGatewayName()}} - - var determinedCredentialName string - if h.UsesCustomCert() { - determinedCredentialName = *h.CustomCertificateSecret - } else { - determinedCredentialName, err = routing.GetCertificateName() - if err != nil { - err = r.setConditionGatewaySynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &gateway, func() error { - err := ctrlutil.SetControllerReference(routing, &gateway, r.GetScheme()) - if err != nil { - return err - } - - util.SetCommonAnnotations(&gateway) - - gateway.Spec.Selector = util.GetIstioGatewayLabelSelector(h.Hostname) - gateway.Spec.Servers = []*networkingv1beta1api.Server{ - { - Hosts: []string{h.Hostname}, - Port: &networkingv1beta1api.Port{ - Number: 80, - Name: "http", - Protocol: "HTTP", - }, - }, - { - Hosts: []string{h.Hostname}, - Port: &networkingv1beta1api.Port{ - Number: 443, - Name: "https", - Protocol: "HTTPS", - }, - Tls: &networkingv1beta1api.ServerTLSSettings{ - Mode: networkingv1beta1api.ServerTLSSettings_SIMPLE, - CredentialName: determinedCredentialName, - }, - }, - } - - return nil - }) - if err != nil { - err = r.setConditionGatewaySynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - - m := ConditionMessageGatewaySynced - if h.UsesCustomCert() { - m = ConditionMessageGatewaySyncedCustomCertificate - } - err = r.setConditionGatewaySynced(ctx, routing, ConditionStatusTrue, m) - return util.RequeueWithError(err) - -} diff --git a/controllers/routing/network_policy.go b/controllers/routing/network_policy.go deleted file mode 100644 index 0f8b4999..00000000 --- a/controllers/routing/network_policy.go +++ /dev/null @@ -1,131 +0,0 @@ -package routingcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "slices" -) - -func (r *RoutingReconciler) reconcileNetworkPolicy(ctx context.Context, routing *skiperatorv1alpha1.Routing) (reconcile.Result, error) { - var err error - - // Get map of unique network policies: map[networkPolicyName]targetApp - uniqueTargetApps := make(map[string]string) - for _, route := range routing.Spec.Routes { - uniqueTargetApps[getNetworkPolicyName(routing, route.TargetApp)] = route.TargetApp - } - - for netpolName, targetApp := range uniqueTargetApps { - networkPolicy := networkingv1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: routing.Namespace, - Name: netpolName, - }, - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &networkPolicy, func() error { - applicationNamespacedName := types.NamespacedName{Namespace: routing.Namespace, Name: targetApp} - targetApplication, err := getApplication(r.GetClient(), ctx, applicationNamespacedName) - if err != nil { - return err - } - - err = ctrlutil.SetControllerReference(routing, &networkPolicy, r.GetScheme()) - if err != nil { - return err - } - - networkPolicy.Spec = networkingv1.NetworkPolicySpec{ - PodSelector: 
metav1.LabelSelector{ - MatchLabels: util.GetPodAppSelector(targetApp), - }, - PolicyTypes: []networkingv1.PolicyType{ - networkingv1.PolicyTypeIngress, - }, - Ingress: []networkingv1.NetworkPolicyIngressRule{ - { - From: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: util.GetIstioGatewaySelector(), - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: util.GetIstioGatewayLabelSelector(routing.Spec.Hostname), - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(intstr.FromInt32(int32(targetApplication.Spec.Port))), - }, - }, - }, - }, - } - util.SetCommonAnnotations(&networkPolicy) - return nil - }) - } - if err != nil { - err = r.setConditionNetworkPolicySynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - - // Delete network policies that are no longer defined by the Routing resource - networkPolicyInNamespace := networkingv1.NetworkPolicyList{} - err = r.GetClient().List(ctx, &networkPolicyInNamespace, client.InNamespace(routing.Namespace)) - if err != nil { - return util.RequeueWithError(err) - } - - var networkPoliciesToDelete []networkingv1.NetworkPolicy - for _, networkPolicy := range networkPolicyInNamespace.Items { - ownerIndex := slices.IndexFunc(networkPolicy.GetOwnerReferences(), func(ownerReference metav1.OwnerReference) bool { - return ownerReference.Name == routing.Name - }) - networkPolicyOwnedByThisRouting := ownerIndex != -1 - if !networkPolicyOwnedByThisRouting { - continue - } - - _, ok := uniqueTargetApps[networkPolicy.Name] - if ok { - continue - } - - networkPoliciesToDelete = append(networkPoliciesToDelete, networkPolicy) - } - - for _, networkPolicy := range networkPoliciesToDelete { - err = r.GetClient().Delete(ctx, &networkPolicy) - err = client.IgnoreNotFound(err) - if err != nil { - err = r.setConditionNetworkPolicySynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - } - - err = r.setConditionNetworkPolicySynced(ctx, routing, ConditionStatusTrue, ConditionMessageNetworkPolicySynced) - return util.RequeueWithError(err) -} - -func getNetworkPolicyName(routing *skiperatorv1alpha1.Routing, targetApp string) string { - return fmt.Sprintf("%s-%s-istio-ingress", routing.Name, targetApp) -} - -func getApplication(client client.Client, ctx context.Context, namespacedName types.NamespacedName) (skiperatorv1alpha1.Application, error) { - application := skiperatorv1alpha1.Application{} - - err := client.Get(ctx, namespacedName, &application) - - return application, err -} diff --git a/controllers/routing/status.go b/controllers/routing/status.go deleted file mode 100644 index 07165972..00000000 --- a/controllers/routing/status.go +++ /dev/null @@ -1,132 +0,0 @@ -package routingcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -const ( - ConditionStatusTrue = "True" - ConditionStatusFalse = "False" - ConditionStatusUnknown = "Unknown" - - ConditionTypeCertificateSynced = "CertificateSynced" - ConditionReasonCertificateSynced = "CertificateSynced" - ConditionMessageCertificateSynced = "Certificate has been synced" - ConditionMessageCertificateSkipped = "Certificate has been skipped (custom certificate secret in use)" - - ConditionTypeGatewaySynced = "GatewaySynced" - ConditionReasonGatewaySynced = 
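// uniqueTargetApps above doubles as the desired-state set for the cleanup
// pass: its keys are the generated policy names, so any NetworkPolicy in the
// namespace that carries this Routing as an owner reference but whose name is
// no longer a key in the map is stale and gets deleted. Using a map also
// de-duplicates routes that point at the same target application.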
"GatewaySynced" - ConditionMessageGatewaySynced = "Gateway has been synced" - ConditionMessageGatewaySyncedCustomCertificate = "Gateway has been synced (using a custom certificate)" - - ConditionTypeVirtualServiceSynced = "VirtualServiceSynced" - CoditionReasonVirtualServiceSynced = "VirtualServiceSynced" - ConditionMessageVirtualServiceSynced = "VirtualService has been synced" - - ConditionTypeNetworkPolicySynced = "NetworkPolicySynced" - ConditionReasonNetworkPolicySynced = "NetworkPolicySynced" - ConditionMessageNetworkPolicySynced = "NetworkPolicy has been synced" -) - -func (r *RoutingReconciler) setConditionCertificateSynced(ctx context.Context, routing *skiperatorv1alpha1.Routing, status metav1.ConditionStatus, message string) error { - if !r.containsCondition(ctx, routing, ConditionReasonCertificateSynced) { - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeCertificateSynced, status, - ConditionReasonCertificateSynced, message) - } else { - currentStatus := r.getConditionStatus(ctx, routing, ConditionTypeCertificateSynced) - if currentStatus != status { - r.deleteCondition(ctx, routing, ConditionTypeCertificateSynced, ConditionReasonCertificateSynced) - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeCertificateSynced, status, - ConditionReasonCertificateSynced, message) - } - } - return nil -} - -func (r *RoutingReconciler) setConditionGatewaySynced(ctx context.Context, routing *skiperatorv1alpha1.Routing, status metav1.ConditionStatus, message string) error { - if !r.containsCondition(ctx, routing, ConditionReasonGatewaySynced) { - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeGatewaySynced, status, - ConditionReasonGatewaySynced, message) - } else { - currentStatus := r.getConditionStatus(ctx, routing, ConditionTypeGatewaySynced) - if currentStatus != status { - r.deleteCondition(ctx, routing, ConditionTypeGatewaySynced, ConditionReasonGatewaySynced) - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeGatewaySynced, status, - ConditionReasonGatewaySynced, message) - } - } - return nil -} - -func (r *RoutingReconciler) setConditionVirtualServiceSynced(ctx context.Context, routing *skiperatorv1alpha1.Routing, status metav1.ConditionStatus, message string) error { - if !r.containsCondition(ctx, routing, CoditionReasonVirtualServiceSynced) { - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeVirtualServiceSynced, ConditionStatusTrue, - CoditionReasonVirtualServiceSynced, ConditionMessageVirtualServiceSynced) - } else { - currentStatus := r.getConditionStatus(ctx, routing, ConditionTypeVirtualServiceSynced) - if currentStatus != status { - r.deleteCondition(ctx, routing, ConditionTypeVirtualServiceSynced, CoditionReasonVirtualServiceSynced) - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeVirtualServiceSynced, status, - CoditionReasonVirtualServiceSynced, message) - } - } - return nil -} - -func (r *RoutingReconciler) setConditionNetworkPolicySynced(ctx context.Context, routing *skiperatorv1alpha1.Routing, status metav1.ConditionStatus, message string) error { - if !r.containsCondition(ctx, routing, ConditionReasonNetworkPolicySynced) { - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeNetworkPolicySynced, ConditionStatusTrue, - ConditionReasonNetworkPolicySynced, ConditionMessageNetworkPolicySynced) - } else { - currentStatus := r.getConditionStatus(ctx, routing, ConditionTypeNetworkPolicySynced) - if currentStatus != status { - 
r.deleteCondition(ctx, routing, ConditionTypeNetworkPolicySynced, ConditionReasonNetworkPolicySynced) - return util.AppendCondition(ctx, r.GetClient(), routing, ConditionTypeNetworkPolicySynced, status, - ConditionReasonNetworkPolicySynced, message) - } - } - return nil -} - -func (r *RoutingReconciler) getConditionStatus(ctx context.Context, routing *skiperatorv1alpha1.Routing, typeName string) metav1.ConditionStatus { - - var output metav1.ConditionStatus = ConditionStatusUnknown - for _, condition := range routing.Status.Conditions { - if condition.Type == typeName { - return condition.Status - } - } - return output -} - -func (r *RoutingReconciler) deleteCondition(ctx context.Context, routing *skiperatorv1alpha1.Routing, typeName string, reason string) error { - logger := log.FromContext(ctx) - var newConditions = make([]metav1.Condition, 0) - for _, condition := range routing.Status.Conditions { - if condition.Type != typeName || condition.Reason != reason { - newConditions = append(newConditions, condition) - } - } - routing.Status.Conditions = newConditions - - err := r.GetClient().Status().Update(ctx, routing) - if err != nil { - logger.Error(err, "Routing resource status update failed") - } - - return err -} - -func (r *RoutingReconciler) containsCondition(ctx context.Context, routing *skiperatorv1alpha1.Routing, reason string) bool { - output := false - for _, condition := range routing.Status.Conditions { - if condition.Reason == reason { - output = true - } - } - return output -} diff --git a/controllers/routing/virtual_service.go b/controllers/routing/virtual_service.go deleted file mode 100644 index 8bee9c63..00000000 --- a/controllers/routing/virtual_service.go +++ /dev/null @@ -1,113 +0,0 @@ -package routingcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - networkingv1beta1api "istio.io/api/networking/v1beta1" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *RoutingReconciler) reconcileVirtualService(ctx context.Context, routing *skiperatorv1alpha1.Routing) (reconcile.Result, error) { - virtualService := networkingv1beta1.VirtualService{ - ObjectMeta: v1.ObjectMeta{ - Name: routing.GetVirtualServiceName(), - Namespace: routing.Namespace, - }, - } - - var err error - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &virtualService, func() error { - - err := ctrlutil.SetControllerReference(routing, &virtualService, r.GetScheme()) - if err != nil { - return err - } - virtualService.Spec = networkingv1beta1api.VirtualService{ - ExportTo: []string{".", "istio-system", "istio-gateways"}, - Gateways: []string{ - routing.GetGatewayName(), - }, - Hosts: []string{ - routing.Spec.Hostname, - }, - Http: []*networkingv1beta1api.HTTPRoute{}, - } - - if routing.GetRedirectToHTTPS() { - virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{ - Name: "redirect-to-https", - Match: []*networkingv1beta1api.HTTPMatchRequest{ - { - WithoutHeaders: map[string]*networkingv1beta1api.StringMatch{ - ":path": { - MatchType: &networkingv1beta1api.StringMatch_Prefix{ - Prefix: "/.well-known/acme-challenge/", - }, - }, - }, - Port: 80, - }, - }, - Redirect: &networkingv1beta1api.HTTPRedirect{ - Scheme: "https", - RedirectCode: 308, - }, 
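// The set-condition helpers above all share one small state machine — a
// hypothetical call sequence for illustration:
//
//	setConditionGatewaySynced(ctx, r, ConditionStatusTrue, "synced")    // first call appends
//	setConditionGatewaySynced(ctx, r, ConditionStatusTrue, "synced")    // same status: no-op
//	setConditionGatewaySynced(ctx, r, ConditionStatusFalse, "bad host") // flip: delete + re-append
//
// i.e. a condition is only rewritten when its status actually changes.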
- }) - } - - for _, route := range routing.Spec.Routes { - applicationNamespacedName := types.NamespacedName{Namespace: routing.Namespace, Name: route.TargetApp} - targetApplication, err := getApplication(r.GetClient(), ctx, applicationNamespacedName) - if err != nil { - return err - } - - httpRoute := &networkingv1beta1api.HTTPRoute{ - Name: route.TargetApp, - Match: []*networkingv1beta1api.HTTPMatchRequest{ - { - Port: 443, - Uri: &networkingv1beta1api.StringMatch{ - MatchType: &networkingv1beta1api.StringMatch_Prefix{ - Prefix: route.PathPrefix, - }, - }, - }, - }, - Route: []*networkingv1beta1api.HTTPRouteDestination{ - { - Destination: &networkingv1beta1api.Destination{ - Host: targetApplication.Name, - Port: &networkingv1beta1api.PortSelector{ - Number: uint32(targetApplication.Spec.Port), - }, - }, - }, - }, - } - - if route.RewriteUri { - httpRoute.Rewrite = &networkingv1beta1api.HTTPRewrite{ - Uri: "/", - } - } - - virtualService.Spec.Http = append(virtualService.Spec.Http, httpRoute) - } - return err - }) - - if err != nil { - err = r.setConditionVirtualServiceSynced(ctx, routing, ConditionStatusFalse, err.Error()) - return util.RequeueWithError(err) - } - - err = r.setConditionVirtualServiceSynced(ctx, routing, ConditionStatusTrue, ConditionMessageVirtualServiceSynced) - return util.RequeueWithError(err) -} diff --git a/controllers/skipjob/controller.go b/controllers/skipjob/controller.go deleted file mode 100644 index fd77b261..00000000 --- a/controllers/skipjob/controller.go +++ /dev/null @@ -1,155 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - batchv1 "k8s.io/api/batch/v1" - networkingv1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=skipjobs;skipjobs/status,verbs=get;list;watch;update -// +kubebuilder:rbac:groups=batch,resources=jobs;cronjobs,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods;pods/ephemeralcontainers,verbs=get;list;watch;create;update;patch;delete - -type SKIPJobReconciler struct { - util.ReconcilerBase -} - -func (r *SKIPJobReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - // GenerationChangedPredicate is now only applied to the SkipJob itself to allow status changes on Jobs/CronJobs to affect reconcile loops - For(&skiperatorv1alpha1.SKIPJob{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). - Owns(&batchv1.CronJob{}). - Owns(&batchv1.Job{}). 
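// Effect of the per-route HTTPRoute built above, with illustrative values: a
// route {TargetApp: "billing", PathPrefix: "/billing", RewriteUri: true}
// matches https://<hostname>/billing/invoice/42 on port 443, rewrites the
// matched prefix to "/" before forwarding, and sends the request to the
// "billing" Service on that application's configured port; with RewriteUri
// unset the upstream sees the original /billing/invoice/42 path.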
- // This is added as the Jobs created by CronJobs are not owned by the SKIPJob directly, but rather through the CronJob - Watches(&batchv1.Job{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request { - job, isJob := object.(*batchv1.Job) - - if !isJob { - return nil - } - - if skipJobName, exists := job.Labels[SKIPJobReferenceLabelKey]; exists { - return []reconcile.Request{ - { - types.NamespacedName{ - Namespace: job.Namespace, - Name: skipJobName, - }, - }, - } - } - - return nil - })). - Owns(&networkingv1.NetworkPolicy{}). - Owns(&istionetworkingv1beta1.ServiceEntry{}). - // Some NetPol entries are not added unless an application is present. If we reconcile all jobs when there has been changes to NetPols, we can assume - // that changes to an Applications AccessPolicy will cause a reconciliation of Jobs - Watches(&networkingv1.NetworkPolicy{}, handler.EnqueueRequestsFromMapFunc(r.getJobsToReconcile)). - Complete(r) -} - -func (r *SKIPJobReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { - skipJob := &skiperatorv1alpha1.SKIPJob{} - err := r.GetClient().Get(ctx, req.NamespacedName, skipJob) - - if errors.IsNotFound(err) { - return util.DoNotRequeue() - } else if err != nil { - r.EmitWarningEvent(skipJob, "ReconcileStartFail", "something went wrong fetching the SKIPJob, it might have been deleted") - return util.RequeueWithError(err) - } - - tmpSkipJob := skipJob.DeepCopy() - err = skipJob.ApplyDefaults() - if err != nil { - return util.RequeueWithError(err) - } - - specDiff, err := util.GetObjectDiff(tmpSkipJob.Spec, skipJob.Spec) - if err != nil { - return util.RequeueWithError(err) - } - statusDiff, err := util.GetObjectDiff(tmpSkipJob.Status, skipJob.Status) - if err != nil { - return util.RequeueWithError(err) - } - - // If we update the SKIPJob initially on applied defaults before starting reconciling resources we allow all - // updates to be visible even though the controllerDuties may take some time. 
- if len(specDiff) > 0 { - err := r.GetClient().Update(ctx, skipJob) - return reconcile.Result{Requeue: true}, err - } - - if len(statusDiff) > 0 { - err := r.GetClient().Status().Update(ctx, skipJob) - return reconcile.Result{Requeue: true}, err - } - - r.EmitNormalEvent(skipJob, "ReconcileStart", fmt.Sprintf("SKIPJob %v has started reconciliation loop", skipJob.Name)) - - controllerDuties := []func(context.Context, *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error){ - r.reconcileServiceAccount, - r.reconcileNetworkPolicy, - r.reconcileEgressServiceEntry, - r.reconcileConfigMap, - r.reconcileJob, - r.reconcilePodMonitor, - } - - for _, fn := range controllerDuties { - res, err := fn(ctx, skipJob) - if err != nil { - return res, err - } else if res.RequeueAfter > 0 || res.Requeue { - return res, nil - } - } - - r.EmitNormalEvent(skipJob, "ReconcileEnd", fmt.Sprintf("SKIPJob %v has finished reconciliation loop", skipJob.Name)) - - err = r.GetClient().Update(ctx, skipJob) - return util.RequeueWithError(err) -} - -func (r *SKIPJobReconciler) getJobsToReconcile(ctx context.Context, object client.Object) []reconcile.Request { - var jobsToReconcile skiperatorv1alpha1.SKIPJobList - var reconcileRequests []reconcile.Request - - owner := object.GetOwnerReferences() - if len(owner) == 0 { - return reconcileRequests - } - - // Assume only one owner - if owner[0].Kind != "Application" { - return reconcileRequests - } - - err := r.GetClient().List(ctx, &jobsToReconcile) - if err != nil { - return nil - } - for _, job := range jobsToReconcile.Items { - reconcileRequests = append(reconcileRequests, reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: job.Namespace, - Name: job.Name, - }, - }) - } - return reconcileRequests -} diff --git a/controllers/skipjob/egress_service_entry.go b/controllers/skipjob/egress_service_entry.go deleted file mode 100644 index 32ef30a2..00000000 --- a/controllers/skipjob/egress_service_entry.go +++ /dev/null @@ -1,58 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/resourcegenerator/istio" - "github.com/kartverket/skiperator/pkg/util" - networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *SKIPJobReconciler) reconcileEgressServiceEntry(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - serviceEntries, err := istio.GetServiceEntries(skipJob.Spec.Container.AccessPolicy, skipJob) - if err != nil { - r.EmitWarningEvent(skipJob, "ServiceEntryError", fmt.Sprintf("something went wrong when fetching service entries: %v", err.Error())) - return util.RequeueWithError(err) - } - - for _, serviceEntry := range serviceEntries { - // CreateOrPatch gets the object (from cache) before the mutating function is run, masquerading actual changes - // Restoring the Spec from a copy within the mutating func fixes this - desiredServiceEntry := serviceEntry.DeepCopy() - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &serviceEntry, func() error { - serviceEntry.Spec = desiredServiceEntry.Spec - // Set application as owner of the service entry - err := ctrlutil.SetControllerReference(skipJob, &serviceEntry, r.GetScheme()) - if err != nil { - return err - } - util.SetCommonAnnotations(&serviceEntry) - - return nil - 
}) - - if err != nil { - return util.RequeueWithError(err) - } - } - - serviceEntriesInNamespace := networkingv1beta1.ServiceEntryList{} - err = r.GetClient().List(ctx, &serviceEntriesInNamespace, client.InNamespace(skipJob.Namespace)) - if err != nil { - return util.RequeueWithError(err) - } - - serviceEntriesToDelete := istio.GetServiceEntriesToDelete(serviceEntriesInNamespace.Items, skipJob.Name, serviceEntries) - for _, serviceEntry := range serviceEntriesToDelete { - err = r.DeleteObjectIfExists(ctx, &serviceEntry) - if err != nil { - return util.RequeueWithError(err) - } - } - - return util.RequeueWithError(err) -} diff --git a/controllers/skipjob/gcp_configmap.go b/controllers/skipjob/gcp_configmap.go deleted file mode 100644 index fff4a4c1..00000000 --- a/controllers/skipjob/gcp_configmap.go +++ /dev/null @@ -1,76 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp" - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *SKIPJobReconciler) reconcileConfigMap(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - if skipJob.Spec.Container.GCP != nil { - gcpIdentityConfigMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "gcp-identity-config"} - gcpIdentityConfigMap, err := util.GetConfigMap(r.GetClient(), ctx, gcpIdentityConfigMapNamespacedName) - - if !util.ErrIsMissingOrNil( - r.GetRecorder(), - err, - fmt.Sprintf("cannot find configmap named %v in namespace %v", gcpIdentityConfigMapNamespacedName.Name, gcpIdentityConfigMapNamespacedName.Namespace), - skipJob, - ) { - return util.RequeueWithError(err) - } - - err = r.setupGCPAuthConfigMap(ctx, gcpIdentityConfigMap, skipJob) - if err != nil { - return util.RequeueWithError(err) - } - } else { - gcpAuthConfigMap := corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: skipJob.Namespace, - Name: gcp.GetGCPConfigMapName(skipJob.Name), - }, - } - err := r.DeleteObjectIfExists(ctx, &gcpAuthConfigMap) - if err != nil { - return util.RequeueWithError(err) - } - - } - return util.DoNotRequeue() - -} - -func (r *SKIPJobReconciler) setupGCPAuthConfigMap(ctx context.Context, gcpIdentityConfigMap corev1.ConfigMap, skipJob *skiperatorv1alpha1.SKIPJob) error { - - gcpAuthConfigMapName := gcp.GetGCPConfigMapName(skipJob.Name) - gcpAuthConfigMap, err := gcp.GetGoogleServiceAccountCredentialsConfigMap( - ctx, - skipJob.Namespace, - gcpAuthConfigMapName, - skipJob.Spec.Container.GCP.Auth.ServiceAccount, - gcpIdentityConfigMap, - ) - if err != nil { - return err - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &gcpAuthConfigMap, func() error { - // Set application as owner of the configmap - err := ctrlutil.SetControllerReference(skipJob, &gcpAuthConfigMap, r.GetScheme()) - if err != nil { - return err - } - - return nil - }) - - return err -} diff --git a/controllers/skipjob/job.go b/controllers/skipjob/job.go deleted file mode 100644 index 46f1b5f3..00000000 --- a/controllers/skipjob/job.go +++ /dev/null @@ -1,317 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - 
"github.com/kartverket/skiperator/pkg/resourcegenerator/core" - "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp" - "github.com/kartverket/skiperator/pkg/util" - "golang.org/x/exp/maps" - batchv1 "k8s.io/api/batch/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "time" -) - -var ( - DefaultAwaitCronJobResourcesWait = time.Second * 10 - - SKIPJobReferenceLabelKey = "skiperator.kartverket.no/skipjobName" - - IsSKIPJobKey = "skiperator.kartverket.no/skipjob" -) - -func (r *SKIPJobReconciler) reconcileJob(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - job := batchv1.Job{ObjectMeta: metav1.ObjectMeta{ - Namespace: skipJob.Namespace, - Name: skipJob.Name, - }} - - cronJob := batchv1.CronJob{ObjectMeta: metav1.ObjectMeta{ - Namespace: skipJob.Namespace, - Name: skipJob.Name, - }} - - gcpIdentityConfigMap, err := r.getGCPIdentityConfigMap(ctx, *skipJob) - if err != nil { - return util.RequeueWithError(err) - } - // By specifying port and path annotations, Istio will scrape metrics from the application - // and merge it together with its own metrics. - // - // See - // - https://superorbital.io/blog/istio-metrics-merging/ - // - https://androidexample365.com/an-example-of-how-istio-metrics-merging-works/ - istioEnabled := r.IsIstioEnabledForNamespace(ctx, skipJob.Namespace) - if istioEnabled && skipJob.Spec.Prometheus != nil { - skipJob.Annotations["prometheus.io/port"] = skipJob.Spec.Prometheus.Port.StrVal - skipJob.Annotations["prometheus.io/path"] = skipJob.Spec.Prometheus.Path - } - - if skipJob.Spec.Cron != nil { - err = r.GetClient().Get(ctx, types.NamespacedName{ - Namespace: cronJob.Namespace, - Name: cronJob.Name, - }, &cronJob) - - if errors.IsNotFound(err) { - err := ctrlutil.SetControllerReference(skipJob, &cronJob, r.GetScheme()) - if err != nil { - return util.RequeueWithError(err) - } - - util.SetCommonAnnotations(&cronJob) - - cronJob.Spec = getCronJobSpec(skipJob, nil, nil, gcpIdentityConfigMap) - - err = r.GetClient().Create(ctx, &cronJob) - if err != nil { - return util.RequeueWithError(err) - } - - log.FromContext(ctx).Info(fmt.Sprintf("cronjob %v/%v created, requeuing reconcile in %v seconds to await subresource creation", cronJob.Namespace, cronJob.Name, DefaultAwaitCronJobResourcesWait.Seconds())) - return reconcile.Result{RequeueAfter: 5}, nil - } else if err == nil { - currentSpec := cronJob.Spec - desiredSpec := getCronJobSpec(skipJob, cronJob.Spec.JobTemplate.Spec.Selector, cronJob.Spec.JobTemplate.Spec.Template.Labels, gcpIdentityConfigMap) - - cronJobSpecDiff, err := util.GetObjectDiff(currentSpec, desiredSpec) - if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotUpdateCronJob", fmt.Sprintf("something went wrong when updating the CronJob subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - - if len(cronJobSpecDiff) > 0 { - cronJob.Spec = desiredSpec - err = r.GetClient().Update(ctx, &cronJob) - if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotUpdateCronJob", fmt.Sprintf("something went wrong when updating the CronJob subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - } - } else if err != nil { - 
r.EmitWarningEvent(skipJob, "CouldNotGetCronJob", fmt.Sprintf("something went wrong when getting the CronJob subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - } else { - err = r.GetClient().Get(ctx, types.NamespacedName{ - Namespace: skipJob.Namespace, - Name: job.Name, - }, &job) - - if errors.IsNotFound(err) { - util.SetCommonAnnotations(&job) - - err = ctrlutil.SetControllerReference(skipJob, &job, r.GetScheme()) - if err != nil { - return util.RequeueWithError(err) - } - - desiredSpec := getJobSpec(skipJob, job.Spec.Selector, job.Spec.Template.Labels, gcpIdentityConfigMap) - job.Labels = GetJobLabels(skipJob, job.Labels) - job.Spec = desiredSpec - - err := r.GetClient().Create(ctx, &job) - if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotCreateJob", fmt.Sprintf("something went wrong when creating the Job subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - - err = r.SetStatusRunning(ctx, skipJob) - - return util.RequeueWithError(err) - } else if err == nil { - currentSpec := job.Spec - desiredSpec := getJobSpec(skipJob, job.Spec.Selector, job.Spec.Template.Labels, gcpIdentityConfigMap) - - jobDiff, err := util.GetObjectDiff(currentSpec, desiredSpec) - if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotUpdateJob", fmt.Sprintf("something went wrong when updating the Job subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - - if len(jobDiff) > 0 { - job.Spec = desiredSpec - err := r.GetClient().Update(ctx, &job) - if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotUpdateJob", fmt.Sprintf("something went wrong when updating the Job subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - - return util.DoNotRequeue() - } - } else if err != nil { - r.EmitWarningEvent(skipJob, "CouldNotGetJob", fmt.Sprintf("something went wrong when getting the Job subresource of SKIPJob %v: %v", skipJob.Name, err)) - return util.RequeueWithError(err) - } - } - - jobsToCheckList := batchv1.JobList{} - - err = r.GetClient().List(ctx, &jobsToCheckList, client.MatchingLabels{ - SKIPJobReferenceLabelKey: skipJob.Name, - }) - if err != nil { - return util.RequeueWithError(err) - } - - if len(jobsToCheckList.Items) == 0 { - log.FromContext(ctx).Info(fmt.Sprintf("could not find any jobs related to SKIPJob %v/%v, skipping job checks", skipJob.Namespace, skipJob.Name)) - return util.DoNotRequeue() - } - - for _, job := range jobsToCheckList.Items { - if isFailed, failedJobMessage := isFailedJob(job); isFailed { - err = r.SetStatusFailed(ctx, skipJob, fmt.Sprintf("job %v/%v failed, reason: %v", job.Name, job.Namespace, failedJobMessage)) - if err != nil { - return util.RequeueWithError(err) - } - continue - } - - if job.Status.CompletionTime != nil { - err = r.SetStatusFinished(ctx, skipJob) - if err != nil { - return util.RequeueWithError(err) - } - continue - } - - err := r.SetStatusRunning(ctx, skipJob) - if err != nil { - return util.RequeueWithError(err) - } - } - - return util.DoNotRequeue() -} - -func isFailedJob(job batchv1.Job) (bool, string) { - for _, condition := range job.Status.Conditions { - if condition.Type == ConditionFailed && condition.Status == corev1.ConditionTrue { - return true, condition.Message - } - } - - return false, "" -} - -func (r *SKIPJobReconciler) getGCPIdentityConfigMap(ctx context.Context, skipJob skiperatorv1alpha1.SKIPJob) (*corev1.ConfigMap, error) { - if skipJob.Spec.Container.GCP != nil { - 
gcpIdentityConfigMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "gcp-identity-config"} - - configMap, err := util.GetConfigMap(r.GetClient(), ctx, gcpIdentityConfigMapNamespacedName) - if !util.ErrIsMissingOrNil( - r.GetRecorder(), - err, - "Cannot find configmap named "+gcpIdentityConfigMapNamespacedName.Name+" in namespace "+gcpIdentityConfigMapNamespacedName.Namespace, - &skipJob, - ) { - return nil, err - } - - return &configMap, nil - } else { - return nil, nil - } -} - -func getCronJobSpec(skipJob *skiperatorv1alpha1.SKIPJob, selector *metav1.LabelSelector, podLabels map[string]string, gcpIdentityConfigMap *corev1.ConfigMap) batchv1.CronJobSpec { - return batchv1.CronJobSpec{ - Schedule: skipJob.Spec.Cron.Schedule, - StartingDeadlineSeconds: skipJob.Spec.Cron.StartingDeadlineSeconds, - ConcurrencyPolicy: skipJob.Spec.Cron.ConcurrencyPolicy, - Suspend: skipJob.Spec.Cron.Suspend, - JobTemplate: batchv1.JobTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: GetJobLabels(skipJob, podLabels), - }, - Spec: getJobSpec(skipJob, selector, podLabels, gcpIdentityConfigMap), - }, - SuccessfulJobsHistoryLimit: util.PointTo(int32(3)), - FailedJobsHistoryLimit: util.PointTo(int32(1)), - } -} - -func GetJobLabels(skipJob *skiperatorv1alpha1.SKIPJob, labels map[string]string) map[string]string { - if len(labels) == 0 { - labels = make(map[string]string) - } - - // Used by hahaha to know that the Pod should be watched for killing sidecars - labels[IsSKIPJobKey] = "true" - maps.Copy(labels, util.GetPodAppSelector(skipJob.KindPostFixedName())) - - // Added to be able to add the SKIPJob to a reconcile queue when Watched Jobs are queued - labels[SKIPJobReferenceLabelKey] = skipJob.Name - - return labels -} - -func getJobSpec(skipJob *skiperatorv1alpha1.SKIPJob, selector *metav1.LabelSelector, podLabels map[string]string, gcpIdentityConfigMap *corev1.ConfigMap) batchv1.JobSpec { - podVolumes, containerVolumeMounts := core.GetContainerVolumeMountsAndPodVolumes(skipJob.Spec.Container.FilesFrom) - envVars := skipJob.Spec.Container.Env - - if skipJob.Spec.Container.GCP != nil { - gcpPodVolume := gcp.GetGCPContainerVolume(gcpIdentityConfigMap.Data["workloadIdentityPool"], skipJob.Name) - gcpContainerVolumeMount := gcp.GetGCPContainerVolumeMount() - gcpEnvVar := gcp.GetGCPEnvVar() - - podVolumes = append(podVolumes, gcpPodVolume) - containerVolumeMounts = append(containerVolumeMounts, gcpContainerVolumeMount) - envVars = append(envVars, gcpEnvVar) - } - - var skipJobContainer corev1.Container - skipJobContainer = core.CreateJobContainer(skipJob, containerVolumeMounts, envVars) - - var containers []corev1.Container - - containers = append(containers, skipJobContainer) - - jobSpec := batchv1.JobSpec{ - Parallelism: util.PointTo(int32(1)), - Completions: util.PointTo(int32(1)), - ActiveDeadlineSeconds: skipJob.Spec.Job.ActiveDeadlineSeconds, - PodFailurePolicy: nil, - BackoffLimit: skipJob.Spec.Job.BackoffLimit, - Selector: nil, - ManualSelector: nil, - Template: corev1.PodTemplateSpec{ - Spec: core.CreatePodSpec( - containers, - podVolumes, - skipJob.KindPostFixedName(), - skipJob.Spec.Container.Priority, - skipJob.Spec.Container.RestartPolicy, - skipJob.Spec.Container.PodSettings, - skipJob.Name, - ), - ObjectMeta: metav1.ObjectMeta{ - Labels: GetJobLabels(skipJob, nil), - }, - }, - TTLSecondsAfterFinished: skipJob.Spec.Job.TTLSecondsAfterFinished, - CompletionMode: util.PointTo(batchv1.NonIndexedCompletion), - Suspend: skipJob.Spec.Job.Suspend, - } - - // Jobs create their 
own selector with a random UUID. Upon creation of the Job we do not know this beforehand. - // Therefore, simply set these again if they already exist, which would be the case if reconciling an existing job. - if selector != nil { - jobSpec.Selector = selector - if jobSpec.Template.ObjectMeta.Labels == nil { - jobSpec.Template.ObjectMeta.Labels = map[string]string{} - } - maps.Copy(jobSpec.Template.ObjectMeta.Labels, podLabels) - } - - return jobSpec -} diff --git a/controllers/skipjob/network_policy.go b/controllers/skipjob/network_policy.go deleted file mode 100644 index a78bb89f..00000000 --- a/controllers/skipjob/network_policy.go +++ /dev/null @@ -1,56 +0,0 @@ -package skipjobcontroller - -import ( - "context" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/resourcegenerator/networking" - "github.com/kartverket/skiperator/pkg/util" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *SKIPJobReconciler) reconcileNetworkPolicy(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - egressServices, err := r.GetEgressServices(ctx, skipJob, skipJob.Spec.Container.AccessPolicy) - if err != nil { - return util.RequeueWithError(err) - } - - namespaces, err := r.GetNamespaces(ctx, skipJob) - if err != nil { - return util.RequeueWithError(err) - } - - netpolOpts := networking.NetPolOpts{ - AccessPolicy: skipJob.Spec.Container.AccessPolicy, - Namespace: skipJob.Namespace, - Namespaces: &namespaces, - Name: skipJob.KindPostFixedName(), - RelatedServices: &egressServices, - IstioEnabled: r.IsIstioEnabledForNamespace(ctx, skipJob.Namespace), - } - - netpolSpec := networking.CreateNetPolSpec(netpolOpts) - - if netpolSpec == nil { - return util.DoNotRequeue() - } - - networkPolicy := networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Namespace: skipJob.Namespace, Name: skipJob.KindPostFixedName()}} - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &networkPolicy, func() error { - err := ctrlutil.SetControllerReference(skipJob, &networkPolicy, r.GetScheme()) - if err != nil { - return err - } - - util.SetCommonAnnotations(&networkPolicy) - - networkPolicy.Spec = *netpolSpec - - return nil - }) - - return util.RequeueWithError(err) -} diff --git a/controllers/skipjob/pod_monitor.go b/controllers/skipjob/pod_monitor.go deleted file mode 100644 index 036c69ab..00000000 --- a/controllers/skipjob/pod_monitor.go +++ /dev/null @@ -1,66 +0,0 @@ -package skipjobcontroller - -import ( - "context" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/util" - pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "strings" -) - -func (r *SKIPJobReconciler) reconcilePodMonitor(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - podMonitor := pov1.PodMonitor{ObjectMeta: metav1.ObjectMeta{ - Name: skipJob.Name + "-monitor", - Namespace: skipJob.Namespace, - Labels: map[string]string{"instance": "primary"}, - }} - - shouldReconcile, err := r.ShouldReconcile(ctx, &podMonitor) - if err != nil || !shouldReconcile { 
- return util.RequeueWithError(err) - } - - if skipJob.Spec.Prometheus == nil { - err := client.IgnoreNotFound(r.GetClient().Delete(ctx, &podMonitor)) - if err != nil { - return util.RequeueWithError(err) - } - return util.DoNotRequeue() - } - - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &podMonitor, func() error { - err := ctrlutil.SetControllerReference(skipJob, &podMonitor, r.GetScheme()) - if err != nil { - return err - } - podMonitor.Spec = pov1.PodMonitorSpec{ - Selector: metav1.LabelSelector{ - MatchLabels: util.GetPodAppSelector(skipJob.Name), - }, - NamespaceSelector: pov1.NamespaceSelector{ - MatchNames: []string{skipJob.Namespace}, - }, - PodMetricsEndpoints: []pov1.PodMetricsEndpoint{ - { - Path: util.IstioMetricsPath, - TargetPort: &util.IstioMetricsPortName, - }, - }, - } - if !skipJob.Spec.Prometheus.AllowAllMetrics { - podMonitor.Spec.PodMetricsEndpoints[0].MetricRelabelConfigs = []pov1.RelabelConfig{ - { - Action: "drop", - Regex: strings.Join(util.DefaultMetricDropList, "|"), - SourceLabels: []pov1.LabelName{"__name__"}, - }, - } - } - return nil - }) - return util.RequeueWithError(err) -} diff --git a/controllers/skipjob/service_account.go b/controllers/skipjob/service_account.go deleted file mode 100644 index 108c48e8..00000000 --- a/controllers/skipjob/service_account.go +++ /dev/null @@ -1,31 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "github.com/kartverket/skiperator/pkg/util" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *SKIPJobReconciler) reconcileServiceAccount(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) (reconcile.Result, error) { - - serviceAccount := corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: skipJob.Namespace, Name: skipJob.KindPostFixedName()}} - - _, err := ctrlutil.CreateOrPatch(ctx, r.GetClient(), &serviceAccount, func() error { - // Set application as owner of the sidecar - err := ctrlutil.SetControllerReference(skipJob, &serviceAccount, r.GetScheme()) - if err != nil { - return err - } - - util.SetCommonAnnotations(&serviceAccount) - - return nil - }) - - return util.RequeueWithError(err) -} diff --git a/controllers/skipjob/status.go b/controllers/skipjob/status.go deleted file mode 100644 index 293f4625..00000000 --- a/controllers/skipjob/status.go +++ /dev/null @@ -1,194 +0,0 @@ -package skipjobcontroller - -import ( - "context" - "fmt" - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/log" -) - -const ( - ConditionRunning = "Running" - ConditionFinished = "Finished" - - ConditionFailed = "Failed" -) - -func (r *SKIPJobReconciler) getConditionRunning(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus) v1.Condition { - return v1.Condition{ - Type: ConditionRunning, - Status: status, - ObservedGeneration: skipJob.Generation, - LastTransitionTime: v1.Now(), - Reason: "JobRunning", - Message: "Job has been created and is now running", - } -} - -func (r *SKIPJobReconciler) getConditionFinished(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus) v1.Condition { - return v1.Condition{ - Type: ConditionFinished, - Status: status, - ObservedGeneration: skipJob.Generation, - LastTransitionTime: v1.Now(), - Reason: "JobFinished", - Message: "Job 
has finished", - } -} - -func (r *SKIPJobReconciler) getConditionFailed(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus, err *string) v1.Condition { - conditionMessage := "Job failed previous run" - if err != nil { - conditionMessage = fmt.Sprintf("%v: %v", conditionMessage, *err) - } - return v1.Condition{ - Type: ConditionFailed, - Status: status, - ObservedGeneration: skipJob.Generation, - LastTransitionTime: v1.Now(), - Reason: "JobFailed", - Message: conditionMessage, - } -} - -func (r *SKIPJobReconciler) SetStatusFinished(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) error { - failedCondition := r.getConditionFailed(skipJob, v1.ConditionFalse, nil) - err := r.deleteCondition(ctx, skipJob, failedCondition) - if err != nil { - return err - } - - conditionsToAdd := []v1.Condition{ - r.getConditionRunning(skipJob, v1.ConditionFalse), - r.getConditionFinished(skipJob, v1.ConditionTrue), - } - - err = r.updateStatusWithCondition(ctx, skipJob, conditionsToAdd) - - return err -} - -func (r *SKIPJobReconciler) SetStatusRunning(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) error { - failedCondition := r.getConditionFailed(skipJob, v1.ConditionFalse, nil) - err := r.deleteCondition(ctx, skipJob, failedCondition) - if err != nil { - return err - } - - conditionsToAdd := []v1.Condition{ - r.getConditionRunning(skipJob, v1.ConditionTrue), - } - - finishedCondition := r.getConditionFinished(skipJob, v1.ConditionFalse) - _, exists := r.getSameConditionIfExists(&skipJob.Status.Conditions, &finishedCondition) - - if exists { - conditionsToAdd = append(conditionsToAdd, finishedCondition) - } - - err = r.updateStatusWithCondition(ctx, skipJob, conditionsToAdd) - - return err -} - -func (r *SKIPJobReconciler) SetStatusFailed(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob, errMessage string) error { - conditionsToAdd := []v1.Condition{ - r.getConditionFailed(skipJob, v1.ConditionTrue, &errMessage), - } - - finishedCondition := r.getConditionFinished(skipJob, v1.ConditionFalse) - _, exists := r.getSameConditionIfExists(&skipJob.Status.Conditions, &finishedCondition) - if exists { - conditionsToAdd = append(conditionsToAdd, finishedCondition) - } - - runningCondition := r.getConditionRunning(skipJob, v1.ConditionFalse) - _, exists = r.getSameConditionIfExists(&skipJob.Status.Conditions, &runningCondition) - if exists { - conditionsToAdd = append(conditionsToAdd, runningCondition) - } - - err := r.updateStatusWithCondition(ctx, skipJob, conditionsToAdd) - - return err -} - -func (r *SKIPJobReconciler) updateStatusWithCondition(ctx context.Context, in *skiperatorv1alpha1.SKIPJob, conditions []v1.Condition) error { - jobConditions := in.Status.Conditions - - for _, conditionToAdd := range conditions { - currentCondition, exists := r.getSameConditionIfExists(&jobConditions, &conditionToAdd) - if !exists { - in.Status.Conditions = append(in.Status.Conditions, conditionToAdd) - continue - } - - isSameType := conditionsHaveSameType(currentCondition, &conditionToAdd) - isSameStatus := conditionsHaveSameStatus(currentCondition, &conditionToAdd) - - if isSameType && isSameStatus { - continue - } - - if isSameType && !isSameStatus { - err := r.deleteCondition(ctx, in, *currentCondition) - if err != nil { - return err - } - in.Status.Conditions = append(in.Status.Conditions, conditionToAdd) - } - } - - err := r.GetClient().Status().Update(ctx, in) - if err != nil { - return err - } - - return nil -} - -func (r *SKIPJobReconciler) deleteCondition(ctx context.Context, 
skipJob *skiperatorv1alpha1.SKIPJob, conditionToDelete v1.Condition) error { - logger := log.FromContext(ctx) - var newConditions []v1.Condition - - for _, condition := range skipJob.Status.Conditions { - if condition.Type != conditionToDelete.Type { - newConditions = append(newConditions, condition) - } - } - - skipJob.Status.Conditions = newConditions - err := r.GetClient().Status().Update(ctx, skipJob) - if err != nil { - logger.Error(err, "skipjob could not delete condition") - return err - } - return nil -} - -func (r *SKIPJobReconciler) GetLastCondition(conditions []v1.Condition) (*v1.Condition, bool) { - if len(conditions) == 0 { - return &v1.Condition{}, false - } - - return &conditions[len(conditions)-1], true -} - -func (r *SKIPJobReconciler) getSameConditionIfExists(currentConditions *[]v1.Condition, conditionToFind *v1.Condition) (*v1.Condition, bool) { - for _, condition := range *currentConditions { - if condition.Type == conditionToFind.Type { - return &condition, true - } - } - - return nil, false -} - -func conditionsHaveSameStatus(condition1 *v1.Condition, condition2 *v1.Condition) bool { - return condition1.Status == condition2.Status -} - -func conditionsHaveSameType(condition1 *v1.Condition, condition2 *v1.Condition) bool { - return condition1.Type == condition2.Type -} diff --git a/go.mod b/go.mod index 71e73c64..f65c25e8 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/nais/liberator v0.0.0-20240628110454-831759e25b73 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.75.1 github.com/r3labs/diff/v3 v3.0.1 + github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 istio.io/api v1.22.3-0.20240703105953-437a88321a16 @@ -82,7 +83,7 @@ require ( github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/cli v25.0.5+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v26.1.4+incompatible // indirect + github.com/docker/docker v26.1.5+incompatible // indirect github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/dustinkirkland/golang-petname v0.0.0-20240428194347-eebcea082ee0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect @@ -215,8 +216,8 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.5.1 // indirect - k8s.io/apiserver v0.30.2 // indirect - k8s.io/component-base v0.30.2 // indirect + k8s.io/apiserver v0.30.3 // indirect + k8s.io/component-base v0.30.3 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b // indirect k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect diff --git a/go.sum b/go.sum index 6ec62f59..208a90ab 100644 --- a/go.sum +++ b/go.sum @@ -326,8 +326,8 @@ github.com/docker/cli v25.0.5+incompatible h1:3Llw3kcE1gOScEojA247iDD+p1l9hHeC7H github.com/docker/cli v25.0.5+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= -github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v26.1.5+incompatible h1:NEAxTwEjxV6VbBMBoGG3zPqbiJosIApZjxlbrG9q3/g= 
+github.com/docker/docker v26.1.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -1377,16 +1377,16 @@ k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbO k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/apiserver v0.30.2 h1:ACouHiYl1yFI2VFI3YGM+lvxgy6ir4yK2oLOsLI1/tw= -k8s.io/apiserver v0.30.2/go.mod h1:BOTdFBIch9Sv0ypSEcUR6ew/NUFGocRFNl72Ra7wTm8= +k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= +k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/component-base v0.30.2 h1:pqGBczYoW1sno8q9ObExUqrYSKhtE5rW3y6gX88GZII= -k8s.io/component-base v0.30.2/go.mod h1:yQLkQDrkK8J6NtP+MGJOws+/PPeEXNpwFixsUI7h/OE= +k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s= +k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.30.2 h1:VSZILO/tkzrz5Tu2j+yFQZ2Dc5JerQZX2GqhFJbQrfw= -k8s.io/kms v0.30.2/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= +k8s.io/kms v0.30.3 h1:NLg+oN45S2Y3U0WiLRzbS61AY/XrS5JBMZp531Z+Pho= +k8s.io/kms v0.30.3/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b h1:Q9xmGWBvOGd8UJyccgpYlLosk/JlfP3xQLNkQlHJeXw= k8s.io/kube-openapi v0.0.0-20240620174524-b456828f718b/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= diff --git a/internal/controllers/application.go b/internal/controllers/application.go new file mode 100644 index 00000000..05a2f677 --- /dev/null +++ b/internal/controllers/application.go @@ -0,0 +1,399 @@ +package controllers + +import ( + "context" + "fmt" + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/internal/controllers/common" + "github.com/kartverket/skiperator/pkg/log" + . 
"github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/certificate" + "github.com/kartverket/skiperator/pkg/resourcegenerator/deployment" + "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp/auth" + "github.com/kartverket/skiperator/pkg/resourcegenerator/hpa" + "github.com/kartverket/skiperator/pkg/resourcegenerator/idporten" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/authorizationpolicy" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/gateway" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/peerauthentication" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/serviceentry" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/virtualservice" + "github.com/kartverket/skiperator/pkg/resourcegenerator/maskinporten" + networkpolicy "github.com/kartverket/skiperator/pkg/resourcegenerator/networkpolicy/dynamic" + "github.com/kartverket/skiperator/pkg/resourcegenerator/pdb" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + "github.com/kartverket/skiperator/pkg/resourcegenerator/service" + "github.com/kartverket/skiperator/pkg/resourcegenerator/serviceaccount" + "github.com/kartverket/skiperator/pkg/resourcegenerator/servicemonitor" + "github.com/kartverket/skiperator/pkg/util" + nais_io_v1 "github.com/nais/liberator/pkg/apis/nais.io/v1" + pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "golang.org/x/exp/maps" + networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" + appsv1 "k8s.io/api/apps/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/validation/field" + "regexp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=applications;applications/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch +// +kubebuilder:rbac:groups=core,resources=services;configmaps;serviceaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.istio.io,resources=gateways;serviceentries;virtualservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=security.istio.io,resources=peerauthentications;authorizationpolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=monitoring.coreos.com,resources=podmonitors,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=nais.io,resources=maskinportenclients;idportenclients,verbs=get;list;watch;create;update;patch;delete + +type ApplicationReconciler struct { + common.ReconcilerBase +} + +const applicationFinalizer = "skip.statkart.no/finalizer" + +var hostMatchExpression = regexp.MustCompile(`^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$`) + +// TODO Watch applications that are using dynamic port allocation +func (r *ApplicationReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&skiperatorv1alpha1.Application{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.Service{}). + Owns(&corev1.ConfigMap{}). + Owns(&networkingv1beta1.ServiceEntry{}). + Owns(&networkingv1beta1.Gateway{}, builder.WithPredicates( + util.MatchesPredicate[*networkingv1beta1.Gateway](isIngressGateway), + )). + Owns(&autoscalingv2.HorizontalPodAutoscaler{}). + Owns(&networkingv1beta1.VirtualService{}). + Owns(&securityv1beta1.PeerAuthentication{}). + Owns(&corev1.ServiceAccount{}). + Owns(&policyv1.PodDisruptionBudget{}). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&securityv1beta1.AuthorizationPolicy{}). + Owns(&nais_io_v1.MaskinportenClient{}). + Owns(&nais_io_v1.IDPortenClient{}). + Owns(&pov1.ServiceMonitor{}). + Watches(&certmanagerv1.Certificate{}, handler.EnqueueRequestsFromMapFunc(handleApplicationCertRequest)). + WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). + Complete(r) +} + +type reconciliationFunc func(reconciliation Reconciliation) error + +// TODO Clean up logs, events + +func (r *ApplicationReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + rLog := log.NewLogger().WithName(fmt.Sprintf("application-controller: %s", req.Name)) + rLog.Info("Starting reconcile", "request", req.Name) + + rdy := r.isClusterReady(ctx) + if !rdy { + panic("Cluster is not ready, missing servicemonitors.monitoring.coreos.com most likely") + } + + application, err := r.getApplication(req, ctx) + if application == nil { + rLog.Info("Application not found, cleaning up watched resources", "application", req.Name) + if errs := r.cleanUpWatchedResources(ctx, req.NamespacedName); len(errs) > 0 { + return common.RequeueWithError(fmt.Errorf("error when trying to clean up watched resources: %w", errs[0])) + } + return common.DoNotRequeue() + } else if err != nil { + return common.RequeueWithError(err) + } + + //TODO do we need this actually? 
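+	// A non-nil deletion timestamp means the Application is being deleted; the only
+	// remaining work is to drop our finalizer so the API server can finish the delete.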
+	isApplicationMarkedToBeDeleted := application.GetDeletionTimestamp() != nil
+	if isApplicationMarkedToBeDeleted {
+		if err = r.finalizeApplication(application, ctx); err != nil {
+			return ctrl.Result{}, err
+		}
+		return common.DoNotRequeue()
+	}
+
+	if !common.ShouldReconcile(application) {
+		return common.DoNotRequeue()
+	}
+
+	if err := validateIngresses(application); err != nil {
+		rLog.Error(err, "invalid ingress in application manifest")
+		r.SetErrorState(ctx, application, err, "invalid ingress in application manifest", "InvalidApplication")
+		return common.RequeueWithError(err)
+	}
+
+	// Copy the application so we can check for diffs. There should be none on existing applications.
+	tmpApplication := application.DeepCopy()
+
+	r.setApplicationDefaults(application, ctx)
+
+	specDiff, err := util.GetObjectDiff(tmpApplication.Spec, application.Spec)
+	if err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	// The finalizer check is due to a bug when updating using controller-runtime.
+	// See https://github.com/kubernetes-sigs/controller-runtime/issues/2453
+	if len(specDiff) > 0 || (!ctrlutil.ContainsFinalizer(tmpApplication, applicationFinalizer) && ctrlutil.ContainsFinalizer(application, applicationFinalizer)) {
+		rLog.Debug("Queuing for spec diff")
+		err := r.GetClient().Update(ctx, application)
+		return reconcile.Result{Requeue: true}, err
+	}
+
+	// TODO: Removed the status diff check here. Why did we need it? It caused endless reconciles because the timestamps always differ (which makes sense).
+	if err = r.GetClient().Status().Update(ctx, application); err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	// Start the actual reconciliation
+	rLog.Debug("Starting reconciliation loop", "application", application.Name)
+	r.SetProgressingState(ctx, application, fmt.Sprintf("Application %v has started reconciliation loop", application.Name))
+
+	istioEnabled := r.IsIstioEnabledForNamespace(ctx, application.Namespace)
+	identityConfigMap, err := r.GetIdentityConfigMap(ctx)
+	if err != nil {
+		rLog.Error(err, "can't find identity config map")
+	} // TODO: Error state?
+
+	reconciliation := NewApplicationReconciliation(ctx, application, rLog, istioEnabled, r.GetRestConfig(), identityConfigMap)
+
+	// TODO: status and conditions in application object
+	funcs := []reconciliationFunc{
+		certificate.Generate,
+		service.Generate,
+		auth.Generate,
+		serviceentry.Generate,
+		gateway.Generate,
+		virtualservice.Generate,
+		hpa.Generate,
+		peerauthentication.Generate,
+		serviceaccount.Generate,
+		networkpolicy.Generate,
+		authorizationpolicy.Generate,
+		pdb.Generate,
+		servicemonitor.Generate,
+		idporten.Generate,
+		maskinporten.Generate,
+		deployment.Generate,
+	}
+
+	for _, f := range funcs {
+		if err = f(reconciliation); err != nil {
+			rLog.Error(err, "failed to generate application resource")
+			// At this point we don't have the GVK of the resource yet, so we can't set subresource status.
+			r.SetErrorState(ctx, application, err, "failed to generate application resource", "ResourceGenerationFailure")
+			return common.RequeueWithError(err)
+		}
+	}
+
+	// We need to do this here, so we are sure it's done. Not setting the GVK can cause big issues.
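+	// setApplicationResourcesDefaults stamps each generated resource with its GVK, common
+	// annotations, owner reference, and application labels before the resource processor
+	// applies the whole set to the cluster.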
+	if err = r.setApplicationResourcesDefaults(reconciliation.GetResources(), application); err != nil {
+		rLog.Error(err, "failed to set application resource defaults")
+		r.SetErrorState(ctx, application, err, "failed to set application resource defaults", "ResourceDefaultsFailure")
+		return common.RequeueWithError(err)
+	}
+
+	if errs := r.GetProcessor().Process(reconciliation); len(errs) > 0 {
+		for _, err = range errs {
+			rLog.Error(err, "failed to process resource")
+			r.EmitWarningEvent(application, "ReconcileEndFail", fmt.Sprintf("Failed to process application resources: %s", err.Error()))
+		}
+		r.SetErrorState(ctx, application, fmt.Errorf("found %d errors", len(errs)), "failed to process application resources, see subresource status", "ProcessorFailure")
+		return common.RequeueWithError(err)
+	}
+
+	r.updateConditions(application)
+	r.SetSyncedState(ctx, application, "Application has been reconciled")
+
+	return common.DoNotRequeue()
+}
+
+func (r *ApplicationReconciler) updateConditions(app *skiperatorv1alpha1.Application) {
+	var conditions []metav1.Condition
+	accessPolicy := app.Spec.AccessPolicy
+	if accessPolicy != nil && !common.IsInternalRulesValid(accessPolicy) {
+		conditions = append(conditions, common.GetInternalRulesCondition(app, metav1.ConditionFalse))
+	} else {
+		conditions = append(conditions, common.GetInternalRulesCondition(app, metav1.ConditionTrue))
+	}
+	app.Status.Conditions = conditions
+}
+
+func (r *ApplicationReconciler) getApplication(req reconcile.Request, ctx context.Context) (*skiperatorv1alpha1.Application, error) {
+	application := &skiperatorv1alpha1.Application{}
+	if err := r.GetClient().Get(ctx, req.NamespacedName, application); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("error when trying to get application: %w", err)
+	}
+
+	return application, nil
+}
+
+func (r *ApplicationReconciler) cleanUpWatchedResources(ctx context.Context, name types.NamespacedName) []error {
+	app := &skiperatorv1alpha1.Application{}
+	app.SetName(name.Name)
+	app.SetNamespace(name.Namespace)
+
+	reconciliation := NewApplicationReconciliation(ctx, app, log.NewLogger(), false, nil, nil)
+	return r.GetProcessor().Process(reconciliation)
+}
+
+func (r *ApplicationReconciler) finalizeApplication(application *skiperatorv1alpha1.Application, ctx context.Context) error {
+	if ctrlutil.ContainsFinalizer(application, applicationFinalizer) {
+		ctrlutil.RemoveFinalizer(application, applicationFinalizer)
+		err := r.GetClient().Update(ctx, application)
+		if err != nil {
+			return fmt.Errorf("something went wrong when trying to finalize application: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (r *ApplicationReconciler) setApplicationResourcesDefaults(resources []client.Object, app *skiperatorv1alpha1.Application) error {
+	for _, resource := range resources {
+		if err := r.SetSubresourceDefaults(resources, app); err != nil {
+			return err
+		}
+		resourceutils.SetApplicationLabels(resource, app)
+	}
+
+	// TODO: should try to combine this with the above
+	resourceLabelsWithNoMatch := resourceutils.FindResourceLabelErrors(app, resources)
+	for k := range resourceLabelsWithNoMatch {
+		r.EmitWarningEvent(app, "MistypedLabel", fmt.Sprintf("Resource label %s does not match a generated resource", k))
+	}
+	return nil
+}
+
+/*
+ * Set application defaults. For existing applications this shouldn't do anything.
+ */
+func (r *ApplicationReconciler) setApplicationDefaults(application *skiperatorv1alpha1.Application, ctx context.Context) {
+	application.FillDefaultsSpec()
+	if !ctrlutil.ContainsFinalizer(application, applicationFinalizer) {
+		ctrlutil.AddFinalizer(application, applicationFinalizer)
+	}
+
+	// Add labels to application
+	// TODO: can we skip a step here?
+	if application.Labels == nil {
+		application.Labels = make(map[string]string)
+	}
+	maps.Copy(application.Labels, application.GetDefaultLabels())
+	maps.Copy(application.Labels, application.Spec.Labels)
+
+	// Add team label
+	if len(application.Spec.Team) == 0 {
+		if name, err := r.teamNameForNamespace(ctx, application); err == nil {
+			application.Spec.Team = name
+		}
+	}
+
+	// We try to feed the access policy with port values dynamically;
+	// if unsuccessful we simply don't set ports and rely on pod selectors.
+	r.UpdateAccessPolicy(ctx, application)
+
+	application.FillDefaultsStatus()
+}
+
+func (r *ApplicationReconciler) isClusterReady(ctx context.Context) bool {
+	if !r.isCrdPresent(ctx, "servicemonitors.monitoring.coreos.com") {
+		return false
+	}
+	return true
+}
+
+func (r *ApplicationReconciler) teamNameForNamespace(ctx context.Context, app *skiperatorv1alpha1.Application) (string, error) {
+	ns := &corev1.Namespace{}
+	if err := r.GetClient().Get(ctx, types.NamespacedName{Name: app.Namespace}, ns); err != nil {
+		return "", err
+	}
+
+	teamValue := ns.Labels["team"]
+	if len(teamValue) > 0 {
+		return teamValue, nil
+	}
+	return "", fmt.Errorf("missing value for team label")
+}
+
+// Name in the form of "servicemonitors.monitoring.coreos.com".
+func (r *ApplicationReconciler) isCrdPresent(ctx context.Context, name string) bool {
+	result, err := r.GetApiExtensionsClient().ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, metav1.GetOptions{})
+	if err != nil || result == nil {
+		return false
+	}
+
+	return true
+}
+
+func handleApplicationCertRequest(_ context.Context, obj client.Object) []reconcile.Request {
+	cert, ok := obj.(*certmanagerv1.Certificate)
+	if !ok {
+		return nil
+	}
+
+	isSkiperatorOwned := cert.Labels["app.kubernetes.io/managed-by"] == "skiperator" &&
+		cert.Labels["skiperator.kartverket.no/controller"] == "application"
+
+	requests := make([]reconcile.Request, 0)
+
+	if isSkiperatorOwned {
+		requests = append(requests, reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: cert.Labels["application.skiperator.no/app-namespace"],
+				Name:      cert.Labels["application.skiperator.no/app-name"],
+			},
+		})
+	}
+
+	return requests
+}
+
+func isIngressGateway(gateway *networkingv1beta1.Gateway) bool {
+	match, _ := regexp.MatchString("^.*-ingress-.*$", gateway.Name)
+
+	return match
+}
+
+// TODO should be handled better
+func validateIngresses(application *skiperatorv1alpha1.Application) error {
+	var err error
+	hosts, err := application.Spec.Hosts()
+	if err != nil {
+		return err
+	}
+
+	// TODO: Remove/rewrite?
+	for _, h := range hosts.AllHosts() {
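+		// hostMatchExpression requires at least two lower-case DNS labels separated by
+		// periods: e.g. "foo.example.com" passes, while "Example.com", "intranet" and
+		// "foo_bar.example.com" are rejected (illustrative hostnames, not from the codebase).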
+		if !hostMatchExpression.MatchString(h.Hostname) {
+			errMessage := fmt.Sprintf("ingress with value '%s' was not valid. An ingress must be lower case, contain no spaces, be a non-empty string, and have a hostname/domain separated by a period", h.Hostname)
+			return errors.NewInvalid(application.GroupVersionKind().GroupKind(), application.Name, field.ErrorList{
+				field.Invalid(field.NewPath("application").Child("spec").Child("ingresses"), application.Spec.Ingresses, errMessage),
+			})
+		}
+	}
+	return nil
+}
diff --git a/internal/controllers/common/reconciler.go b/internal/controllers/common/reconciler.go
new file mode 100644
index 00000000..57df9dbd
--- /dev/null
+++ b/internal/controllers/common/reconciler.go
@@ -0,0 +1,235 @@
+package common
+
+import (
+	"context"
+	"fmt"
+	"github.com/go-logr/logr"
+	"github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/api/v1alpha1/podtypes"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils"
+	"github.com/kartverket/skiperator/pkg/resourceprocessor"
+	"github.com/kartverket/skiperator/pkg/util"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// ReconcilerBase is a base struct from which all reconcilers can be derived. By doing so, your reconcilers will also inherit a set of utility functions.
+// To inherit the functionality, just build your reconciler this way:
+//
+// type MyReconciler struct {
+//	util.ReconcilerBase
+//	... other optional fields ...
+// } +type ReconcilerBase struct { + client client.Client + extensionsClient *apiextensionsclient.Clientset + scheme *runtime.Scheme + restConfig *rest.Config + recorder record.EventRecorder + processor *resourceprocessor.ResourceProcessor + Logger logr.Logger +} + +func NewReconcilerBase( + client client.Client, + extensionsClient *apiextensionsclient.Clientset, + scheme *runtime.Scheme, + restConfig *rest.Config, + recorder record.EventRecorder, + processor *resourceprocessor.ResourceProcessor, +) ReconcilerBase { + return ReconcilerBase{ + client: client, + extensionsClient: extensionsClient, + scheme: scheme, + restConfig: restConfig, + recorder: recorder, + processor: processor, + } +} + +func NewFromManager(mgr manager.Manager, recorder record.EventRecorder, schemas []unstructured.UnstructuredList) ReconcilerBase { + extensionsClient, err := apiextensionsclient.NewForConfig(mgr.GetConfig()) + if err != nil { + ctrl.Log.Error(err, "could not create extensions client, won't be able to peek at CRDs") + } + processor := resourceprocessor.NewResourceProcessor(mgr.GetClient(), schemas, mgr.GetScheme()) + + return NewReconcilerBase(mgr.GetClient(), extensionsClient, mgr.GetScheme(), mgr.GetConfig(), recorder, processor) +} + +// GetClient returns the underlying client +func (r *ReconcilerBase) GetClient() client.Client { + return r.client +} + +// GetApiExtensionsClient returns the underlying API Extensions client +func (r *ReconcilerBase) GetApiExtensionsClient() *apiextensionsclient.Clientset { + return r.extensionsClient +} + +// GetRestConfig returns the underlying rest config +func (r *ReconcilerBase) GetRestConfig() *rest.Config { + return r.restConfig +} + +// GetRecorder returns the underlying recorder +func (r *ReconcilerBase) GetRecorder() record.EventRecorder { + return r.recorder +} + +// GetScheme returns the scheme +func (r *ReconcilerBase) GetScheme() *runtime.Scheme { + return r.scheme +} + +func (r *ReconcilerBase) GetProcessor() *resourceprocessor.ResourceProcessor { + return r.processor +} + +func (r *ReconcilerBase) EmitWarningEvent(object runtime.Object, reason string, message string) { + r.GetRecorder().Event( + object, + corev1.EventTypeWarning, reason, + message, + ) +} + +func (r *ReconcilerBase) EmitNormalEvent(object runtime.Object, reason string, message string) { + r.GetRecorder().Event( + object, + corev1.EventTypeNormal, reason, + message, + ) +} + +func (r *ReconcilerBase) GetIdentityConfigMap(ctx context.Context) (*corev1.ConfigMap, error) { + namespacedName := types.NamespacedName{Name: "gcp-identity-config", Namespace: "skiperator-system"} + identityConfigMap := &corev1.ConfigMap{} + if err := r.client.Get(ctx, namespacedName, identityConfigMap); err != nil { + return nil, err + } + return identityConfigMap, nil +} + +func (r *ReconcilerBase) IsIstioEnabledForNamespace(ctx context.Context, namespaceName string) bool { + namespace := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespaceName, + }, + } + + err := r.GetClient().Get(ctx, client.ObjectKeyFromObject(&namespace), &namespace) + if err != nil { + return false + } + + v, exists := namespace.Labels[util.IstioRevisionLabel] + + return exists && len(v) > 0 +} + +func (r *ReconcilerBase) SetSubresourceDefaults(resources []client.Object, skipObj client.Object) error { + for _, resource := range resources { + if err := resourceutils.AddGVK(r.GetScheme(), resource); err != nil { + return err + } + resourceutils.SetCommonAnnotations(resource) + if err := 
resourceutils.SetOwnerReference(skipObj, resource, r.GetScheme()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ReconcilerBase) SetErrorState(ctx context.Context, skipObj v1alpha1.SKIPObject, err error, message string, reason string) {
+	r.EmitWarningEvent(skipObj, reason, message)
+	skipObj.GetStatus().SetSummaryError(message + ": " + err.Error())
+	r.updateStatus(ctx, skipObj)
+}
+
+func (r *ReconcilerBase) SetProgressingState(ctx context.Context, skipObj v1alpha1.SKIPObject, message string) {
+	r.EmitNormalEvent(skipObj, "ReconcileStart", message)
+	skipObj.GetStatus().SetSummaryProgressing()
+	r.updateStatus(ctx, skipObj)
+}
+
+func (r *ReconcilerBase) SetSyncedState(ctx context.Context, skipObj v1alpha1.SKIPObject, message string) {
+	r.EmitNormalEvent(skipObj, "ReconcileEndSuccess", message)
+	skipObj.GetStatus().SetSummarySynced()
+	r.updateStatus(ctx, skipObj)
+}
+
+func (r *ReconcilerBase) updateStatus(ctx context.Context, skipObj v1alpha1.SKIPObject) {
+	latestObj := skipObj.DeepCopyObject().(v1alpha1.SKIPObject)
+	key := client.ObjectKeyFromObject(skipObj)
+
+	if err := r.GetClient().Get(ctx, key, latestObj); err != nil {
+		r.Logger.Error(err, "Failed to get latest object version")
+		// Without the latest version we would write a stale status, so bail out
+		return
+	}
+	latestObj.SetStatus(*skipObj.GetStatus())
+	if err := r.GetClient().Status().Update(ctx, latestObj); err != nil {
+		r.Logger.Error(err, "Failed to update status")
+	}
+}
+
+func (r *ReconcilerBase) getTargetApplication(ctx context.Context, appName string, namespace string) (*v1alpha1.Application, error) {
+	application := &v1alpha1.Application{}
+	if err := r.GetClient().Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, application); err != nil {
+		return nil, fmt.Errorf("error when trying to get target application: %w", err)
+	}
+
+	return application, nil
+}
+
+func (r *ReconcilerBase) UpdateAccessPolicy(ctx context.Context, obj v1alpha1.SKIPObject) {
+	if obj.GetCommonSpec().AccessPolicy == nil {
+		return
+	}
+
+	if obj.GetCommonSpec().AccessPolicy.Outbound != nil {
+		if err := r.setPortsForRules(ctx, obj.GetCommonSpec().AccessPolicy.Outbound.Rules, obj.GetNamespace()); err != nil {
+			r.EmitWarningEvent(obj, "InvalidAccessPolicy", fmt.Sprintf("failed to set ports for outbound rules: %s", err.Error()))
+		}
+	}
+}
+
+func (r *ReconcilerBase) setPortsForRules(ctx context.Context, rules []podtypes.InternalRule, namespace string) error {
+	for i := range rules {
+		rule := &rules[i]
+		if len(rule.Ports) != 0 {
+			continue
+		}
+		if rule.Namespace != "" {
+			namespace = rule.Namespace
+		} else if len(rule.NamespacesByLabel) != 0 {
+			selector := metav1.LabelSelector{MatchLabels: rule.NamespacesByLabel}
+			labelSelector, err := metav1.LabelSelectorAsSelector(&selector)
+			if err != nil {
+				return err
+			}
+			namespaces := &corev1.NamespaceList{}
+			if err := r.GetClient().List(ctx, namespaces, &client.ListOptions{LabelSelector: labelSelector}); err != nil {
+				return err
+			}
+			if len(namespaces.Items) != 1 {
+				return fmt.Errorf("expected exactly one namespace, but found %d", len(namespaces.Items))
+			}
+			namespace = namespaces.Items[0].Name
+		}
+		targetApp, err := r.getTargetApplication(ctx, rule.Application, namespace)
+		if err != nil {
+			return err
+		}
+		rule.Ports = []networkingv1.NetworkPolicyPort{{Port: util.PointTo(intstr.FromInt32(int32(targetApp.Spec.Port)))}}
+	}
+	return nil
+}
diff --git a/internal/controllers/common/util.go b/internal/controllers/common/util.go
new file mode 100644
index 00000000..cc272a5c
--- /dev/null
+++ b/internal/controllers/common/util.go
@@ -0,0
+1,57 @@ +package common + +import ( + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/api/v1alpha1/podtypes" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func DoNotRequeue() (reconcile.Result, error) { + return reconcile.Result{}, nil +} + +// TODO: exponential backoff +func RequeueWithError(err error) (reconcile.Result, error) { + return reconcile.Result{}, err +} + +func ShouldReconcile(obj client.Object) bool { + labels := obj.GetLabels() + return labels["skiperator.kartverket.no/ignore"] != "true" +} + +func IsNamespaceTerminating(namespace *corev1.Namespace) bool { + return namespace.Status.Phase == corev1.NamespaceTerminating +} + +func IsInternalRulesValid(accessPolicy *podtypes.AccessPolicy) bool { + if accessPolicy == nil || accessPolicy.Outbound == nil { + return true + } + + for _, rule := range accessPolicy.Outbound.Rules { + if len(rule.Ports) == 0 { + return false + } + } + + return true +} + +func GetInternalRulesCondition(obj skiperatorv1alpha1.SKIPObject, status metav1.ConditionStatus) metav1.Condition { + message := "Internal rules are valid" + if status == metav1.ConditionFalse { + message = "Internal rules are invalid, applications or namespaces defined might not exist or have invalid ports" + } + return metav1.Condition{ + Type: "InternalRulesValid", + Status: status, + ObservedGeneration: obj.GetGeneration(), + LastTransitionTime: metav1.Now(), + Reason: "ApplicationReconciled", + Message: message, + } +} diff --git a/internal/controllers/common/util_test.go b/internal/controllers/common/util_test.go new file mode 100644 index 00000000..1d32ea71 --- /dev/null +++ b/internal/controllers/common/util_test.go @@ -0,0 +1,16 @@ +package common + +import ( + "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/testutil" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestShouldReconcile(t *testing.T) { + r := testutil.GetTestMinimalAppReconciliation() + app := r.GetSKIPObject().(*v1alpha1.Application) + assert.True(t, ShouldReconcile(app)) + app.Labels["skiperator.kartverket.no/ignore"] = "true" + assert.False(t, ShouldReconcile(app)) +} diff --git a/internal/controllers/namespace.go b/internal/controllers/namespace.go new file mode 100644 index 00000000..3435cd84 --- /dev/null +++ b/internal/controllers/namespace.go @@ -0,0 +1,138 @@ +package controllers + +import ( + "context" + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/internal/controllers/common" + "github.com/kartverket/skiperator/pkg/log" + . 
"github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/github" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/sidecar" + "github.com/kartverket/skiperator/pkg/resourcegenerator/networkpolicy/defaultdeny" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + "github.com/kartverket/skiperator/pkg/util" + istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type NamespaceReconciler struct { + common.ReconcilerBase + Token string + Registry string +} + +//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch +//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=networking.istio.io,resources=sidecars,verbs=get;list;watch;create;update;patch;delete + +func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.Namespace{}). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&istionetworkingv1beta1.Sidecar{}). + Owns(&corev1.Secret{}, builder.WithPredicates( + util.MatchesPredicate[*corev1.Secret](github.IsImagePullSecret), + )). + Complete(r) +} + +// TODO Move controller to argocd +func (r *NamespaceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) { + rLog := log.NewLogger().WithName(fmt.Sprintf("namespace-controller: %s", req.Name)) + rLog.Debug("Starting reconcile for request", "requestName", req.Name) + + namespace := &corev1.Namespace{} + err := r.GetClient().Get(ctx, req.NamespacedName, namespace) + if errors.IsNotFound(err) || common.IsNamespaceTerminating(namespace) { + rLog.Debug("Namespace is terminating or not found", "name", namespace.Name) + return common.DoNotRequeue() + } else if err != nil { + rLog.Error(err, "something went wrong fetching the namespace") + r.EmitWarningEvent(namespace, "ReconcileStartFail", "something went wrong fetching the namespace, it might have been deleted") + return common.RequeueWithError(err) + } + + if r.isExcludedNamespace(ctx, namespace.Name) { + rLog.Debug("Namespace is excluded from reconciliation", "name", namespace.Name) + return common.DoNotRequeue() + } + //This is a hack because namespace shouldn't be here. 
We need it to keep the reconciliation flow generic.
+	SKIPNamespace := skiperatorv1alpha1.SKIPNamespace{Namespace: namespace}
+
+	istioEnabled := r.IsIstioEnabledForNamespace(ctx, namespace.Name)
+	identityConfigMap, err := r.GetIdentityConfigMap(ctx)
+	if err != nil {
+		rLog.Error(err, "can't find identity config map")
+	}
+
+	rLog.Debug("Starting reconciliation", "namespace", namespace.Name)
+	r.EmitNormalEvent(namespace, "ReconcileStart", fmt.Sprintf("Namespace %v has started reconciliation loop", namespace.Name))
+	reconciliation := NewNamespaceReconciliation(ctx, SKIPNamespace, rLog, istioEnabled, r.GetRestConfig(), identityConfigMap)
+
+	ps, err := github.NewImagePullSecret(r.Token, r.Registry)
+	if err != nil {
+		rLog.Error(err, "failed to create image pull secret")
+		return common.RequeueWithError(err)
+	}
+
+	funcs := []reconciliationFunc{
+		ps.Generate,
+		sidecar.Generate,
+		defaultdeny.Generate,
+	}
+
+	for _, f := range funcs {
+		if err = f(reconciliation); err != nil {
+			rLog.Error(err, "failed to generate namespace resource")
+			return common.RequeueWithError(err)
+		}
+	}
+
+	if err = r.setResourceDefaults(reconciliation.GetResources(), &SKIPNamespace); err != nil {
+		rLog.Error(err, "Failed to set namespace resource defaults")
+		r.EmitWarningEvent(namespace, "ReconcileEndFail", "Failed to set namespace resource defaults")
+		return common.RequeueWithError(err)
+	}
+
+	if errs := r.GetProcessor().Process(reconciliation); len(errs) > 0 {
+		rLog.Error(errs[0], "failed to process resources - returning only the first error", "numberOfErrors", len(errs))
+		return common.RequeueWithError(errs[0])
+	}
+
+	r.EmitNormalEvent(namespace, "ReconcileEnd", fmt.Sprintf("Namespace %v has finished reconciliation loop", namespace.Name))
+
+	return common.DoNotRequeue()
+}
+
+func (r *NamespaceReconciler) setResourceDefaults(resources []client.Object, skipns *skiperatorv1alpha1.SKIPNamespace) error {
+	for _, resource := range resources {
+		if err := resourceutils.AddGVK(r.GetScheme(), resource); err != nil {
+			return err
+		}
+		resourceutils.SetNamespaceLabels(resource, skipns)
+	}
+	return nil
+}
+
+func (r *NamespaceReconciler) isExcludedNamespace(ctx context.Context, namespace string) bool {
+	configMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "namespace-exclusions"}
+
+	namespaceExclusionCMap, err := util.GetConfigMap(r.GetClient(), ctx, configMapNamespacedName)
+	if err != nil {
+		util.ErrDoPanic(err, "Something went wrong getting namespace-exclusion config map: %v")
+	}
+
+	namespacesToExclude := namespaceExclusionCMap.Data
+
+	exclusion, keyExists := namespacesToExclude[namespace]
+
+	return keyExists && exclusion == "true"
+}
diff --git a/internal/controllers/routing.go b/internal/controllers/routing.go
new file mode 100644
index 00000000..d99afdc1
--- /dev/null
+++ b/internal/controllers/routing.go
@@ -0,0 +1,231 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/internal/controllers/common"
+	"github.com/kartverket/skiperator/pkg/log"
+	.
"github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/certificate" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/gateway" + "github.com/kartverket/skiperator/pkg/resourcegenerator/istio/virtualservice" + networkpolicy "github.com/kartverket/skiperator/pkg/resourcegenerator/networkpolicy/dynamic" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=routings;routings/status,verbs=get;list;watch;update +// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=applications;applications/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=networking.k8s.io,resources=networkpolicies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=networking.istio.io,resources=gateways;virtualservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete + +type RoutingReconciler struct { + common.ReconcilerBase +} + +func (r *RoutingReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&skiperatorv1alpha1.Routing{}). + Owns(&istionetworkingv1beta1.Gateway{}). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&istionetworkingv1beta1.VirtualService{}). + Watches(&certmanagerv1.Certificate{}, handler.EnqueueRequestsFromMapFunc(r.skiperatorRoutingCertRequests)). + Watches( + &skiperatorv1alpha1.Application{}, + handler.EnqueueRequestsFromMapFunc(r.skiperatorApplicationsChanges)). + WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). 
+		Complete(r)
+}
+
+func (r *RoutingReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	rLog := log.NewLogger().WithName(fmt.Sprintf("routing-controller: %s", req.Name))
+	rLog.Debug("Starting reconcile for request", "request", req.Name)
+
+	routing, err := r.getRouting(ctx, req)
+	if err != nil {
+		// A real fetch error (getRouting returns nil, nil on NotFound), so requeue instead of cleaning up
+		rLog.Error(err, "something went wrong fetching the Routing")
+		return common.RequeueWithError(err)
+	}
+	if routing == nil {
+		rLog.Info("Routing not found, cleaning up watched resources", "routing", req.Name)
+		if errs := r.cleanUpWatchedResources(ctx, req.NamespacedName); len(errs) > 0 {
+			return common.RequeueWithError(fmt.Errorf("error when trying to clean up watched resources: %w", errs[0]))
+		}
+		return common.DoNotRequeue()
+	}
+
+	if !common.ShouldReconcile(routing) {
+		return common.DoNotRequeue()
+	}
+
+	if err := r.setDefaultSpec(ctx, routing); err != nil {
+		rLog.Error(err, "error when trying to set default spec")
+		r.SetErrorState(ctx, routing, err, "error when trying to set default spec", "DefaultSpecFailure")
+		return common.RequeueWithError(err)
+	}
+
+	// Start the actual reconciliation
+	rLog.Debug("Starting reconciliation loop", "routing", routing.Name)
+	r.SetProgressingState(ctx, routing, fmt.Sprintf("Routing %v has started reconciliation loop", routing.Name))
+
+	istioEnabled := r.IsIstioEnabledForNamespace(ctx, routing.Namespace)
+	identityConfigMap, err := r.GetIdentityConfigMap(ctx)
+	if err != nil {
+		rLog.Error(err, "can't find identity config map")
+	}
+
+	reconciliation := NewRoutingReconciliation(ctx, routing, rLog, istioEnabled, r.GetRestConfig(), identityConfigMap)
+	resourceGeneration := []reconciliationFunc{
+		networkpolicy.Generate,
+		virtualservice.Generate,
+		gateway.Generate,
+		certificate.Generate,
+	}
+
+	for _, f := range resourceGeneration {
+		if err := f(reconciliation); err != nil {
+			rLog.Error(err, "failed to generate routing resource")
+			// At this point we don't have the gvk of the resource yet, so we can't set subresource status.
+			r.SetErrorState(ctx, routing, err, "failed to generate routing resource", "ResourceGenerationFailure")
+			return common.RequeueWithError(err)
+		}
+	}
+
+	// We need to do this here, so we are sure it's done. Not setting GVK can cause big issues.
+	if err = r.setRoutingResourceDefaults(reconciliation.GetResources(), routing); err != nil {
+		rLog.Error(err, "failed to set routing resource defaults")
+		r.SetErrorState(ctx, routing, err, "failed to set routing resource defaults", "ResourceDefaultsFailure")
+		return common.RequeueWithError(err)
+	}
+
+	if errs := r.GetProcessor().Process(reconciliation); len(errs) > 0 {
+		for _, err = range errs {
+			rLog.Error(err, "failed to process resource")
+			r.EmitWarningEvent(routing, "ReconcileEndFail", fmt.Sprintf("Failed to process routing resources: %s", err.Error()))
+		}
+		r.SetErrorState(ctx, routing, fmt.Errorf("found %d errors", len(errs)), "failed to process routing resources, see subresource status", "ProcessorFailure")
+		return common.RequeueWithError(err)
+	}
+
+	r.SetSyncedState(ctx, routing, "Routing has been reconciled")
+
+	return common.DoNotRequeue()
+}
+
+func (r *RoutingReconciler) getRouting(ctx context.Context, req reconcile.Request) (*skiperatorv1alpha1.Routing, error) {
+	routing := &skiperatorv1alpha1.Routing{}
+	if err := r.GetClient().Get(ctx, req.NamespacedName, routing); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("error when trying to get routing: %w", err)
+	}
+
+	return routing, nil
+}
+
+func (r *RoutingReconciler) cleanUpWatchedResources(ctx context.Context, name types.NamespacedName) []error {
+	route := &skiperatorv1alpha1.Routing{}
+	route.SetName(name.Name)
+	route.SetNamespace(name.Namespace)
+
+	reconciliation := NewRoutingReconciliation(ctx, route, log.NewLogger(), false, nil, nil)
+	return r.GetProcessor().Process(reconciliation)
+}
+
+// TODO Do this with application too for dynamic port allocation?
+func (r *RoutingReconciler) setDefaultSpec(ctx context.Context, routing *skiperatorv1alpha1.Routing) error {
+	for i := range routing.Spec.Routes {
+		route := &routing.Spec.Routes[i] // Get a pointer to the route in the slice
+		if route.Port == 0 {
+			app, err := r.getTargetApplication(ctx, route.TargetApp, routing.Namespace)
+			if err != nil {
+				return err
+			}
+			route.Port = int32(app.Spec.Port)
+		}
+	}
+	return nil
+}
+
+func (r *RoutingReconciler) setRoutingResourceDefaults(resources []client.Object, routing *skiperatorv1alpha1.Routing) error {
+	// Set the common defaults once for the whole resource set, then stamp routing labels per resource
+	if err := r.SetSubresourceDefaults(resources, routing); err != nil {
+		return err
+	}
+	for _, resource := range resources {
+		resourceutils.SetRoutingLabels(resource, routing)
+	}
+	return nil
+}
+
+func (r *RoutingReconciler) skiperatorApplicationsChanges(ctx context.Context, obj client.Object) []reconcile.Request {
+	application, isApplication := obj.(*skiperatorv1alpha1.Application)
+
+	if !isApplication {
+		return nil
+	}
+
+	// List all routings in the same namespace as the application
+	routesList := &skiperatorv1alpha1.RoutingList{}
+	if err := r.GetClient().List(ctx, routesList, &client.ListOptions{Namespace: application.Namespace}); err != nil {
+		return nil
+	}
+
+	// Create a list of reconcile.Requests for each Routing in the same namespace as the application
+	requests := make([]reconcile.Request, 0)
+	for _, route := range routesList.Items {
+		requests = append(requests, reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: route.Namespace,
+				Name:      route.Name,
+			},
+		})
+	}
+
+	return requests
+}
+
+// TODO figure out what this does
+// TODO have to do something about the hardcoded labels everywhere
+func (r *RoutingReconciler) skiperatorRoutingCertRequests(_ context.Context, obj client.Object) []reconcile.Request {
+	certificate, isCert := obj.(*certmanagerv1.Certificate)
+
+	if !isCert {
+		return nil
+	}
+
+	isSkiperatorRoutingOwned := certificate.Labels["app.kubernetes.io/managed-by"] == "skiperator" &&
+		certificate.Labels["skiperator.kartverket.no/controller"] == "routing"
+
+	requests := make([]reconcile.Request, 0)
+
+	if isSkiperatorRoutingOwned {
+		requests = append(requests, reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: certificate.Labels["application.skiperator.no/app-namespace"],
+				Name:      certificate.Labels["application.skiperator.no/app-name"],
+			},
+		})
+	}
+
+	return requests
+}
+
+func (r *RoutingReconciler) getTargetApplication(ctx context.Context, appName string, namespace string) (*skiperatorv1alpha1.Application, error) {
+	application := &skiperatorv1alpha1.Application{}
+	if err := r.GetClient().Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, application); err != nil {
+		return nil, fmt.Errorf("error when trying to get target application: %w", err)
+	}
+
+	return application, nil
+}
diff --git a/internal/controllers/skipjob.go b/internal/controllers/skipjob.go
new file mode 100644
index 00000000..3dd3bf76
--- /dev/null
+++ b/internal/controllers/skipjob.go
@@ -0,0 +1,341 @@
+package controllers
+
+import (
+	"context"
+	"fmt"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/internal/controllers/common"
+	"github.com/kartverket/skiperator/pkg/log"
+	. "github.com/kartverket/skiperator/pkg/reconciliation"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/gcp/auth"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/istio/serviceentry"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/job"
+	networkpolicy "github.com/kartverket/skiperator/pkg/resourcegenerator/networkpolicy/dynamic"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/podmonitor"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/serviceaccount"
+	"github.com/kartverket/skiperator/pkg/util"
+	istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/handler"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+const (
+	ConditionRunning  = "Running"
+	ConditionFinished = "Finished"
+	ConditionFailed   = "Failed"
+)
+
+// +kubebuilder:rbac:groups=skiperator.kartverket.no,resources=skipjobs;skipjobs/status,verbs=get;list;watch;update
+// +kubebuilder:rbac:groups=batch,resources=jobs;cronjobs,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=pods;pods/ephemeralcontainers,verbs=get;list;watch;create;update;patch;delete
+
+// NB: the blank line above is deliberate; it keeps the kubebuilder markers from being attached to this type as a doc comment
+type SKIPJobReconciler struct {
+	common.ReconcilerBase
+}
+
+// TODO Watch applications that are using dynamic port allocation
+func (r *SKIPJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		// GenerationChangedPredicate is only applied to the SKIPJob itself to allow status changes on Jobs/CronJobs to affect reconcile loops
+		For(&skiperatorv1alpha1.SKIPJob{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+		Owns(&batchv1.CronJob{}).
+		Owns(&batchv1.Job{}).
+		// This is added as the Jobs created by CronJobs are not owned by the SKIPJob directly, but rather through the CronJob
+		Watches(&batchv1.Job{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, object client.Object) []reconcile.Request {
+			batchJob, isJob := object.(*batchv1.Job)
+
+			if !isJob {
+				return nil
+			}
+
+			if skipJobName, exists := batchJob.Labels[skiperatorv1alpha1.SKIPJobReferenceLabelKey]; exists {
+				return []reconcile.Request{
+					{
+						NamespacedName: types.NamespacedName{
+							Namespace: batchJob.Namespace,
+							Name:      skipJobName,
+						},
+					},
+				}
+			}
+
+			return nil
+		})).
+		Owns(&networkingv1.NetworkPolicy{}).
+		Owns(&istionetworkingv1beta1.ServiceEntry{}).
+		// Some NetPol entries are not added unless an application is present. If we reconcile all jobs when there have been changes to NetPols, we can assume
+		// that changes to an Application's AccessPolicy will cause a reconciliation of Jobs
+		Watches(&networkingv1.NetworkPolicy{}, handler.EnqueueRequestsFromMapFunc(r.getJobsToReconcile)).
+		Complete(r)
+}
+
+func (r *SKIPJobReconciler) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
+	rLog := log.NewLogger().WithName(fmt.Sprintf("skipjob-controller: %s", req.Name))
+	rLog.Debug("Starting reconcile for request", "request", req.Name)
+
+	skipJob, err := r.getSKIPJob(ctx, req)
+	if err != nil {
+		// A real fetch error (getSKIPJob returns nil, nil on NotFound), so requeue
+		rLog.Error(err, "something went wrong fetching the SKIPJob")
+		return common.RequeueWithError(err)
+	}
+	if skipJob == nil {
+		return common.DoNotRequeue()
+	}
+
+	tmpSkipJob := skipJob.DeepCopy()
+	//TODO make sure we don't update the skipjob/application/routing after this step, it will cause endless reconciliations
+	//TODO check that a resource request of 0.3 isn't rewritten to 300m
+	err = r.setSKIPJobDefaults(ctx, skipJob)
+	if err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	specDiff, err := util.GetObjectDiff(tmpSkipJob.Spec, skipJob.Spec)
+	if err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	// If defaulting changed the spec, persist the SKIPJob before reconciling resources so the
+	// applied defaults are visible right away, even though the controller duties may take some time.
+	if len(specDiff) > 0 {
+		err := r.GetClient().Update(ctx, skipJob)
+		return reconcile.Result{Requeue: true}, err
+	}
+
+	// TODO Removed the status diff check here... why did we need it? It caused endless reconciles because the timestamps always differ (which makes sense)
+	if err = r.GetClient().Status().Update(ctx, skipJob); err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	// Start the actual reconciliation
+	rLog.Debug("Starting reconciliation loop")
+	r.SetProgressingState(ctx, skipJob, fmt.Sprintf("SKIPJob %v has started reconciliation loop", skipJob.Name))
+
+	istioEnabled := r.IsIstioEnabledForNamespace(ctx, skipJob.Namespace)
+	identityConfigMap, err := r.GetIdentityConfigMap(ctx)
+	if err != nil {
+		rLog.Error(err, "can't find identity config map")
+	} //TODO Error state?
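+
+	// Each generator below satisfies the shared reconciliationFunc signature,
+	// presumably func(reconciliation.Reconciliation) error to match the
+	// package-level Generate functions. A minimal generator sketch (the
+	// ConfigMap here is purely hypothetical):
+	//
+	//	func Generate(r Reconciliation) error {
+	//		cm := corev1.ConfigMap{ObjectMeta: v1.ObjectMeta{Name: "example"}}
+	//		r.AddResource(&cm) // queued for the ResourceProcessor to apply
+	//		return nil
+	//	}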
+
+	reconciliation := NewJobReconciliation(ctx, skipJob, rLog, istioEnabled, r.GetRestConfig(), identityConfigMap)
+
+	resourceGeneration := []reconciliationFunc{
+		serviceaccount.Generate,
+		networkpolicy.Generate,
+		serviceentry.Generate,
+		auth.Generate,
+		job.Generate,
+		podmonitor.Generate,
+	}
+
+	for _, f := range resourceGeneration {
+		if err := f(reconciliation); err != nil {
+			rLog.Error(err, "failed to generate skipjob resource")
+			// At this point we don't have the gvk of the resource yet, so we can't set subresource status.
+			r.SetErrorState(ctx, skipJob, err, "failed to generate skipjob resource", "ResourceGenerationFailure")
+			return common.RequeueWithError(err)
+		}
+	}
+
+	if err = r.setResourceDefaults(reconciliation.GetResources(), skipJob); err != nil {
+		rLog.Error(err, "error when trying to set resource defaults")
+		r.SetErrorState(ctx, skipJob, err, "failed to set skipjob resource defaults", "ResourceDefaultsFailure")
+		return common.RequeueWithError(err)
+	}
+
+	if errs := r.GetProcessor().Process(reconciliation); len(errs) > 0 {
+		for _, err = range errs {
+			rLog.Error(err, "failed to process resource")
+			r.EmitWarningEvent(skipJob, "ReconcileEndFail", fmt.Sprintf("Failed to process skipjob resources: %s", err.Error()))
+		}
+		r.SetErrorState(ctx, skipJob, fmt.Errorf("found %d errors", len(errs)), "failed to process skipjob resources, see subresource status", "ProcessorFailure")
+		return common.RequeueWithError(err)
+	}
+
+	//TODO consider if we need better handling of status updates in context of summary, conditions and subresources
+	if err = r.updateConditions(ctx, skipJob); err != nil {
+		rLog.Error(err, "failed to update conditions")
+		r.SetErrorState(ctx, skipJob, err, "failed to update conditions", "ConditionsFailure")
+		return common.RequeueWithError(err)
+	}
+
+	r.SetSyncedState(ctx, skipJob, "SKIPJob has been reconciled")
+
+	return common.DoNotRequeue()
+}
+
+func (r *SKIPJobReconciler) getSKIPJob(ctx context.Context, req reconcile.Request) (*skiperatorv1alpha1.SKIPJob, error) {
+	skipJob := &skiperatorv1alpha1.SKIPJob{}
+	if err := r.GetClient().Get(ctx, req.NamespacedName, skipJob); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("error when trying to get SKIPJob: %w", err)
+	}
+
+	return skipJob, nil
+}
+
+func (r *SKIPJobReconciler) setSKIPJobDefaults(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) error {
+	if err := skipJob.FillDefaultSpec(); err != nil {
+		return fmt.Errorf("error when trying to fill default spec: %w", err)
+	}
+	resourceutils.SetSKIPJobLabels(skipJob, skipJob)
+	skipJob.FillDefaultStatus()
+	// We try to feed the access policy with port values dynamically;
+	// if unsuccessful we just don't set ports, and rely on pod selectors
+	r.UpdateAccessPolicy(ctx, skipJob)
+
+	return nil
+}
+
+func (r *SKIPJobReconciler) setResourceDefaults(resources []client.Object, skipJob *skiperatorv1alpha1.SKIPJob) error {
+	for _, resource := range resources {
+		if err := resourceutils.AddGVK(r.GetScheme(), resource); err != nil {
+			return err
+		}
+		resourceutils.SetSKIPJobLabels(resource, skipJob)
+		if err := resourceutils.SetOwnerReference(skipJob, resource, r.GetScheme()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *SKIPJobReconciler) getJobsToReconcile(ctx context.Context, object client.Object) []reconcile.Request {
+	var jobsToReconcile skiperatorv1alpha1.SKIPJobList
+	var reconcileRequests []reconcile.Request
+
+	owner := object.GetOwnerReferences()
+	if len(owner) == 0 {
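+		// No owner references means the NetworkPolicy cannot stem from an
+		// Application, so there is nothing to re-reconcile.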
+		return reconcileRequests
+	}
+
+	// Assume only one owner
+	if owner[0].Kind != "Application" {
+		return reconcileRequests
+	}
+
+	err := r.GetClient().List(ctx, &jobsToReconcile)
+	if err != nil {
+		return nil
+	}
+	for _, j := range jobsToReconcile.Items {
+		reconcileRequests = append(reconcileRequests, reconcile.Request{
+			NamespacedName: types.NamespacedName{
+				Namespace: j.Namespace,
+				Name:      j.Name,
+			},
+		})
+	}
+	return reconcileRequests
+}
+
+func (r *SKIPJobReconciler) getConditionRunning(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus) v1.Condition {
+	return v1.Condition{
+		Type:               ConditionRunning,
+		Status:             status,
+		ObservedGeneration: skipJob.Generation,
+		LastTransitionTime: v1.Now(),
+		Reason:             "JobRunning",
+		Message:            "Job has been created and is now running",
+	}
+}
+
+func (r *SKIPJobReconciler) getConditionFinished(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus) v1.Condition {
+	return v1.Condition{
+		Type:               ConditionFinished,
+		Status:             status,
+		ObservedGeneration: skipJob.Generation,
+		LastTransitionTime: v1.Now(),
+		Reason:             "JobFinished",
+		Message:            "Job has finished",
+	}
+}
+
+func (r *SKIPJobReconciler) getConditionFailed(skipJob *skiperatorv1alpha1.SKIPJob, status v1.ConditionStatus, err *string) v1.Condition {
+	conditionMessage := "Job failed previous run"
+	if err != nil {
+		conditionMessage = fmt.Sprintf("%v: %v", conditionMessage, *err)
+	}
+	return v1.Condition{
+		Type:               ConditionFailed,
+		Status:             status,
+		ObservedGeneration: skipJob.Generation,
+		LastTransitionTime: v1.Now(),
+		Reason:             "JobFailed",
+		Message:            conditionMessage,
+	}
+}
+
+func (r *SKIPJobReconciler) updateConditions(ctx context.Context, skipJob *skiperatorv1alpha1.SKIPJob) error {
+	jobList := &batchv1.JobList{}
+	err := r.GetClient().List(ctx, jobList,
+		client.InNamespace(skipJob.Namespace),
+		client.MatchingLabels(skipJob.GetDefaultLabels()),
+	)
+	if err != nil {
+		return fmt.Errorf("failed to list jobs: %w", err)
+	}
+	if len(jobList.Items) == 0 {
+		return nil
+	}
+
+	// Find the most recent job to derive conditions from; CronJobs produce multiple Jobs.
+	// Index into the slice instead of taking the address of the range variable, which would alias each iteration.
+	lastJob := &batchv1.Job{}
+	for i := range jobList.Items {
+		if lastJob.CreationTimestamp.Before(&jobList.Items[i].CreationTimestamp) {
+			lastJob = &jobList.Items[i]
+		}
+	}
+	if isFailed, failedJobMessage := isFailedJob(lastJob); isFailed {
+		skipJob.Status.Conditions = []v1.Condition{
+			r.getConditionFailed(skipJob, v1.ConditionTrue, &failedJobMessage),
+			r.getConditionRunning(skipJob, v1.ConditionFalse),
+			r.getConditionFinished(skipJob, v1.ConditionFalse),
+		}
+	} else if lastJob.Status.CompletionTime != nil {
+		skipJob.Status.Conditions = []v1.Condition{
+			r.getConditionFailed(skipJob, v1.ConditionFalse, nil),
+			r.getConditionRunning(skipJob, v1.ConditionFalse),
+			r.getConditionFinished(skipJob, v1.ConditionTrue),
+		}
+	} else {
+		skipJob.Status.Conditions = []v1.Condition{
+			r.getConditionFailed(skipJob, v1.ConditionFalse, nil),
+			r.getConditionRunning(skipJob, v1.ConditionTrue),
+			r.getConditionFinished(skipJob, v1.ConditionFalse),
+		}
+	}
+
+	// Invalid port condition
+	accessPolicy := skipJob.Spec.Container.AccessPolicy
+	if accessPolicy != nil && !common.IsInternalRulesValid(accessPolicy) {
+		skipJob.Status.Conditions = append(skipJob.Status.Conditions, common.GetInternalRulesCondition(skipJob, v1.ConditionFalse))
+	} else {
+		skipJob.Status.Conditions = append(skipJob.Status.Conditions, common.GetInternalRulesCondition(skipJob, v1.ConditionTrue))
+	}
+
+	return nil
+}
+
+// TODO: this could probably be simplified
+func isFailedJob(job
*batchv1.Job) (bool, string) { + for _, condition := range job.Status.Conditions { + if condition.Type == ConditionFailed && condition.Status == corev1.ConditionTrue { + return true, condition.Message + } + } + return false, "" +} diff --git a/pkg/certs/k8s.go b/pkg/certs/k8s.go deleted file mode 100644 index beb5eb93..00000000 --- a/pkg/certs/k8s.go +++ /dev/null @@ -1,40 +0,0 @@ -package certs - -import ( - "context" - "crypto/tls" - "crypto/x509" - "fmt" - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func AssertWellKnownTlsCert(c client.Client, ctx context.Context, certName string) (bool, []string, error) { - secret, err := util.GetSecret(c, ctx, types.NamespacedName{Namespace: "istio-gateways", Name: certName}) - if err != nil { - return false, nil, err - } - - if secret.Type != corev1.SecretTypeTLS { - return false, nil, fmt.Errorf("secret %s/%s is not a TLS secret. actual type: %s", secret.Namespace, secret.Name, secret.Type) - } - - certData := secret.Data[corev1.TLSCertKey] - if len(certData) == 0 { - return false, nil, fmt.Errorf("secret %s/%s does not contain a valid certificate", secret.Namespace, secret.Name) - } - - certKeyData := secret.Data[corev1.TLSPrivateKeyKey] - if len(certKeyData) == 0 { - return false, nil, fmt.Errorf("secret %s/%s does not contain a valid private key", secret.Namespace, secret.Name) - } - - cert, err := tls.X509KeyPair(certData, certKeyData) - if err != nil { - return false, nil, fmt.Errorf("failed to parse certificate/key pair: %w", err) - } - - x509.VerifyOptions{} -} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 00000000..57fe5fea --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,48 @@ +/* + * Thin wrapper for the controller-runtime logger. + * Just to make it easier to log different levels + */ + +package log + +import ( + "github.com/go-logr/logr" + ctrl "sigs.k8s.io/controller-runtime" +) + +type Logger interface { + Error(err error, msg string, keysAndValues ...interface{}) + Info(msg string, keysAndValues ...interface{}) + Debug(msg string, keysAndValues ...interface{}) + WithName(name string) Logger + GetLogger() logr.Logger +} + +type logger struct { + logr.Logger +} + +// TODO add warn +func (l *logger) Error(err error, msg string, keysAndValues ...interface{}) { + l.Logger.Error(err, msg, keysAndValues...) +} + +func (l *logger) Info(msg string, keysAndValues ...interface{}) { + l.Logger.Info(msg, keysAndValues...) +} + +func (l *logger) Debug(msg string, keysAndValues ...interface{}) { + l.Logger.V(1).Info(msg, keysAndValues...) 
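+	// logr has no dedicated debug level; V(1) is the conventional debug
+	// verbosity and is only emitted when the sink is configured to show it.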
+} + +func (l *logger) WithName(name string) Logger { + return &logger{Logger: l.Logger.WithName(name)} +} + +func (l *logger) GetLogger() logr.Logger { + return l.Logger +} + +func NewLogger() Logger { + return &logger{Logger: ctrl.Log} +} diff --git a/pkg/reconciliation/application.go b/pkg/reconciliation/application.go new file mode 100644 index 00000000..ffc6aa11 --- /dev/null +++ b/pkg/reconciliation/application.go @@ -0,0 +1,32 @@ +package reconciliation + +import ( + "context" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" +) + +type ApplicationReconciliation struct { + baseReconciliation +} + +func NewApplicationReconciliation(ctx context.Context, application *skiperatorv1alpha1.Application, + logger log.Logger, istioEnabled bool, restConfig *rest.Config, + identityConfigMap *corev1.ConfigMap) *ApplicationReconciliation { + return &ApplicationReconciliation{ + baseReconciliation: baseReconciliation{ + ctx: ctx, + logger: logger, + istioEnabled: istioEnabled, + restConfig: restConfig, + identityConfigMap: identityConfigMap, + skipObject: application, + }, + } +} + +func (r *ApplicationReconciliation) GetType() ObjectType { + return ApplicationType +} diff --git a/pkg/reconciliation/namespace.go b/pkg/reconciliation/namespace.go new file mode 100644 index 00000000..1b9918b7 --- /dev/null +++ b/pkg/reconciliation/namespace.go @@ -0,0 +1,32 @@ +package reconciliation + +import ( + "context" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" +) + +type NamespaceReconciliation struct { + baseReconciliation +} + +func NewNamespaceReconciliation(ctx context.Context, namespace skiperatorv1alpha1.SKIPObject, + logger log.Logger, istioEnabled bool, + restConfig *rest.Config, identityConfigMap *corev1.ConfigMap) *NamespaceReconciliation { + return &NamespaceReconciliation{ + baseReconciliation: baseReconciliation{ + ctx: ctx, + logger: logger, + istioEnabled: istioEnabled, + restConfig: restConfig, + identityConfigMap: identityConfigMap, + skipObject: namespace, + }, + } +} + +func (r *NamespaceReconciliation) GetType() ObjectType { + return NamespaceType +} diff --git a/pkg/reconciliation/reconciliation.go b/pkg/reconciliation/reconciliation.go new file mode 100644 index 00000000..8f489fd8 --- /dev/null +++ b/pkg/reconciliation/reconciliation.go @@ -0,0 +1,73 @@ +package reconciliation + +import ( + "context" + "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ObjectType string + +const ( + ApplicationType ObjectType = "Application" + JobType ObjectType = "SKIPJob" + NamespaceType ObjectType = "Namespace" + RoutingType ObjectType = "Routing" +) + +type Reconciliation interface { + GetLogger() log.Logger + GetCtx() context.Context //TODO: remove ctx from this interface + IsIstioEnabled() bool + GetSKIPObject() v1alpha1.SKIPObject + GetType() ObjectType + GetResources() []client.Object + AddResource(client.Object) + GetIdentityConfigMap() *corev1.ConfigMap + GetRestConfig() *rest.Config +} + +type baseReconciliation struct { + ctx context.Context + logger log.Logger + resources []client.Object + istioEnabled bool + restConfig *rest.Config + identityConfigMap *corev1.ConfigMap + skipObject 
v1alpha1.SKIPObject +} + +func (b *baseReconciliation) GetLogger() log.Logger { + return b.logger +} + +func (b *baseReconciliation) GetCtx() context.Context { + return b.ctx +} + +func (b *baseReconciliation) IsIstioEnabled() bool { + return b.istioEnabled +} + +func (b *baseReconciliation) GetResources() []client.Object { + return b.resources +} + +func (b *baseReconciliation) AddResource(object client.Object) { + b.resources = append(b.resources, object) +} + +func (b *baseReconciliation) GetIdentityConfigMap() *corev1.ConfigMap { + return b.identityConfigMap +} + +func (b *baseReconciliation) GetRestConfig() *rest.Config { + return b.restConfig +} + +func (b *baseReconciliation) GetSKIPObject() v1alpha1.SKIPObject { + return b.skipObject +} diff --git a/pkg/reconciliation/routing.go b/pkg/reconciliation/routing.go new file mode 100644 index 00000000..617f9a6e --- /dev/null +++ b/pkg/reconciliation/routing.go @@ -0,0 +1,32 @@ +package reconciliation + +import ( + "context" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" +) + +type RoutingReconciliation struct { + baseReconciliation +} + +func NewRoutingReconciliation(ctx context.Context, routing *skiperatorv1alpha1.Routing, + logger log.Logger, istioEnabled bool, restConfig *rest.Config, + identityConfigMap *corev1.ConfigMap) *RoutingReconciliation { + return &RoutingReconciliation{ + baseReconciliation: baseReconciliation{ + ctx: ctx, + logger: logger, + istioEnabled: istioEnabled, + restConfig: restConfig, + identityConfigMap: identityConfigMap, + skipObject: routing, + }, + } +} + +func (r *RoutingReconciliation) GetType() ObjectType { + return RoutingType +} diff --git a/pkg/reconciliation/skipjob.go b/pkg/reconciliation/skipjob.go new file mode 100644 index 00000000..d07db5bd --- /dev/null +++ b/pkg/reconciliation/skipjob.go @@ -0,0 +1,30 @@ +package reconciliation + +import ( + "context" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" +) + +type JobReconciliation struct { + baseReconciliation +} + +func NewJobReconciliation(ctx context.Context, job *skiperatorv1alpha1.SKIPJob, logger log.Logger, istioEnabled bool, restConfig *rest.Config, identityConfigMap *corev1.ConfigMap) *JobReconciliation { + return &JobReconciliation{ + baseReconciliation: baseReconciliation{ + ctx: ctx, + logger: logger, + istioEnabled: istioEnabled, + restConfig: restConfig, + identityConfigMap: identityConfigMap, + skipObject: job, + }, + } +} + +func (j *JobReconciliation) GetType() ObjectType { + return JobType +} diff --git a/pkg/resourcegenerator/certificate/application.go b/pkg/resourcegenerator/certificate/application.go new file mode 100644 index 00000000..357f5a84 --- /dev/null +++ b/pkg/resourcegenerator/certificate/application.go @@ -0,0 +1,56 @@ +package certificate + +import ( + "fmt" + + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + v1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.ApplicationType, generateForApplication) +} + +func generateForApplication(r reconciliation.Reconciliation) 
error {
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Attempting to generate certificates for application", "application", r.GetSKIPObject().GetName())
+
+	if r.GetType() != reconciliation.ApplicationType {
+		return fmt.Errorf("certificate only supports application type")
+	}
+
+	application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application)
+	if !ok {
+		return fmt.Errorf("failed to cast object to Application")
+	}
+
+	hosts, err := application.Spec.Hosts()
+	if err != nil {
+		return fmt.Errorf("failed to get hosts from application: %w", err)
+	}
+
+	// Generate separate cert for each ingress
+	for _, h := range hosts.AllHosts() {
+		if h.UsesCustomCert() {
+			continue
+		}
+		certificateName := fmt.Sprintf("%s-%s-ingress-%x", application.Namespace, application.Name, util.GenerateHashFromName(h.Hostname))
+		certificate := certmanagerv1.Certificate{ObjectMeta: metav1.ObjectMeta{Namespace: "istio-gateways", Name: certificateName}}
+
+		certificate.Spec = certmanagerv1.CertificateSpec{
+			IssuerRef: v1.ObjectReference{
+				Kind: "ClusterIssuer",
+				Name: "cluster-issuer", // Name defined in https://github.com/kartverket/certificate-management/blob/main/clusterissuer.tf
+			},
+			DNSNames:   []string{h.Hostname},
+			SecretName: certificateName,
+		}
+		r.AddResource(&certificate)
+	}
+	ctxLog.Debug("Finished generating certificates for application", "application", application.Name)
+	return nil
+}
diff --git a/pkg/resourcegenerator/certificate/certificate.go b/pkg/resourcegenerator/certificate/certificate.go
new file mode 100644
index 00000000..2c07ffa9
--- /dev/null
+++ b/pkg/resourcegenerator/certificate/certificate.go
@@ -0,0 +1,16 @@
+package certificate
+
+import (
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils/generator"
+)
+
+const (
+	IstioGatewayNamespace = "istio-gateways"
+)
+
+var multiGenerator = generator.NewMulti()
+
+func Generate(r reconciliation.Reconciliation) error {
+	return multiGenerator.Generate(r, "Certificate")
+}
diff --git a/pkg/resourcegenerator/certificate/routing.go b/pkg/resourcegenerator/certificate/routing.go
new file mode 100644
index 00000000..a99e0277
--- /dev/null
+++ b/pkg/resourcegenerator/certificate/routing.go
@@ -0,0 +1,60 @@
+package certificate
+
+import (
+	"fmt"
+
+	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+	certmanagermetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func init() {
+	multiGenerator.Register(reconciliation.RoutingType, generateForRouting)
+}
+
+func generateForRouting(r reconciliation.Reconciliation) error {
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Attempting to generate certificates for routing", "routing", r.GetSKIPObject().GetName())
+
+	if r.GetType() != reconciliation.RoutingType {
+		return fmt.Errorf("certificate only supports routing type")
+	}
+	routing, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Routing)
+	if !ok {
+		return fmt.Errorf("failed to cast object to routing")
+	}
+
+	h, err := routing.Spec.GetHost()
+	if err != nil {
+		return err
+	}
+
+	// Do not create a new certificate when a custom certificate secret is specified
+	if h.UsesCustomCert() {
+		ctxLog.Debug("Skipping certificate generation for routing", "routing", routing.Name, "reason", "custom certificate secret specified")
+		return nil
+	}
+
+	certificateName, err
:= routing.GetCertificateName() + if err != nil { + return err + } + + certificate := certmanagerv1.Certificate{ObjectMeta: metav1.ObjectMeta{Namespace: IstioGatewayNamespace, Name: certificateName}} + + certificate.Spec = certmanagerv1.CertificateSpec{ + IssuerRef: certmanagermetav1.ObjectReference{ + Kind: "ClusterIssuer", + Name: "cluster-issuer", // Name defined in https://github.com/kartverket/certificate-management/blob/main/clusterissuer.tf + }, + DNSNames: []string{h.Hostname}, + SecretName: certificateName, + } + + r.AddResource(&certificate) + + ctxLog.Debug("Finished generating certificates for routing", "routing", routing.Name) + return nil +} diff --git a/pkg/resourcegenerator/core/constants.go b/pkg/resourcegenerator/core/constants.go deleted file mode 100644 index 5a920111..00000000 --- a/pkg/resourcegenerator/core/constants.go +++ /dev/null @@ -1,12 +0,0 @@ -package core - -// Based on https://kubernetes.io/docs/reference/labels-annotations-taints/ - -type SkiperatorTopologyKey string - -const ( - // Hostname is the value populated by the Kubelet. - Hostname SkiperatorTopologyKey = "kubernetes.io/hostname" - // OnPremFailureDomain is populated to the underlying ESXi hostname by the GKE on VMware tooling. - OnPremFailureDomain SkiperatorTopologyKey = "onprem.gke.io/failure-domain-name" -) diff --git a/controllers/application/deployment.go b/pkg/resourcegenerator/deployment/deployment.go similarity index 50% rename from controllers/application/deployment.go rename to pkg/resourcegenerator/deployment/deployment.go index 34bca0c9..bb0d6199 100644 --- a/controllers/application/deployment.go +++ b/pkg/resourcegenerator/deployment/deployment.go @@ -1,29 +1,25 @@ -package applicationcontroller +package deployment import ( - "context" goerrors "errors" "fmt" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/idporten" + "github.com/kartverket/skiperator/pkg/resourcegenerator/maskinporten" + "github.com/kartverket/skiperator/pkg/resourcegenerator/pod" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + "github.com/kartverket/skiperator/pkg/resourcegenerator/volume" "strings" "github.com/go-logr/logr" skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/pkg/resourcegenerator/core" "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp" "github.com/kartverket/skiperator/pkg/util" "golang.org/x/exp/maps" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" ) const ( @@ -32,48 +28,40 @@ const ( DefaultDigdiratorIDportenMountPath = "/var/run/secrets/skip/idporten" ) -var ( - deploymentLog = ctrl.Log.WithName("deployment") -) +// TODO should clean up +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in deployment resource", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := 
fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate deployment resource") + return err + } + + ctxLog.Debug("Attempting to generate deployment resource for application", "application", application.Name) -func (r *ApplicationReconciler) defineDeployment(ctx context.Context, application *skiperatorv1alpha1.Application) (appsv1.Deployment, error) { deployment := appsv1.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, ObjectMeta: metav1.ObjectMeta{ Namespace: application.Namespace, Name: application.Name, }, } - podOpts := core.PodOpts{ - IstioEnabled: r.IsIstioEnabledForNamespace(ctx, application.Namespace), + podOpts := pod.PodOpts{ + IstioEnabled: r.IsIstioEnabled(), } - skiperatorContainer := core.CreateApplicationContainer(application, podOpts) + skiperatorContainer := pod.CreateApplicationContainer(application, podOpts) var err error - podVolumes, containerVolumeMounts := core.GetContainerVolumeMountsAndPodVolumes(application.Spec.FilesFrom) + podVolumes, containerVolumeMounts := volume.GetContainerVolumeMountsAndPodVolumes(application.Spec.FilesFrom) if util.IsGCPAuthEnabled(application.Spec.GCP) { - gcpIdentityConfigMapNamespacedName := types.NamespacedName{Namespace: "skiperator-system", Name: "gcp-identity-config"} - gcpIdentityConfigMap := corev1.ConfigMap{} - - gcpIdentityConfigMap, err := util.GetConfigMap(r.GetClient(), ctx, gcpIdentityConfigMapNamespacedName) - if !util.ErrIsMissingOrNil( - r.GetRecorder(), - err, - "Cannot find configmap named "+gcpIdentityConfigMapNamespacedName.Name+" in namespace "+gcpIdentityConfigMapNamespacedName.Namespace, - application, - ) { - r.SetControllerError(ctx, application, controllerName, err) - return deployment, err - } - - gcpPodVolume := gcp.GetGCPContainerVolume(gcpIdentityConfigMap.Data["workloadIdentityPool"], application.Name) + gcpPodVolume := gcp.GetGCPContainerVolume(r.GetIdentityConfigMap().Data["workloadIdentityPool"], application.Name) gcpContainerVolumeMount := gcp.GetGCPContainerVolumeMount() gcpEnvVar := gcp.GetGCPEnvVar() @@ -82,11 +70,11 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio skiperatorContainer.Env = append(skiperatorContainer.Env, gcpEnvVar) } - if idportenSpecifiedInSpec(application.Spec.IDPorten) { - secretName, err := getIDPortenSecretName(application.Name) + if idporten.IdportenSpecifiedInSpec(application.Spec.IDPorten) { + secretName, err := idporten.GetIDPortenSecretName(application.Name) if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return deployment, err + ctxLog.Error(err, "could not get idporten secret name") + return err } podVolumes, containerVolumeMounts = appendDigdiratorSecretVolumeMount( &skiperatorContainer, @@ -97,11 +85,11 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio ) } - if maskinportenSpecifiedInSpec(application.Spec.Maskinporten) { - secretName, err := getMaskinportenSecretName(application.Name) + if maskinporten.MaskinportenSpecifiedInSpec(application.Spec.Maskinporten) { + secretName, err := maskinporten.GetMaskinportenSecretName(application.Name) if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return deployment, err + ctxLog.Error(err, "could not get maskinporten secret name") + return err } podVolumes, containerVolumeMounts = appendDigdiratorSecretVolumeMount( &skiperatorContainer, @@ -135,11 +123,10 @@ func (r *ApplicationReconciler) defineDeployment(ctx 
context.Context, applicatio
 	// See
 	//  - https://superorbital.io/blog/istio-metrics-merging/
 	//  - https://androidexample365.com/an-example-of-how-istio-metrics-merging-works/
-	istioEnabled := r.IsIstioEnabledForNamespace(ctx, application.Namespace)
-	if istioEnabled {
+	if r.IsIstioEnabled() {
 		if application.Spec.Prometheus != nil {
 			// If the application has exposed metrics
-			generatedSpecAnnotations["prometheus.io/port"] = resolveToPortNumber(application.Spec.Prometheus.Port, application)
+			generatedSpecAnnotations["prometheus.io/port"] = resolveToPortNumber(application.Spec.Prometheus.Port, application, ctxLog.GetLogger())
 			generatedSpecAnnotations["prometheus.io/path"] = application.Spec.Prometheus.Path
 		} else {
 			// The application doesn't have any custom metrics exposed so we'll disable metrics merging
@@ -157,7 +144,7 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio
 	containers = append(containers, skiperatorContainer)
 
 	if util.IsCloudSqlProxyEnabled(application.Spec.GCP) {
-		cloudSqlProxyContainer := core.CreateCloudSqlProxyContainer(application.Spec.GCP.CloudSQLProxy)
+		cloudSqlProxyContainer := pod.CreateCloudSqlProxyContainer(application.Spec.GCP.CloudSQLProxy)
 		containers = append(containers, cloudSqlProxyContainer)
 	}
@@ -170,7 +157,7 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio
 			Labels:      podTemplateLabels,
 			Annotations: generatedSpecAnnotations,
 		},
-		Spec: core.CreatePodSpec(
+		Spec: pod.CreatePodSpec(
 			containers,
 			podVolumes,
 			application.Name,
@@ -181,7 +168,10 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio
 		),
 	}
 
-	r.SetLabelsFromApplication(&podForDeploymentTemplate, *application)
+	// We need to set the pod labels like this because it's a template, not a resource.
+	//TODO: figure out a smoother solution?
+ resourceutils.SetApplicationLabels(&podForDeploymentTemplate, application) + resourceutils.SetCommonAnnotations(&podForDeploymentTemplate) deployment.Spec = appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{MatchLabels: util.GetPodAppSelector(application.Name)}, @@ -198,7 +188,7 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio } // Setting replicas to 0 if manifest has replicas set to 0 or replicas.min/max set to 0 - if shouldScaleToZero(application.Spec.Replicas) { + if resourceutils.ShouldScaleToZero(application.Spec.Replicas) { deployment.Spec.Replicas = util.PointTo(int32(0)) } @@ -208,106 +198,37 @@ func (r *ApplicationReconciler) defineDeployment(ctx context.Context, applicatio } else if replicas, err := skiperatorv1alpha1.GetScalingReplicas(application.Spec.Replicas); err == nil { deployment.Spec.Replicas = util.PointTo(int32(replicas.Min)) } else { - r.SetControllerError(ctx, application, controllerName, err) - return deployment, err + ctxLog.Error(err, "could not get replicas from application spec") + return err } } - r.SetLabelsFromApplication(&deployment, *application) - util.SetCommonAnnotations(&deployment) - // add an external link to argocd ingresses := application.Spec.Ingresses - if len(ingresses) > 0 { - deployment.ObjectMeta.Annotations[AnnotationKeyLinkPrefix] = fmt.Sprintf("https://%s", ingresses[0]) - } - - // Set application as owner of the deployment - err = ctrlutil.SetControllerReference(application, &deployment, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return deployment, err + if deployment.Annotations == nil { + deployment.Annotations = make(map[string]string) } - return *r.resolveDigest(ctx, &deployment), nil -} - -func (r *ApplicationReconciler) reconcileDeployment(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "Deployment" - r.SetControllerProgressing(ctx, application, controllerName) - - deployment := appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: application.Name, - Namespace: application.Namespace, - }, - } - deploymentDefinition, err := r.defineDeployment(ctx, application) - - shouldReconcile, err := r.ShouldReconcile(ctx, &deployment) - if err != nil || !shouldReconcile { - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) - return util.RequeueWithError(err) + if len(ingresses) > 0 { + deployment.Annotations[AnnotationKeyLinkPrefix] = fmt.Sprintf("https://%s", ingresses[0]) } - err = r.GetClient().Get(ctx, client.ObjectKeyFromObject(&deployment), &deployment) + err = util.ResolveImageTags(r.GetCtx(), ctxLog.GetLogger(), r.GetRestConfig(), &deployment) if err != nil { - if errors.IsNotFound(err) { - r.EmitNormalEvent(application, "NotFound", fmt.Sprintf("deployment resource for application %s not found, creating deployment", application.Name)) - err = r.GetClient().Create(ctx, &deploymentDefinition) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } else { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } - } else { - if !shouldScaleToZero(application.Spec.Replicas) && skiperatorv1alpha1.IsHPAEnabled(application.Spec.Replicas) { - // Ignore replicas set by HPA when checking diff - if int32(*deployment.Spec.Replicas) > 0 { - deployment.Spec.Replicas = nil - } - } - - // The command "kubectl rollout restart" puts an annotation on the 
deployment template in order to track - // rollouts of different replicasets. This annotation must not trigger a new reconcile, and a quick and easy - // fix is to just remove it from the map before hashing and checking the diff. - if _, rolloutIssued := deployment.Spec.Template.Annotations["kubectl.kubernetes.io/restartedAt"]; rolloutIssued { - delete(deployment.Spec.Template.Annotations, "kubectl.kubernetes.io/restartedAt") - } - - deployment = *r.resolveDigest(ctx, &deployment) - - if diffBetween(deployment, deploymentDefinition) { - patch := client.MergeFrom(deployment.DeepCopy()) - err = r.GetClient().Patch(ctx, &deploymentDefinition, patch) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return util.RequeueWithError(err) - } + //TODO fix this + // Exclude dummy image used in tests for decreased verbosity + if !strings.Contains(err.Error(), "https://index.docker.io/v2/library/image/manifests/latest") { + ctxLog.Error(err, "could not resolve container image to digest") + return err } } - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) + r.AddResource(&deployment) - return util.RequeueWithError(err) + ctxLog.Debug("successfully created deployment resource") + return nil } -func (r *ApplicationReconciler) resolveDigest(ctx context.Context, input *appsv1.Deployment) *appsv1.Deployment { - res, err := util.ResolveImageTags(ctx, logr.Discard(), r.GetRestConfig(), input) - if err != nil { - // Exclude dummy image used in tests for decreased verbosity - if !strings.Contains(err.Error(), "https://index.docker.io/v2/library/image/manifests/latest") { - deploymentLog.Error(err, "could not resolve container image to digest") - } - return input - } - // FIXME: Consider setting imagePullPolicy=IfNotPresent when the image has been resolved to - // a digest in order to reduce registry usage and spin-up times. 
- return res -} func appendDigdiratorSecretVolumeMount(skiperatorContainer *corev1.Container, volumeMounts []corev1.VolumeMount, volumes []corev1.Volume, secretName string, mountPath string) ([]corev1.Volume, []corev1.VolumeMount) { skiperatorContainer.EnvFrom = append(skiperatorContainer.EnvFrom, corev1.EnvFromSource{ SecretRef: &corev1.SecretEnvSource{ @@ -347,19 +268,7 @@ func getRollingUpdateStrategy(updateStrategy string) *appsv1.RollingUpdateDeploy } } -func shouldScaleToZero(jsonReplicas *apiextensionsv1.JSON) bool { - replicas, err := skiperatorv1alpha1.GetStaticReplicas(jsonReplicas) - if err == nil && replicas == 0 { - return true - } - replicasStruct, err := skiperatorv1alpha1.GetScalingReplicas(jsonReplicas) - if err == nil && (replicasStruct.Min == 0 || replicasStruct.Max == 0) { - return true - } - return false -} - -func resolveToPortNumber(port intstr.IntOrString, application *skiperatorv1alpha1.Application) string { +func resolveToPortNumber(port intstr.IntOrString, application *skiperatorv1alpha1.Application, ctxLog logr.Logger) string { if numericPort := port.IntValue(); numericPort > 0 { return fmt.Sprintf("%d", numericPort) } @@ -376,21 +285,6 @@ func resolveToPortNumber(port intstr.IntOrString, application *skiperatorv1alpha } } - deploymentLog.Error(goerrors.New("port not found"), "could not resolve port name to a port number", "desiredPortName", desiredPortName) + ctxLog.Error(goerrors.New("port not found"), "could not resolve port name to a port number", "desiredPortName", desiredPortName) return desiredPortName } - -func diffBetween(deployment appsv1.Deployment, definition appsv1.Deployment) bool { - deploymentHash := util.GetHashForStructs([]interface{}{&deployment.Spec, &deployment.Labels}) - deploymentDefinitionHash := util.GetHashForStructs([]interface{}{&definition.Spec, &definition.Labels}) - if deploymentHash != deploymentDefinitionHash { - return true - } - - // Same mechanism as "pod-template-hash" - if apiequality.Semantic.DeepEqual(deployment.DeepCopy().Spec, definition.DeepCopy().Spec) { - return false - } - - return true -} diff --git a/pkg/resourcegenerator/deployment/deployment_test.go b/pkg/resourcegenerator/deployment/deployment_test.go new file mode 100644 index 00000000..c38590e5 --- /dev/null +++ b/pkg/resourcegenerator/deployment/deployment_test.go @@ -0,0 +1,23 @@ +package deployment + +import ( + "github.com/kartverket/skiperator/pkg/testutil" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + "testing" +) + +func TestDeploymentMinimalAppShouldHaveLabels(t *testing.T) { + // Setup + r := testutil.GetTestMinimalAppReconciliation() + // Test + err := Generate(r) + + // Assert + assert.Nil(t, err) + assert.Equal(t, 1, len(r.GetResources())) + depl := r.GetResources()[0].(*appsv1.Deployment) + appLabel := map[string]string{"app": "minimal"} + assert.Equal(t, appLabel["app"], depl.Spec.Selector.MatchLabels["app"]) + assert.Equal(t, appLabel["app"], depl.Spec.Template.Labels["app"]) +} diff --git a/pkg/resourcegenerator/gcp/auth/configmap.go b/pkg/resourcegenerator/gcp/auth/configmap.go new file mode 100644 index 00000000..f90b7a9d --- /dev/null +++ b/pkg/resourcegenerator/gcp/auth/configmap.go @@ -0,0 +1,77 @@ +package auth + +import ( + "encoding/json" + "fmt" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/gcp" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + CredentialsMountPath = "/var/run/secrets/tokens/gcp-ksa" +) 
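+
+// For illustration only: marshalled by getConfigMap below, the credentials
+// document written to the ConfigMap looks roughly like this (pool, provider
+// and service account values are made-up examples):
+//
+//	{
+//	  "type": "external_account",
+//	  "audience": "identitynamespace:my-pool:my-provider",
+//	  "service_account_impersonation_url": "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/sa@example.iam.gserviceaccount.com:generateAccessToken",
+//	  "subject_token_type": "urn:ietf:params:oauth:token-type:jwt",
+//	  "token_url": "https://sts.googleapis.com/v1/token",
+//	  "credential_source": {"file": "/var/run/secrets/tokens/gcp-ksa/token"}
+//	}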
+
+type WorkloadIdentityCredentials struct {
+	Type                           string           `json:"type"`
+	Audience                       string           `json:"audience"`
+	ServiceAccountImpersonationUrl string           `json:"service_account_impersonation_url"`
+	SubjectTokenType               string           `json:"subject_token_type"`
+	TokenUrl                       string           `json:"token_url"`
+	CredentialSource               CredentialSource `json:"credential_source"`
+}
+
+type CredentialSource struct {
+	File string `json:"file"`
+}
+
+func Generate(r reconciliation.Reconciliation) error {
+	ctxLog := r.GetLogger()
+
+	if r.GetType() == reconciliation.ApplicationType || r.GetType() == reconciliation.JobType {
+		return getConfigMap(r, r.GetIdentityConfigMap())
+	} else {
+		err := fmt.Errorf("unsupported type %s in gcp configmap", r.GetType())
+		ctxLog.Error(err, "Failed to generate gcp configmap")
+		return err
+	}
+}
+
+func getConfigMap(r reconciliation.Reconciliation, gcpIdentityConfigMap *corev1.ConfigMap) error {
+	if r.GetSKIPObject().GetCommonSpec().GCP == nil || r.GetSKIPObject().GetCommonSpec().GCP.Auth.ServiceAccount == "" {
+		return nil
+	}
+
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Generating gcp configmap", "type", r.GetType())
+
+	object := r.GetSKIPObject()
+	gcpAuthConfigMapName := gcp.GetGCPConfigMapName(object.GetName())
+	gcpConfigMap := corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: object.GetNamespace(), Name: gcpAuthConfigMapName}}
+
+	credentials := WorkloadIdentityCredentials{
+		Type:                           "external_account",
+		Audience:                       "identitynamespace:" + gcpIdentityConfigMap.Data["workloadIdentityPool"] + ":" + gcpIdentityConfigMap.Data["identityProvider"],
+		ServiceAccountImpersonationUrl: "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/" + r.GetSKIPObject().GetCommonSpec().GCP.Auth.ServiceAccount + ":generateAccessToken",
+		SubjectTokenType:               "urn:ietf:params:oauth:token-type:jwt",
+		TokenUrl:                       "https://sts.googleapis.com/v1/token",
+		CredentialSource: CredentialSource{
+			File: fmt.Sprintf("%v/token", CredentialsMountPath),
+		},
+	}
+
+	credentialsBytes, err := json.Marshal(credentials)
+	if err != nil {
+		ctxLog.Error(err, "could not marshal gcp identity config map")
+		return err
+	}
+
+	gcpConfigMap.Data = map[string]string{
+		"config": string(credentialsBytes),
+	}
+	r.AddResource(&gcpConfigMap)
+
+	ctxLog.Debug("Finished generating configmap", "type", r.GetType(), "name", object.GetName())
+	return nil
+}
diff --git a/pkg/resourcegenerator/gcp/workload_identity.go b/pkg/resourcegenerator/gcp/workload_identity.go
index 3f35ce41..d9a68349 100644
--- a/pkg/resourcegenerator/gcp/workload_identity.go
+++ b/pkg/resourcegenerator/gcp/workload_identity.go
@@ -1,15 +1,12 @@
 package gcp
 
 import (
-	"context"
-	"encoding/json"
 	"fmt"
 	"github.com/kartverket/skiperator/pkg/util"
 	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
+// TODO: move to a more suitable pkg
 var (
 	CredentialsMountPath = "/var/run/secrets/tokens/gcp-ksa"
 	CredentialsFileName  = "google-application-credentials.json"
@@ -17,48 +14,6 @@ var (
 	ServiceAccountTokenExpiration = int64(60 * 60 * 24 * 2) // Two days
 )
 
-type WorkloadIdentityCredentials struct {
-	Type                           string           `json:"type"`
-	Audience                       string           `json:"audience"`
-	ServiceAccountImpersonationUrl string           `json:"service_account_impersonation_url"`
-	SubjectTokenType               string           `json:"subject_token_type"`
-	TokenUrl                       string           `json:"token_url"`
-	CredentialSource               CredentialSource `json:"credential_source"`
-}
-type CredentialSource struct {
-	File string `json:"file"`
-}
-
-func
GetGoogleServiceAccountCredentialsConfigMap(ctx context.Context, namespace string, name string, gcpServiceAccount string, workloadIdentityConfigMap corev1.ConfigMap) (corev1.ConfigMap, error) {
-	logger := log.FromContext(ctx)
-	gcpConfigMap := corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}
-
-	credentials := WorkloadIdentityCredentials{
-		Type:     "external_account",
-		Audience: "identitynamespace:" + workloadIdentityConfigMap.Data["workloadIdentityPool"] + ":" + workloadIdentityConfigMap.Data["identityProvider"],
-		ServiceAccountImpersonationUrl: "https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/" + gcpServiceAccount + ":generateAccessToken",
-		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
-		TokenUrl:         "https://sts.googleapis.com/v1/token",
-		CredentialSource: CredentialSource{
-			File: fmt.Sprintf("%v/token", CredentialsMountPath),
-		},
-	}
-
-	gcpConfigMap.ObjectMeta.Annotations = util.CommonAnnotations
-
-	credentialsBytes, err := json.Marshal(credentials)
-	if err != nil {
-		logger.Error(err, "could not marshall gcp identity config map")
-		return corev1.ConfigMap{}, err
-	}
-
-	gcpConfigMap.Data = map[string]string{
-		"config": string(credentialsBytes),
-	}
-
-	return gcpConfigMap, nil
-}
-
 func GetGCPConfigMapName(ownerName string) string {
 	return ownerName + "-gcp-auth"
 }
diff --git a/pkg/resourcegenerator/github/image_pull_secret.go b/pkg/resourcegenerator/github/image_pull_secret.go
new file mode 100644
index 00000000..7b0f9aca
--- /dev/null
+++ b/pkg/resourcegenerator/github/image_pull_secret.go
@@ -0,0 +1,61 @@
+package github
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type imagePullSecret struct {
+	payload []byte
+}
+
+func NewImagePullSecret(token, registry string) (*imagePullSecret, error) {
+	cfg := dockerConfigJson{}
+	cfg.Auths = make(map[string]dockerConfigAuth, 1)
+	auth := dockerConfigAuth{}
+	auth.Auth = token
+	cfg.Auths[registry] = auth
+
+	var buf bytes.Buffer
+	enc := json.NewEncoder(&buf)
+	err := enc.Encode(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return &imagePullSecret{payload: buf.Bytes()}, nil
+}
+
+func (ips *imagePullSecret) Generate(r reconciliation.Reconciliation) error {
+	if r.GetType() != reconciliation.NamespaceType {
+		return fmt.Errorf("image pull secret only supports namespace type")
+	}
+	// SKIPObject here is a namespace, so that's why we use GetName, not GetNamespace.
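+	// For illustration, the payload below decodes to a dockerconfigjson of the form
+	// {"auths":{"<registry>":{"auth":"<token>"}}}, with registry and token as passed
+	// to NewImagePullSecret above.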
+ // Should NOT be called from any other controller than namespace-controller + secret := corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: r.GetSKIPObject().GetName(), Name: "github-auth"}} + secret.Type = corev1.SecretTypeDockerConfigJson + + secret.Data = make(map[string][]byte, 1) + secret.Data[".dockerconfigjson"] = ips.payload + + r.AddResource(&secret) + return nil +} + +// IsImagePullSecret filters for secrets named github-auth +func IsImagePullSecret(secret *corev1.Secret) bool { + return secret.Name == "github-auth" +} + +type dockerConfigJson struct { + Auths map[string]dockerConfigAuth `json:"auths"` +} + +type dockerConfigAuth struct { + Auth string `json:"auth"` +} diff --git a/pkg/resourcegenerator/hpa/horizontal_pod_autoscaler.go b/pkg/resourcegenerator/hpa/horizontal_pod_autoscaler.go new file mode 100644 index 00000000..c7e5eb57 --- /dev/null +++ b/pkg/resourcegenerator/hpa/horizontal_pod_autoscaler.go @@ -0,0 +1,64 @@ +package hpa + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + "github.com/kartverket/skiperator/pkg/util" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in horizontal pod autoscaler", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate horizontal pod autoscaler") + return err + } + + ctxLog.Debug("Attempting to generate HPA for application", "application", application.Name) + + if resourceutils.ShouldScaleToZero(application.Spec.Replicas) || !skiperatorv1alpha1.IsHPAEnabled(application.Spec.Replicas) { + ctxLog.Debug("Skipping horizontal pod autoscaler generation for application") + return nil + } + + horizontalPodAutoscaler := autoscalingv2.HorizontalPodAutoscaler{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} + + replicas, err := skiperatorv1alpha1.GetScalingReplicas(application.Spec.Replicas) + if err != nil { + return err + } + + horizontalPodAutoscaler.Spec = autoscalingv2.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: application.Name, + }, + MinReplicas: util.PointTo(int32(replicas.Min)), + MaxReplicas: int32(replicas.Max), + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: util.PointTo(int32(replicas.TargetCpuUtilization)), + }, + }, + }, + }, + } + + r.AddResource(&horizontalPodAutoscaler) + + return nil +} diff --git a/controllers/application/idporten.go b/pkg/resourcegenerator/idporten/idporten.go similarity index 73% rename from controllers/application/idporten.go rename to pkg/resourcegenerator/idporten/idporten.go index 341306f9..cff0d91f 100644 --- a/controllers/application/idporten.go +++ b/pkg/resourcegenerator/idporten/idporten.go @@ -1,22 +1,18 @@ -package applicationcontroller +package idporten import ( - "context" - 
"github.com/kartverket/skiperator/api/v1alpha1/digdirator" - "net/url" - "path" - + "fmt" skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/api/v1alpha1/digdirator" + "github.com/kartverket/skiperator/pkg/reconciliation" "github.com/kartverket/skiperator/pkg/util" "github.com/kartverket/skiperator/pkg/util/array" - naisiov1 "github.com/nais/liberator/pkg/apis/nais.io/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - digdiratorClients "github.com/nais/digdirator/pkg/clients" digdiratorTypes "github.com/nais/digdirator/pkg/digdir/types" + naisiov1 "github.com/nais/liberator/pkg/apis/nais.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "net/url" + "path" ) const ( @@ -26,9 +22,21 @@ const ( KVBaseURL = "https://kartverket.no" ) -func (r *ApplicationReconciler) reconcileIDPorten(ctx context.Context, application *skiperatorv1alpha1.Application) (reconcile.Result, error) { - controllerName := "IDPorten" - r.SetControllerProgressing(ctx, application, controllerName) +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in idporten resource", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate idporten resource") + return err + } + if application.Spec.IDPorten == nil { + return nil + } + ctxLog.Debug("Attempting to generate id porten resource for application", "application", application.Name) var err error @@ -43,38 +51,15 @@ func (r *ApplicationReconciler) reconcileIDPorten(ctx context.Context, applicati }, } - if idportenSpecifiedInSpec(application.Spec.IDPorten) { - _, err = ctrlutil.CreateOrPatch(ctx, r.GetClient(), &idporten, func() error { - // Set application as owner of the sidecar - err := ctrlutil.SetControllerReference(application, &idporten, r.GetScheme()) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return err - } - - r.SetLabelsFromApplication(&idporten, *application) - util.SetCommonAnnotations(&idporten) - - idporten.Spec, err = getIDPortenSpec(application) - return err - }) - - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return reconcile.Result{}, err - } - } else { - err = r.GetClient().Delete(ctx, &idporten) - err = client.IgnoreNotFound(err) - if err != nil { - r.SetControllerError(ctx, application, controllerName, err) - return reconcile.Result{}, err - } + idporten.Spec, err = getIDPortenSpec(application) + if err != nil { + return err } - r.SetControllerFinishedOutcome(ctx, application, controllerName, err) + r.AddResource(&idporten) + ctxLog.Debug("Finished generating id porten resource for application", "application", application.Name) - return reconcile.Result{}, err + return nil } // Assumes application.Spec.IDPorten != nil @@ -91,14 +76,20 @@ func getIDPortenSpec(application *skiperatorv1alpha1.Application) (naisiov1.IDPo } ingress := KVBaseURL - if len(application.Spec.Ingresses) != 0 { - ingress = application.Spec.Ingresses[0] + hosts, err := application.Spec.Hosts() + if err != nil { + return naisiov1.IDPortenClientSpec{}, err + } + + ingresses := hosts.Hostnames() + if len(ingresses) != 0 { + 
ingress = ingresses[0] } ingress = util.EnsurePrefix(ingress, "https://") scopes := getScopes(integrationType, application.Spec.IDPorten.Scopes) - redirectURIs, err := buildURIs(application.Spec.Ingresses, application.Spec.IDPorten.RedirectPath, DefaultClientCallbackPath) + redirectURIs, err := buildURIs(ingresses, application.Spec.IDPorten.RedirectPath, DefaultClientCallbackPath) if err != nil { return naisiov1.IDPortenClientSpec{}, nil } @@ -113,7 +104,7 @@ func getIDPortenSpec(application *skiperatorv1alpha1.Application) (naisiov1.IDPo return naisiov1.IDPortenClientSpec{}, nil } - secretName, err := getIDPortenSecretName(application.Name) + secretName, err := GetIDPortenSecretName(application.Name) if err != nil { return naisiov1.IDPortenClientSpec{}, err } @@ -207,10 +198,10 @@ func buildURIs(ingresses []string, pathSeg string, fallback string) ([]naisiov1. }) } -func idportenSpecifiedInSpec(mp *digdirator.IDPorten) bool { +func IdportenSpecifiedInSpec(mp *digdirator.IDPorten) bool { return mp != nil && mp.Enabled } -func getIDPortenSecretName(name string) (string, error) { +func GetIDPortenSecretName(name string) (string, error) { return util.GetSecretName("idporten", name) } diff --git a/pkg/resourcegenerator/istio/authorizationpolicy/authorization_policy.go b/pkg/resourcegenerator/istio/authorizationpolicy/authorization_policy.go new file mode 100644 index 00000000..c7fc1e75 --- /dev/null +++ b/pkg/resourcegenerator/istio/authorizationpolicy/authorization_policy.go @@ -0,0 +1,92 @@ +package authorizationpolicy + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + securityv1beta1api "istio.io/api/security/v1beta1" + typev1beta1 "istio.io/api/type/v1beta1" + securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in AuthorizationPolicy", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate AuthorizationPolicy") + return err + } + ctxLog.Debug("Attempting to generate AuthorizationPolicy for application", "application", application.Name) + + defaultDenyPaths := []string{ + "/actuator*", + } + defaultDenyAuthPolicy := getDefaultDenyPolicy(application, defaultDenyPaths) + + if application.Spec.AuthorizationSettings != nil { + if application.Spec.AuthorizationSettings.AllowAll == true { + return nil + } + } + + if application.Spec.AuthorizationSettings != nil { + + // As of now we only use one rule and one operation for all default denies. 
No need to loop over them all + defaultDenyToOperation := defaultDenyAuthPolicy.Spec.Rules[0].To[0].Operation + defaultDenyToOperation.NotPaths = nil + + if len(application.Spec.AuthorizationSettings.AllowList) > 0 { + for _, endpoint := range application.Spec.AuthorizationSettings.AllowList { + defaultDenyToOperation.NotPaths = append(defaultDenyToOperation.NotPaths, endpoint) + } + } + } + + ctxLog.Debug("Finished generating AuthorizationPolicy for application", "application", application.Name) + r.AddResource(&defaultDenyAuthPolicy) + + return nil +} + +func getGeneralFromRule() []*securityv1beta1api.Rule_From { + return []*securityv1beta1api.Rule_From{ + { + Source: &securityv1beta1api.Source{ + Namespaces: []string{"istio-gateways"}, + }, + }, + } +} + +func getDefaultDenyPolicy(application *skiperatorv1alpha1.Application, denyPaths []string) securityv1beta1.AuthorizationPolicy { + return securityv1beta1.AuthorizationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: application.Namespace, + Name: application.Name + "-deny", + }, + Spec: securityv1beta1api.AuthorizationPolicy{ + Action: securityv1beta1api.AuthorizationPolicy_DENY, + Rules: []*securityv1beta1api.Rule{ + { + To: []*securityv1beta1api.Rule_To{ + { + Operation: &securityv1beta1api.Operation{ + Paths: denyPaths, + }, + }, + }, + From: getGeneralFromRule(), + }, + }, + Selector: &typev1beta1.WorkloadSelector{ + MatchLabels: util.GetPodAppSelector(application.Name), + }, + }, + } +} diff --git a/pkg/resourcegenerator/istio/gateway/application.go b/pkg/resourcegenerator/istio/gateway/application.go new file mode 100644 index 00000000..51460698 --- /dev/null +++ b/pkg/resourcegenerator/istio/gateway/application.go @@ -0,0 +1,80 @@ +package gateway + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + networkingv1beta1api "istio.io/api/networking/v1beta1" + networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.ApplicationType, generateForApplication) +} + +func generateForApplication(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate gateway for application", "objectname", r.GetSKIPObject().GetName()) + + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("gateway only supports Application type") + } + + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + return fmt.Errorf("failed to cast object to Application") + } + + hosts, err := application.Spec.Hosts() + if err != nil { + return fmt.Errorf("failure to get hosts from application: %w", err) + } + + // Generate separate gateway for each ingress + for _, h := range hosts.AllHosts() { + name := fmt.Sprintf("%s-ingress-%x", application.Name, util.GenerateHashFromName(h.Hostname)) + gateway := networkingv1beta1.Gateway{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: name}} + + gateway.Spec.Selector = util.GetIstioGatewayLabelSelector(h.Hostname) + + gatewayServersToAdd := []*networkingv1beta1api.Server{} + + baseHttpGatewayServer := &networkingv1beta1api.Server{ + Hosts: []string{h.Hostname}, + Port: &networkingv1beta1api.Port{ + Number: 80, + Name: "http", + Protocol: "HTTP", + }, + } + + determinedCredentialName := application.Namespace + "-" + name + if h.UsesCustomCert() { + 
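+			// A user-supplied TLS secret overrides the default <namespace>-<gateway-name>
+			// credential for this host.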
determinedCredentialName = *h.CustomCertificateSecret + } + + httpsGatewayServer := &networkingv1beta1api.Server{ + Hosts: []string{h.Hostname}, + Port: &networkingv1beta1api.Port{ + Number: 443, + Name: "https", + Protocol: "HTTPS", + }, + Tls: &networkingv1beta1api.ServerTLSSettings{ + Mode: networkingv1beta1api.ServerTLSSettings_SIMPLE, + CredentialName: determinedCredentialName, + }, + } + + gatewayServersToAdd = append(gatewayServersToAdd, baseHttpGatewayServer, httpsGatewayServer) + + gateway.Spec.Servers = gatewayServersToAdd + r.AddResource(&gateway) + } + + ctxLog.Debug("Finished generating ingress gateways for application", "application", application.Name) + return nil +} diff --git a/pkg/resourcegenerator/istio/gateway/gateway.go b/pkg/resourcegenerator/istio/gateway/gateway.go new file mode 100644 index 00000000..6c0e275a --- /dev/null +++ b/pkg/resourcegenerator/istio/gateway/gateway.go @@ -0,0 +1,12 @@ +package gateway + +import ( + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils/generator" +) + +var multiGenerator = generator.NewMulti() + +func Generate(r reconciliation.Reconciliation) error { + return multiGenerator.Generate(r, "Gateway") +} diff --git a/pkg/resourcegenerator/istio/gateway/routing.go b/pkg/resourcegenerator/istio/gateway/routing.go new file mode 100644 index 00000000..714dbc87 --- /dev/null +++ b/pkg/resourcegenerator/istio/gateway/routing.go @@ -0,0 +1,76 @@ +package gateway + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + networkingv1beta1api "istio.io/api/networking/v1beta1" + networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.RoutingType, generateForRouting) +} + +func generateForRouting(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate gateway for routing", "routing", r.GetSKIPObject().GetName()) + + if r.GetType() != reconciliation.RoutingType { + return fmt.Errorf("gateway only supports routing type") + } + routing, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Routing) + if !ok { + return fmt.Errorf("failed to cast object to routing") + } + + h, err := routing.Spec.GetHost() + if err != nil { + return err + } + + gateway := networkingv1beta1.Gateway{ObjectMeta: metav1.ObjectMeta{Namespace: routing.Namespace, Name: routing.GetGatewayName()}} + + var determinedCredentialName string + if h.UsesCustomCert() { + determinedCredentialName = *h.CustomCertificateSecret + } else { + determinedCredentialName, err = routing.GetCertificateName() + if err != nil { + return err + } + } + + gateway.Spec.Selector = util.GetIstioGatewayLabelSelector(h.Hostname) + gateway.Spec.Servers = []*networkingv1beta1api.Server{ + { + Hosts: []string{h.Hostname}, + Port: &networkingv1beta1api.Port{ + Number: 80, + Name: "http", + Protocol: "HTTP", + }, + }, + { + Hosts: []string{h.Hostname}, + Port: &networkingv1beta1api.Port{ + Number: 443, + Name: "https", + Protocol: "HTTPS", + }, + Tls: &networkingv1beta1api.ServerTLSSettings{ + Mode: networkingv1beta1api.ServerTLSSettings_SIMPLE, + CredentialName: determinedCredentialName, + }, + }, + } + + r.AddResource(&gateway) + + ctxLog.Debug("Finished generating ingress gateways for routing", "routing", routing.Name) + return nil + +} 
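Both the gateway and virtualservice packages dispatch through a shared multiGenerator created with generator.NewMulti(), with per-type generators self-registering from init(). A minimal sketch of what such a multi-generator can look like; the type key name (ObjectType) and function signature here are assumptions, and the real implementation lives in pkg/resourcegenerator/resourceutils/generator/multigenerator.go:

package generator

import (
	"fmt"

	"github.com/kartverket/skiperator/pkg/reconciliation"
)

// MultiGenerator dispatches to the generator registered for the
// reconciliation's object type (illustrative sketch, not the actual code).
type MultiGenerator struct {
	generators map[reconciliation.ObjectType]func(reconciliation.Reconciliation) error
}

func NewMulti() *MultiGenerator {
	return &MultiGenerator{
		generators: map[reconciliation.ObjectType]func(reconciliation.Reconciliation) error{},
	}
}

// Register is called from the init() of each per-type file, e.g. application.go
// and routing.go above.
func (m *MultiGenerator) Register(t reconciliation.ObjectType, f func(reconciliation.Reconciliation) error) {
	m.generators[t] = f
}

// Generate looks up the generator for the reconciliation's type; kind is only
// used to produce a readable error message.
func (m *MultiGenerator) Generate(r reconciliation.Reconciliation, kind string) error {
	f, ok := m.generators[r.GetType()]
	if !ok {
		return fmt.Errorf("no %s generator registered for type %s", kind, r.GetType())
	}
	return f(r)
}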
diff --git a/pkg/resourcegenerator/istio/peer_authentication.go b/pkg/resourcegenerator/istio/peer_authentication.go deleted file mode 100644 index 691fcfc9..00000000 --- a/pkg/resourcegenerator/istio/peer_authentication.go +++ /dev/null @@ -1,18 +0,0 @@ -package istio - -import ( - "github.com/kartverket/skiperator/pkg/util" - securityv1beta1api "istio.io/api/security/v1beta1" - typev1beta1 "istio.io/api/type/v1beta1" -) - -func GetPeerAuthentication(ownerName string) securityv1beta1api.PeerAuthentication { - return securityv1beta1api.PeerAuthentication{ - Selector: &typev1beta1.WorkloadSelector{ - MatchLabels: util.GetPodAppSelector(ownerName), - }, - Mtls: &securityv1beta1api.PeerAuthentication_MutualTLS{ - Mode: securityv1beta1api.PeerAuthentication_MutualTLS_STRICT, - }, - } -} diff --git a/pkg/resourcegenerator/istio/peerauthentication/peer_authentication.go b/pkg/resourcegenerator/istio/peerauthentication/peer_authentication.go new file mode 100644 index 00000000..342bcff1 --- /dev/null +++ b/pkg/resourcegenerator/istio/peerauthentication/peer_authentication.go @@ -0,0 +1,43 @@ +package peerauthentication + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + securityv1beta1api "istio.io/api/security/v1beta1" + typev1beta1 "istio.io/api/type/v1beta1" + securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in peer authentication", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate peer authentication") + return err + } + ctxLog.Debug("Attempting to generate peer authentication for application", "application", application.Name) + + peerAuthentication := securityv1beta1.PeerAuthentication{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} + + peerAuthentication.Spec = securityv1beta1api.PeerAuthentication{ + Selector: &typev1beta1.WorkloadSelector{ + MatchLabels: util.GetPodAppSelector(application.Name), + }, + Mtls: &securityv1beta1api.PeerAuthentication_MutualTLS{ + Mode: securityv1beta1api.PeerAuthentication_MutualTLS_STRICT, + }, + } + + ctxLog.Debug("Finished generating peer authentication for application", "application", application.Name) + + r.AddResource(&peerAuthentication) + + return nil +} diff --git a/pkg/resourcegenerator/istio/service_entry.go b/pkg/resourcegenerator/istio/serviceentry/serviceentry.go similarity index 71% rename from pkg/resourcegenerator/istio/service_entry.go rename to pkg/resourcegenerator/istio/serviceentry/serviceentry.go index 47b8d741..2a7d60a0 100644 --- a/pkg/resourcegenerator/istio/service_entry.go +++ b/pkg/resourcegenerator/istio/serviceentry/serviceentry.go @@ -1,12 +1,12 @@ -package istio +package serviceentry import ( "errors" "fmt" skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" "github.com/kartverket/skiperator/api/v1alpha1/podtypes" + "github.com/kartverket/skiperator/pkg/reconciliation" "github.com/kartverket/skiperator/pkg/util" - "golang.org/x/exp/slices" networkingv1beta1api "istio.io/api/networking/v1beta1" networkingv1beta1 
"istio.io/client-go/pkg/apis/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -14,46 +14,31 @@ import ( "strings" ) -func setCloudSqlRule(accessPolicy *podtypes.AccessPolicy, object client.Object) (*podtypes.AccessPolicy, error) { - application, ok := object.(*skiperatorv1alpha1.Application) - if !ok { - return accessPolicy, nil - } - - if !util.IsCloudSqlProxyEnabled(application.Spec.GCP) { - return accessPolicy, nil - } - - if application.Spec.GCP.CloudSQLProxy.IP == "" { - return nil, errors.New("cloud sql proxy IP is not set") - } - - // The istio validation webhook will reject the service entry if the host is not a valid DNS name, such as an IP address. - // So we generate something that will not crash with other apps in the same namespace. - externalRule := &podtypes.ExternalRule{ - Host: fmt.Sprintf("%s-%x.cloudsql", application.Name, util.GenerateHashFromName(application.Spec.Image)), - Ip: application.Spec.GCP.CloudSQLProxy.IP, - Ports: []podtypes.ExternalPort{{Name: "cloudsqlproxy", Port: 3307, Protocol: "TCP"}}, - } +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() - if accessPolicy == nil { - accessPolicy = &podtypes.AccessPolicy{} + if r.GetType() == reconciliation.ApplicationType || r.GetType() == reconciliation.JobType { + return getServiceEntries(r) + } else { + err := fmt.Errorf("unsupported type %s in service entry", r.GetType()) + ctxLog.Error(err, "Failed to generate service entry") + return err } - - (*accessPolicy).Outbound.External = append((*accessPolicy).Outbound.External, *externalRule) - - return accessPolicy, nil } -func GetServiceEntries(accessPolicy *podtypes.AccessPolicy, object client.Object) ([]networkingv1beta1.ServiceEntry, error) { - var serviceEntries []networkingv1beta1.ServiceEntry +func getServiceEntries(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate service entries", "type", r.GetType()) + + object := r.GetSKIPObject() + accessPolicy := object.GetCommonSpec().AccessPolicy accessPolicy, err := setCloudSqlRule(accessPolicy, object) if err != nil { - return nil, err + return err } - if accessPolicy != nil { + if accessPolicy != nil && accessPolicy.Outbound != nil { for _, rule := range (*accessPolicy).Outbound.External { serviceEntryName := fmt.Sprintf("%s-egress-%x", object.GetName(), util.GenerateHashFromName(rule.Host)) @@ -70,7 +55,7 @@ func GetServiceEntries(accessPolicy *podtypes.AccessPolicy, object client.Object ports, err := getPorts(rule.Ports, rule.Ip) if err != nil { - return nil, err + return err } serviceEntry := networkingv1beta1.ServiceEntry{ @@ -89,39 +74,12 @@ func GetServiceEntries(accessPolicy *podtypes.AccessPolicy, object client.Object }, } - serviceEntries = append(serviceEntries, serviceEntry) - } - } - - return serviceEntries, nil -} - -func GetServiceEntriesToDelete(serviceEntriesInNamespace []*networkingv1beta1.ServiceEntry, ownerName string, currentEgresses []networkingv1beta1.ServiceEntry) []networkingv1beta1.ServiceEntry { - var serviceEntriesToDelete []networkingv1beta1.ServiceEntry - - for _, serviceEntry := range serviceEntriesInNamespace { - - ownerIndex := slices.IndexFunc(serviceEntry.GetOwnerReferences(), func(ownerReference metav1.OwnerReference) bool { - return ownerReference.Name == ownerName - }) - serviceEntryOwnedByThisApplication := ownerIndex != -1 - if !serviceEntryOwnedByThisApplication { - continue - } - - serviceEntryInCurrentEgresses := slices.IndexFunc(currentEgresses, func(inSpecEntry 
networkingv1beta1.ServiceEntry) bool { - return inSpecEntry.Name == serviceEntry.Name - }) - - serviceEntryInOwnerSpec := serviceEntryInCurrentEgresses != -1 - if serviceEntryInOwnerSpec { - continue + r.AddResource(&serviceEntry) } - - serviceEntriesToDelete = append(serviceEntriesToDelete, *serviceEntry) } - return serviceEntriesToDelete + ctxLog.Debug("Finished generating service entries for type", "type", r.GetType(), "name", object.GetName()) + return nil } func getPorts(externalPorts []podtypes.ExternalPort, ruleIP string) ([]*networkingv1beta1api.ServicePort, error) { @@ -161,3 +119,38 @@ func getIpData(ip string) (networkingv1beta1api.ServiceEntry_Resolution, []strin return networkingv1beta1api.ServiceEntry_STATIC, []string{ip}, []*networkingv1beta1api.WorkloadEntry{{Address: ip}} } + +func setCloudSqlRule(accessPolicy *podtypes.AccessPolicy, object client.Object) (*podtypes.AccessPolicy, error) { + application, ok := object.(*skiperatorv1alpha1.Application) + if !ok { + return accessPolicy, nil + } + + if !util.IsCloudSqlProxyEnabled(application.Spec.GCP) { + return accessPolicy, nil + } + + if application.Spec.GCP.CloudSQLProxy.IP == "" { + return nil, errors.New("cloud sql proxy IP is not set") + } + + // The istio validation webhook will reject the service entry if the host is not a valid DNS name, such as an IP address. + // So we generate something that will not crash with other apps in the same namespace. + externalRule := &podtypes.ExternalRule{ + Host: fmt.Sprintf("%s-%x.cloudsql", application.Name, util.GenerateHashFromName(application.Spec.Image)), + Ip: application.Spec.GCP.CloudSQLProxy.IP, + Ports: []podtypes.ExternalPort{{Name: "cloudsqlproxy", Port: 3307, Protocol: "TCP"}}, + } + + if accessPolicy == nil { + accessPolicy = &podtypes.AccessPolicy{} + } + + if accessPolicy.Outbound == nil { + accessPolicy.Outbound = &podtypes.OutboundPolicy{} + } + + accessPolicy.Outbound.External = append(accessPolicy.Outbound.External, *externalRule) + + return accessPolicy, nil +} diff --git a/pkg/resourcegenerator/istio/sidecar/sidecar.go b/pkg/resourcegenerator/istio/sidecar/sidecar.go new file mode 100644 index 00000000..441b1b11 --- /dev/null +++ b/pkg/resourcegenerator/istio/sidecar/sidecar.go @@ -0,0 +1,32 @@ +package sidecar + +import ( + "fmt" + "github.com/kartverket/skiperator/pkg/reconciliation" + networkingv1beta1api "istio.io/api/networking/v1beta1" + networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TODO investigate: this doesn't seem to be doing anything on the cluster today? 
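+// With outbound mode REGISTRY_ONLY, sidecars may only reach hosts that exist in
+// Istio's service registry, which is why external hosts must be declared through
+// ServiceEntry resources (see the serviceentry package above).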
+func Generate(r reconciliation.Reconciliation) error {
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Attempting to generate istio sidecar resource for namespace", "namespace", r.GetSKIPObject().GetName())
+
+	if r.GetType() != reconciliation.NamespaceType {
+		return fmt.Errorf("istio sidecar resource only supports the namespace type")
+	}
+
+	sidecar := networkingv1beta1.Sidecar{ObjectMeta: metav1.ObjectMeta{Namespace: r.GetSKIPObject().GetName(), Name: "sidecar"}}
+
+	sidecar.Spec = networkingv1beta1api.Sidecar{
+		OutboundTrafficPolicy: &networkingv1beta1api.OutboundTrafficPolicy{
+			Mode: networkingv1beta1api.OutboundTrafficPolicy_REGISTRY_ONLY,
+		},
+	}
+
+	r.AddResource(&sidecar)
+
+	ctxLog.Debug("Finished generating istio sidecar resource for namespace", "namespace", r.GetSKIPObject().GetName())
+	return nil
+}
diff --git a/pkg/resourcegenerator/istio/virtualservice/application.go b/pkg/resourcegenerator/istio/virtualservice/application.go
new file mode 100644
index 00000000..dd8e5fda
--- /dev/null
+++ b/pkg/resourcegenerator/istio/virtualservice/application.go
@@ -0,0 +1,102 @@
+package virtualservice
+
+import (
+	"fmt"
+	"hash/fnv"
+
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	networkingv1beta1api "istio.io/api/networking/v1beta1"
+	networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func init() {
+	multiGenerator.Register(reconciliation.ApplicationType, generateForApplication)
+}
+
+func generateForApplication(r reconciliation.Reconciliation) error {
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Attempting to generate virtual service for application", "application", r.GetSKIPObject().GetName())
+
+	application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application)
+	if !ok {
+		return fmt.Errorf("failed to cast object to Application")
+	}
+
+	virtualService := networkingv1beta1.VirtualService{
+		ObjectMeta: v1.ObjectMeta{
+			Name:      application.Name + "-ingress",
+			Namespace: application.Namespace,
+		},
+	}
+
+	hosts, err := application.Spec.Hosts()
+	if err != nil {
+		return err
+	}
+
+	if len(hosts.Hostnames()) > 0 {
+		virtualService.Spec = networkingv1beta1api.VirtualService{
+			ExportTo: []string{".", "istio-system", "istio-gateways"},
+			Gateways: getGatewaysFromApplication(application),
+			Hosts:    hosts.Hostnames(),
+			Http:     []*networkingv1beta1api.HTTPRoute{},
+		}
+
+		if application.Spec.RedirectToHTTPS != nil && *application.Spec.RedirectToHTTPS {
+			virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{
+				Name: "redirect-to-https",
+				Match: []*networkingv1beta1api.HTTPMatchRequest{
+					{
+						WithoutHeaders: map[string]*networkingv1beta1api.StringMatch{
+							":path": {
+								MatchType: &networkingv1beta1api.StringMatch_Prefix{
+									Prefix: "/.well-known/acme-challenge/",
+								},
+							},
+						},
+						Port: 80,
+					},
+				},
+				Redirect: &networkingv1beta1api.HTTPRedirect{
+					Scheme:       "https",
+					RedirectCode: 308,
+				},
+			})
+		}
+
+		virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{
+			Name: "default-app-route",
+			Route: []*networkingv1beta1api.HTTPRouteDestination{
+				{
+					Destination: &networkingv1beta1api.Destination{
+						Host: application.Name,
+						Port: &networkingv1beta1api.PortSelector{
+							Number: uint32(application.Spec.Port),
+						},
+					},
+				},
+			},
+		})
+		r.AddResource(&virtualService)
+		ctxLog.Debug("Added virtual service to application", "application", application.Name)
+	}
+
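+	// The gateway names referenced in Gateways above must match those emitted by the
+	// gateway generator; both sides derive <app>-ingress-<hash-of-hostname>, assuming
+	// util.GenerateHashFromName uses the same hash as getGatewaysFromApplication below.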
ctxLog.Debug("Finished generating virtual service for application", "application", application.Name) + return nil +} + +func getGatewaysFromApplication(application *skiperatorv1alpha1.Application) []string { + hosts, _ := application.Spec.Hosts() + gateways := make([]string, 0, hosts.Count()) + for _, hostname := range hosts.Hostnames() { + // Generate gateway name + hash := fnv.New64() + _, _ = hash.Write([]byte(hostname)) + name := fmt.Sprintf("%s-ingress-%x", application.Name, hash.Sum64()) + gateways = append(gateways, name) + } + + return gateways +} diff --git a/pkg/resourcegenerator/istio/virtualservice/routing.go b/pkg/resourcegenerator/istio/virtualservice/routing.go new file mode 100644 index 00000000..e18f2036 --- /dev/null +++ b/pkg/resourcegenerator/istio/virtualservice/routing.go @@ -0,0 +1,103 @@ +package virtualservice + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + networkingv1beta1api "istio.io/api/networking/v1beta1" + networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.RoutingType, generateForRouting) +} + +func generateForRouting(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate virtual service for routing", "routing", r.GetSKIPObject().GetName()) + + routing, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Routing) + if !ok { + return fmt.Errorf("failed to cast object to Application") + } + + virtualService := networkingv1beta1.VirtualService{ + ObjectMeta: v1.ObjectMeta{ + Name: routing.GetVirtualServiceName(), + Namespace: routing.Namespace, + }, + } + + virtualService.Spec = networkingv1beta1api.VirtualService{ + ExportTo: []string{".", "istio-system", "istio-gateways"}, + Gateways: []string{ + routing.GetGatewayName(), + }, + Hosts: []string{ + routing.Spec.Hostname, + }, + Http: []*networkingv1beta1api.HTTPRoute{}, + } + + if routing.GetRedirectToHTTPS() { + virtualService.Spec.Http = append(virtualService.Spec.Http, &networkingv1beta1api.HTTPRoute{ + Name: "redirect-to-https", + Match: []*networkingv1beta1api.HTTPMatchRequest{ + { + WithoutHeaders: map[string]*networkingv1beta1api.StringMatch{ + ":path": { + MatchType: &networkingv1beta1api.StringMatch_Prefix{ + Prefix: "/.well-known/acme-challenge/", + }, + }, + }, + Port: 80, + }, + }, + Redirect: &networkingv1beta1api.HTTPRedirect{ + Scheme: "https", + RedirectCode: 308, + }, + }) + } + + for _, route := range routing.Spec.Routes { + + httpRoute := &networkingv1beta1api.HTTPRoute{ + Name: route.TargetApp, + Match: []*networkingv1beta1api.HTTPMatchRequest{ + { + Port: 443, + Uri: &networkingv1beta1api.StringMatch{ + MatchType: &networkingv1beta1api.StringMatch_Prefix{ + Prefix: route.PathPrefix, + }, + }, + }, + }, + Route: []*networkingv1beta1api.HTTPRouteDestination{ + { + Destination: &networkingv1beta1api.Destination{ + Host: route.TargetApp, + Port: &networkingv1beta1api.PortSelector{ + Number: uint32(route.Port), + }, + }, + }, + }, + } + + if route.RewriteUri { + httpRoute.Rewrite = &networkingv1beta1api.HTTPRewrite{ + Uri: "/", + } + } + + virtualService.Spec.Http = append(virtualService.Spec.Http, httpRoute) + } + r.AddResource(&virtualService) + ctxLog.Debug("Finished generating virtual service for routing", "routing", routing.Name) + return nil +} diff --git a/pkg/resourcegenerator/istio/virtualservice/virtual_service.go 
b/pkg/resourcegenerator/istio/virtualservice/virtual_service.go
new file mode 100644
index 00000000..f98b8428
--- /dev/null
+++ b/pkg/resourcegenerator/istio/virtualservice/virtual_service.go
@@ -0,0 +1,12 @@
+package virtualservice
+
+import (
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils/generator"
+)
+
+var multiGenerator = generator.NewMulti()
+
+func Generate(r reconciliation.Reconciliation) error {
+	return multiGenerator.Generate(r, "VirtualService")
+}
diff --git a/pkg/resourcegenerator/job/job.go b/pkg/resourcegenerator/job/job.go
new file mode 100644
index 00000000..16f1589e
--- /dev/null
+++ b/pkg/resourcegenerator/job/job.go
@@ -0,0 +1,123 @@
+package job
+
+import (
+	"fmt"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/gcp"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/pod"
+	"github.com/kartverket/skiperator/pkg/resourcegenerator/volume"
+	"github.com/kartverket/skiperator/pkg/util"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TODO: completely butchered, needs to be thoroughly checked
+func Generate(r reconciliation.Reconciliation) error {
+	ctxLog := r.GetLogger()
+	ctxLog.Debug("Attempting to generate job for skipjob", "skipjob", r.GetSKIPObject().GetName())
+
+	if r.GetType() != reconciliation.JobType {
+		return fmt.Errorf("job only supports skipjob type, got %s", r.GetType())
+	}
+
+	skipJob := r.GetSKIPObject().(*skiperatorv1alpha1.SKIPJob)
+
+	meta := metav1.ObjectMeta{
+		Namespace: skipJob.Namespace,
+		Name:      skipJob.Name,
+		Labels:    map[string]string{"app": skipJob.KindPostFixedName()},
+	}
+	job := batchv1.Job{ObjectMeta: meta}
+	cronJob := batchv1.CronJob{ObjectMeta: meta}
+
+	// By specifying port and path annotations, Istio will scrape metrics from the application
+	// and merge it together with its own metrics.
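+	// For example (illustrative values): a Prometheus config with port "8080" and
+	// path "/metrics" results in prometheus.io/port: "8080" and prometheus.io/path: "/metrics".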
+ // + // See + // - https://superorbital.io/blog/istio-metrics-merging/ + // - https://androidexample365.com/an-example-of-how-istio-metrics-merging-works/ + if r.IsIstioEnabled() && skipJob.Spec.Prometheus != nil { + skipJob.Annotations["prometheus.io/port"] = skipJob.Spec.Prometheus.Port.StrVal + skipJob.Annotations["prometheus.io/path"] = skipJob.Spec.Prometheus.Path + } + + if skipJob.Spec.Cron != nil { + cronJob.Spec = getCronJobSpec(skipJob, cronJob.Spec.JobTemplate.Spec.Selector, cronJob.Spec.JobTemplate.Spec.Template.Labels, r.GetIdentityConfigMap()) + r.AddResource(&cronJob) + } else { + job.Spec = getJobSpec(skipJob, job.Spec.Selector, job.Spec.Template.Labels, r.GetIdentityConfigMap()) + r.AddResource(&job) + } + + return nil +} + +func getCronJobSpec(skipJob *skiperatorv1alpha1.SKIPJob, selector *metav1.LabelSelector, podLabels map[string]string, gcpIdentityConfigMap *corev1.ConfigMap) batchv1.CronJobSpec { + return batchv1.CronJobSpec{ + Schedule: skipJob.Spec.Cron.Schedule, + StartingDeadlineSeconds: skipJob.Spec.Cron.StartingDeadlineSeconds, + ConcurrencyPolicy: skipJob.Spec.Cron.ConcurrencyPolicy, + Suspend: skipJob.Spec.Cron.Suspend, + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: skipJob.GetDefaultLabels(), + }, + Spec: getJobSpec(skipJob, selector, podLabels, gcpIdentityConfigMap), + }, + SuccessfulJobsHistoryLimit: util.PointTo(int32(3)), + FailedJobsHistoryLimit: util.PointTo(int32(1)), + } +} + +func getJobSpec(skipJob *skiperatorv1alpha1.SKIPJob, selector *metav1.LabelSelector, podLabels map[string]string, gcpIdentityConfigMap *corev1.ConfigMap) batchv1.JobSpec { + podVolumes, containerVolumeMounts := volume.GetContainerVolumeMountsAndPodVolumes(skipJob.Spec.Container.FilesFrom) + envVars := skipJob.Spec.Container.Env + + if skipJob.Spec.Container.GCP != nil { + gcpPodVolume := gcp.GetGCPContainerVolume(gcpIdentityConfigMap.Data["workloadIdentityPool"], skipJob.Name) + gcpContainerVolumeMount := gcp.GetGCPContainerVolumeMount() + gcpEnvVar := gcp.GetGCPEnvVar() + + podVolumes = append(podVolumes, gcpPodVolume) + containerVolumeMounts = append(containerVolumeMounts, gcpContainerVolumeMount) + envVars = append(envVars, gcpEnvVar) + } + + var skipJobContainer corev1.Container + skipJobContainer = pod.CreateJobContainer(skipJob, containerVolumeMounts, envVars) + + var containers []corev1.Container + + containers = append(containers, skipJobContainer) + + jobSpec := batchv1.JobSpec{ + Parallelism: util.PointTo(int32(1)), + Completions: util.PointTo(int32(1)), + ActiveDeadlineSeconds: skipJob.Spec.Job.ActiveDeadlineSeconds, + PodFailurePolicy: nil, + BackoffLimit: skipJob.Spec.Job.BackoffLimit, + Selector: nil, + ManualSelector: nil, + Template: corev1.PodTemplateSpec{ + Spec: pod.CreatePodSpec( + containers, + podVolumes, + skipJob.KindPostFixedName(), + skipJob.Spec.Container.Priority, + skipJob.Spec.Container.RestartPolicy, + skipJob.Spec.Container.PodSettings, + skipJob.Name, + ), + ObjectMeta: metav1.ObjectMeta{ + Labels: skipJob.GetDefaultLabels(), + }, + }, + TTLSecondsAfterFinished: skipJob.Spec.Job.TTLSecondsAfterFinished, + CompletionMode: util.PointTo(batchv1.NonIndexedCompletion), + Suspend: skipJob.Spec.Job.Suspend, + } + + return jobSpec +} diff --git a/pkg/resourcegenerator/maskinporten/maskinporten.go b/pkg/resourcegenerator/maskinporten/maskinporten.go new file mode 100644 index 00000000..3bd9cadf --- /dev/null +++ b/pkg/resourcegenerator/maskinporten/maskinporten.go @@ -0,0 +1,87 @@ +package maskinporten + +import ( + 
"fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/api/v1alpha1/digdirator" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + naisiov1 "github.com/nais/liberator/pkg/apis/nais.io/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in maskin porten resource", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate maskin porten resource") + return err + } + + if !MaskinportenSpecifiedInSpec(application.Spec.Maskinporten) { + ctxLog.Info("Maskinporten not specified in spec, skipping generation") + return nil + } + + ctxLog.Debug("Attempting to generate maskin porten resource for application", "application", application.Name) + + var err error + + maskinporten := naisiov1.MaskinportenClient{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "nais.io/v1", + Kind: "MaskinportenClient", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: application.Namespace, + Name: application.Name, + }, + } + + maskinporten.Spec, err = getMaskinportenSpec(application) + if err != nil { + return err + } + + r.AddResource(&maskinporten) + ctxLog.Debug("Finished generating maskin porten resource for application", "application", application.Name) + return nil +} + +func getMaskinportenSpec(application *skiperatorv1alpha1.Application) (naisiov1.MaskinportenClientSpec, error) { + secretName, err := GetMaskinportenSecretName(application.Name) + if err != nil { + return naisiov1.MaskinportenClientSpec{}, err + } + + scopes := naisiov1.MaskinportenScope{} + if application.Spec.Maskinporten.Scopes != nil { + scopes = *application.Spec.Maskinporten.Scopes + } + + return naisiov1.MaskinportenClientSpec{ + ClientName: getClientNameMaskinporten(application.Name, application.Spec.Maskinporten), + SecretName: secretName, + Scopes: scopes, + }, nil +} + +func getClientNameMaskinporten(applicationName string, maskinportenSettings *digdirator.Maskinporten) string { + if maskinportenSettings.ClientName != nil { + return *maskinportenSettings.ClientName + } + + return applicationName +} + +func MaskinportenSpecifiedInSpec(maskinportenSettings *digdirator.Maskinporten) bool { + return maskinportenSettings != nil && maskinportenSettings.Enabled +} + +func GetMaskinportenSecretName(name string) (string, error) { + return util.GetSecretName("maskinporten", name) +} diff --git a/pkg/resourcegenerator/networking/network_policy.go b/pkg/resourcegenerator/networking/network_policy.go deleted file mode 100644 index 263c0ca4..00000000 --- a/pkg/resourcegenerator/networking/network_policy.go +++ /dev/null @@ -1,313 +0,0 @@ -package networking - -import ( - skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" - "github.com/kartverket/skiperator/api/v1alpha1/podtypes" - "github.com/kartverket/skiperator/pkg/util" - corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - GrafanaAgentName = "grafana-agent" - GrafanaAgentNamespace = GrafanaAgentName -) - -type NetPolOpts struct { - AccessPolicy *podtypes.AccessPolicy - Ingresses *[]string - Port *int - RelatedServices *[]corev1.Service - 
Namespace string - Namespaces *corev1.NamespaceList - Name string - PrometheusConfig *skiperatorv1alpha1.PrometheusConfig - IstioEnabled bool -} - -func CreateNetPolSpec(opts NetPolOpts) *networkingv1.NetworkPolicySpec { - ingressRules := getIngressRules(opts) - egressRules := getEgressRules(opts) - - if len(ingressRules) > 0 || len(egressRules) > 0 { - return &networkingv1.NetworkPolicySpec{ - PolicyTypes: getPolicyTypes(ingressRules, egressRules), - PodSelector: metav1.LabelSelector{ - MatchLabels: util.GetPodAppSelector(opts.Name), - }, - Ingress: ingressRules, - Egress: egressRules, - } - } - - return nil -} - -func getPolicyTypes(ingressRules []networkingv1.NetworkPolicyIngressRule, egressRules []networkingv1.NetworkPolicyEgressRule) []networkingv1.PolicyType { - var policyType []networkingv1.PolicyType - - if len(ingressRules) > 0 { - policyType = append(policyType, networkingv1.PolicyTypeIngress) - } - - if len(egressRules) > 0 { - policyType = append(policyType, networkingv1.PolicyTypeEgress) - } - - return policyType -} - -func getEgressRules(opts NetPolOpts) []networkingv1.NetworkPolicyEgressRule { - var egressRules []networkingv1.NetworkPolicyEgressRule - accessPolicy := opts.AccessPolicy - namespace := opts.Namespace - namespaces := *opts.Namespaces - availableServices := *opts.RelatedServices - - // Egress rules for internal peers - if accessPolicy == nil || availableServices == nil { - return egressRules - } - - for _, outboundRule := range (*accessPolicy).Outbound.Rules { - if outboundRule.Namespace == "" && outboundRule.NamespacesByLabel == nil { - outboundRule.Namespace = namespace - } - - relatedService, isApplicationAvailable := getRelatedService(availableServices, outboundRule, namespaces) - - if !isApplicationAvailable { - continue - } else { - var servicePorts []networkingv1.NetworkPolicyPort - - for _, port := range relatedService.Spec.Ports { - servicePorts = append(servicePorts, networkingv1.NetworkPolicyPort{ - Port: util.PointTo(intstr.FromInt(int(port.Port))), - }) - } - - egressRuleForOutboundRule := networkingv1.NetworkPolicyEgressRule{ - Ports: servicePorts, - To: []networkingv1.NetworkPolicyPeer{ - { - PodSelector: &metav1.LabelSelector{ - MatchLabels: relatedService.Spec.Selector, - }, - NamespaceSelector: getNamespaceSelector(outboundRule, namespace), - }, - }, - } - - egressRules = append(egressRules, egressRuleForOutboundRule) - } - - } - - return egressRules -} - -func getRelatedService(services []corev1.Service, rule podtypes.InternalRule, namespaces corev1.NamespaceList) (corev1.Service, bool) { - for _, service := range services { - if service.Name == rule.Application { - - if service.Namespace == rule.Namespace { - return service, true - } - - if rule.NamespacesByLabel != nil { - if namespaceMatchesNamespacesByLabel(rule.NamespacesByLabel, namespaces) { - return service, true - } - } - - } - - } - - return corev1.Service{}, false - -} - -func namespaceMatchesNamespacesByLabel(namespacesByLabel map[string]string, namespaces corev1.NamespaceList) bool { - for _, namespace := range namespaces.Items { - if namespace.Labels != nil { - for key, value := range namespacesByLabel { - if namespace.Labels[key] == value { - return true - } - } - } - } - - return false -} - -func getIngressRules(opts NetPolOpts) []networkingv1.NetworkPolicyIngressRule { - var ingressRules []networkingv1.NetworkPolicyIngressRule - - if opts.Ingresses != nil && opts.Port != nil && len(*opts.Ingresses) > 0 { - if hasInternalIngress(*opts.Ingresses) { - ingressRules = 
append(ingressRules, getGatewayIngressRule(*opts.Port, true)) - } - - if hasExternalIngress(*opts.Ingresses) { - ingressRules = append(ingressRules, getGatewayIngressRule(*opts.Port, false)) - } - } - - // Allow grafana-agent to scrape - if opts.IstioEnabled { - promScrapeRuleAlloy := networkingv1.NetworkPolicyIngressRule{ - From: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "grafana-alloy"}, - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/instance": "alloy", - "app.kubernetes.io/name": "alloy", - }, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(util.IstioMetricsPortName), - }, - }, - } - - promScrapeRule := networkingv1.NetworkPolicyIngressRule{ - From: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": GrafanaAgentNamespace}, - }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app.kubernetes.io/instance": GrafanaAgentName, - "app.kubernetes.io/name": GrafanaAgentName, - }, - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(util.IstioMetricsPortName), - }, - }, - } - - - - ingressRules = append(ingressRules, promScrapeRule) - ingressRules = append(ingressRules, promScrapeRuleAlloy) - } - - if opts.AccessPolicy == nil { - return ingressRules - } - - if opts.AccessPolicy.Inbound != nil { - inboundTrafficIngressRule := networkingv1.NetworkPolicyIngressRule{ - From: getInboundPolicyPeers(opts.AccessPolicy.Inbound.Rules, opts.Namespace), - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(intstr.FromInt(*opts.Port)), - }, - }, - } - - ingressRules = append(ingressRules, inboundTrafficIngressRule) - } - - return ingressRules -} - -func getInboundPolicyPeers(inboundRules []podtypes.InternalRule, namespace string) []networkingv1.NetworkPolicyPeer { - var policyPeers []networkingv1.NetworkPolicyPeer - - for _, inboundRule := range inboundRules { - - policyPeers = append(policyPeers, networkingv1.NetworkPolicyPeer{ - NamespaceSelector: getNamespaceSelector(inboundRule, namespace), - PodSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": inboundRule.Application}, - }, - }) - } - - return policyPeers -} - -func getNamespaceSelector(rule podtypes.InternalRule, namespace string) *metav1.LabelSelector { - if rule.Namespace != "" { - return &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": rule.Namespace}, - } - } - - if rule.NamespacesByLabel != nil { - return &metav1.LabelSelector{ - MatchLabels: rule.NamespacesByLabel, - } - } - - return &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": namespace}, - } -} - -func hasExternalIngress(ingresses []string) bool { - for _, hostname := range ingresses { - if !util.IsInternal(hostname) { - return true - } - } - - return false -} - -func hasInternalIngress(ingresses []string) bool { - for _, hostname := range ingresses { - if util.IsInternal(hostname) { - return true - } - } - - return false -} - -func getGatewayIngressRule(port int, isInternal bool) networkingv1.NetworkPolicyIngressRule { - ingressRule := networkingv1.NetworkPolicyIngressRule{ - From: []networkingv1.NetworkPolicyPeer{ - { - NamespaceSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"kubernetes.io/metadata.name": "istio-gateways"}, 
- }, - PodSelector: &metav1.LabelSelector{ - MatchLabels: getIngressGatewayLabel(isInternal), - }, - }, - }, - Ports: []networkingv1.NetworkPolicyPort{ - { - Port: util.PointTo(intstr.FromInt(port)), - }, - }, - } - - return ingressRule -} - -func getIngressGatewayLabel(isInternal bool) map[string]string { - if isInternal { - return map[string]string{"app": "istio-ingress-internal"} - } else { - return map[string]string{"app": "istio-ingress-external"} - } -} diff --git a/pkg/resourcegenerator/networkpolicy/defaultdeny/default_deny_network_policy.go b/pkg/resourcegenerator/networkpolicy/defaultdeny/default_deny_network_policy.go new file mode 100644 index 00000000..823841d6 --- /dev/null +++ b/pkg/resourcegenerator/networkpolicy/defaultdeny/default_deny_network_policy.go @@ -0,0 +1,147 @@ +package defaultdeny + +import ( + "fmt" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate default deny network policy for namespace", "namespace", r.GetSKIPObject().GetName()) + + if r.GetType() != reconciliation.NamespaceType { + return fmt.Errorf("default deny namespace only supports namespace type") + } + + networkPolicy := networkingv1.NetworkPolicy{ObjectMeta: metav1.ObjectMeta{Namespace: r.GetSKIPObject().GetName(), Name: "default-deny"}} + + networkPolicy.Spec = networkingv1.NetworkPolicySpec{ + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + // Egress rule for parts of internal server network + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "10.40.0.0/16", + }, + }, + // Egress rule for Internet + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + Except: []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"}, + }, + }, + }, + }, + // Egress rule for DNS + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": "kube-system"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"k8s-app": "kube-dns"}, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + // DNS Ports + { + Protocol: util.PointTo(corev1.ProtocolTCP), + Port: util.PointTo(intstr.FromInt(53)), + }, + { + Protocol: util.PointTo(corev1.ProtocolUDP), + Port: util.PointTo(intstr.FromInt(53)), + }, + }, + }, + // Egress rule for Istio XDS + { + To: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "istiod"}, + }, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": "istio-system"}, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: util.PointTo(intstr.FromInt(15012)), + }, + }, + }, + // Egress rule for grafana-agent + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": "grafana-agent"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/instance": "grafana-agent", + "app.kubernetes.io/name": "grafana-agent", + }, + }, + }, + }, + 
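+ // 4317/4318 below are presumably the agent's OTLP gRPC and OTLP HTTP receiver ports (assumption based on OpenTelemetry defaults, not stated elsewhere in this patch).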
Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: util.PointTo(corev1.ProtocolTCP), + Port: util.PointTo(intstr.FromInt(4317)), + }, + { + Protocol: util.PointTo(corev1.ProtocolTCP), + Port: util.PointTo(intstr.FromInt(4318)), + }, + }, + }, + // Egress rule for grafana-alloy + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": "grafana-alloy"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/instance": "alloy", + "app.kubernetes.io/name": "alloy", + }, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: util.PointTo(corev1.ProtocolTCP), + Port: util.PointTo(intstr.FromInt(4317)), + }, + { + Protocol: util.PointTo(corev1.ProtocolTCP), + Port: util.PointTo(intstr.FromInt(4318)), + }, + }, + }, + }, + } + + r.AddResource(&networkPolicy) + + ctxLog.Debug("Finished generating default deny network policy for namespace", "namespace", r.GetSKIPObject().GetName()) + return nil +} diff --git a/pkg/resourcegenerator/networkpolicy/dynamic/common.go b/pkg/resourcegenerator/networkpolicy/dynamic/common.go new file mode 100644 index 00000000..50f1fe5c --- /dev/null +++ b/pkg/resourcegenerator/networkpolicy/dynamic/common.go @@ -0,0 +1,272 @@ +package dynamic + +import ( + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/api/v1alpha1/podtypes" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + multiGenerator.Register(reconciliation.ApplicationType, generateForCommon) + multiGenerator.Register(reconciliation.JobType, generateForCommon) +} + +// TODO fix mess +func generateForCommon(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate network policy for skipobj", "skipobj", r.GetSKIPObject().GetName()) + + object := r.GetSKIPObject() + name := object.GetName() + namespace := object.GetNamespace() + if r.GetType() == reconciliation.JobType { + name = util.ResourceNameWithKindPostfix(name, object.GetObjectKind().GroupVersionKind().Kind) + } + + networkPolicy := networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + + accessPolicy := object.GetCommonSpec().AccessPolicy + var ingresses []string + var inboundPort int32 + if r.GetType() == reconciliation.ApplicationType { + ingresses = object.(*skiperatorv1alpha1.Application).Spec.Ingresses + inboundPort = int32(object.(*skiperatorv1alpha1.Application).Spec.Port) + } + + ingressRules := getIngressRules(accessPolicy, ingresses, r.IsIstioEnabled(), namespace, inboundPort) + egressRules := getEgressRules(accessPolicy, namespace) + + netpolSpec := networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{MatchLabels: util.GetPodAppSelector(name)}, + Ingress: ingressRules, + Egress: egressRules, + PolicyTypes: getPolicyTypes(ingressRules, egressRules), + } + + if len(ingressRules) == 0 && len(egressRules) == 0 { + ctxLog.Debug("No rules for networkpolicy, skipping", "type", r.GetType(), "namespace", namespace) + return nil + } + + networkPolicy.Spec = netpolSpec + r.AddResource(&networkPolicy) + ctxLog.Debug("Finished generating networkpolicy", "type", r.GetType(), "namespace", namespace) + return nil +} + +func 
getPolicyTypes(ingressRules []networkingv1.NetworkPolicyIngressRule, egressRules []networkingv1.NetworkPolicyEgressRule) []networkingv1.PolicyType { + var policyType []networkingv1.PolicyType + + if len(ingressRules) > 0 { + policyType = append(policyType, networkingv1.PolicyTypeIngress) + } + + if len(egressRules) > 0 { + policyType = append(policyType, networkingv1.PolicyTypeEgress) + } + + return policyType +} + +func getEgressRules(accessPolicy *podtypes.AccessPolicy, appNamespace string) []networkingv1.NetworkPolicyEgressRule { + var egressRules []networkingv1.NetworkPolicyEgressRule + + if accessPolicy == nil || accessPolicy.Outbound == nil { + return egressRules + } + + for _, rule := range accessPolicy.Outbound.Rules { + if rule.Ports == nil { + continue + } + egressRules = append(egressRules, getEgressRule(rule, appNamespace)) + } + + return egressRules +} + +func getEgressRule(outboundRule podtypes.InternalRule, namespace string) networkingv1.NetworkPolicyEgressRule { + egressRuleForOutboundRule := networkingv1.NetworkPolicyEgressRule{ + To: []networkingv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{ + MatchLabels: util.GetPodAppSelector(outboundRule.Application), + }, + NamespaceSelector: getNamespaceSelector(outboundRule, namespace), + }, + }, + Ports: outboundRule.Ports, + } + return egressRuleForOutboundRule +} + +// TODO Clean up better +func getIngressRules(accessPolicy *podtypes.AccessPolicy, ingresses []string, istioEnabled bool, namespace string, port int32) []networkingv1.NetworkPolicyIngressRule { + var ingressRules []networkingv1.NetworkPolicyIngressRule + + if ingresses != nil && len(ingresses) > 0 { + if hasInternalIngress(ingresses) { + ingressRules = append(ingressRules, getGatewayIngressRule(true, port)) + } + + if hasExternalIngress(ingresses) { + ingressRules = append(ingressRules, getGatewayIngressRule(false, port)) + } + } + + // Allow grafana-agent to scrape + if istioEnabled { + promScrapeRuleGrafana := networkingv1.NetworkPolicyIngressRule{ + From: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": GrafanaAgentNamespace}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/instance": GrafanaAgentName, + "app.kubernetes.io/name": GrafanaAgentName, + }, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: util.PointTo(util.IstioMetricsPortName), + }, + }, + } + + promScrapeRuleAlloy := networkingv1.NetworkPolicyIngressRule{ + From: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": AlloyAgentNamespace}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/instance": AlloyAgentName, + "app.kubernetes.io/name": AlloyAgentName, + }, + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: util.PointTo(util.IstioMetricsPortName), + }, + }, + } + + ingressRules = append(ingressRules, promScrapeRuleGrafana) + ingressRules = append(ingressRules, promScrapeRuleAlloy) + } + + if accessPolicy == nil { + return ingressRules + } + + if accessPolicy.Inbound != nil { + inboundTrafficIngressRule := networkingv1.NetworkPolicyIngressRule{ + From: getInboundPolicyPeers(accessPolicy.Inbound.Rules, namespace), + } + if port != 0 { + inboundTrafficIngressRule.Ports = []networkingv1.NetworkPolicyPort{{Port: util.PointTo(intstr.FromInt32(port))}} + } + 
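+ // With port == 0 (no fixed inbound port, e.g. dynamic port allocation) the Ports list is left empty, which in NetworkPolicy semantics allows all ports from the selected peers.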
ingressRules = append(ingressRules, inboundTrafficIngressRule) + } + + return ingressRules +} + +// TODO investigate if we can just return nil if SKIPJob +func getInboundPolicyPeers(inboundRules []podtypes.InternalRule, namespace string) []networkingv1.NetworkPolicyPeer { + var policyPeers []networkingv1.NetworkPolicyPeer + + for _, inboundRule := range inboundRules { + + policyPeers = append(policyPeers, networkingv1.NetworkPolicyPeer{ + NamespaceSelector: getNamespaceSelector(inboundRule, namespace), + PodSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": inboundRule.Application}, + }, + }) + } + + return policyPeers +} + +func getNamespaceSelector(rule podtypes.InternalRule, appNamespace string) *metav1.LabelSelector { + if rule.Namespace != "" { + return &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": rule.Namespace}, + } + } + + if rule.NamespacesByLabel != nil { + return &metav1.LabelSelector{ + MatchLabels: rule.NamespacesByLabel, + } + } + + return &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": appNamespace}, + } +} + +func hasExternalIngress(ingresses []string) bool { + for _, hostname := range ingresses { + if !util.IsInternal(hostname) { + return true + } + } + + return false +} + +func hasInternalIngress(ingresses []string) bool { + for _, hostname := range ingresses { + if util.IsInternal(hostname) { + return true + } + } + + return false +} + +func getGatewayIngressRule(isInternal bool, port int32) networkingv1.NetworkPolicyIngressRule { + ingressRule := networkingv1.NetworkPolicyIngressRule{ + From: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"kubernetes.io/metadata.name": "istio-gateways"}, + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: getIngressGatewayLabel(isInternal), + }, + }, + }, + } + if port != 0 { + ingressRule.Ports = []networkingv1.NetworkPolicyPort{{Port: util.PointTo(intstr.FromInt32(port))}} + } + return ingressRule +} + +// TODO Should be in constants or something +func getIngressGatewayLabel(isInternal bool) map[string]string { + if isInternal { + return map[string]string{"app": "istio-ingress-internal"} + } else { + return map[string]string{"app": "istio-ingress-external"} + } +} diff --git a/pkg/resourcegenerator/networkpolicy/dynamic/network_policy.go b/pkg/resourcegenerator/networkpolicy/dynamic/network_policy.go new file mode 100644 index 00000000..57eca62f --- /dev/null +++ b/pkg/resourcegenerator/networkpolicy/dynamic/network_policy.go @@ -0,0 +1,19 @@ +package dynamic + +import ( + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils/generator" +) + +const ( + GrafanaAgentName = "grafana-agent" + GrafanaAgentNamespace = GrafanaAgentName + AlloyAgentName = "alloy" + AlloyAgentNamespace = "grafana-alloy" +) + +var multiGenerator = generator.NewMulti() + +func Generate(r reconciliation.Reconciliation) error { + return multiGenerator.Generate(r, "NetworkPolicy") +} diff --git a/pkg/resourcegenerator/networkpolicy/dynamic/routing.go b/pkg/resourcegenerator/networkpolicy/dynamic/routing.go new file mode 100644 index 00000000..11fd658a --- /dev/null +++ b/pkg/resourcegenerator/networkpolicy/dynamic/routing.go @@ -0,0 +1,74 @@ +package dynamic + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + 
"github.com/kartverket/skiperator/pkg/util" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func init() { + multiGenerator.Register(reconciliation.RoutingType, generateForRouting) +} + +func generateForRouting(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate network policy for routing", "routing", r.GetSKIPObject().GetName()) + routing, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Routing) + if !ok { + return fmt.Errorf("failed to cast object to Routing") + } + + uniqueTargetApps := make(map[string]skiperatorv1alpha1.Route) + for _, route := range routing.Spec.Routes { + uniqueTargetApps[getNetworkPolicyName(routing, route.TargetApp)] = route + } + + for netpolName, route := range uniqueTargetApps { + networkPolicy := networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: routing.Namespace, + Name: netpolName, + }, + } + networkPolicy.Spec = networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: util.GetPodAppSelector(route.TargetApp), + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: util.GetIstioGatewaySelector(), + }, + PodSelector: &metav1.LabelSelector{ + MatchLabels: util.GetIstioGatewayLabelSelector(routing.Spec.Hostname), + }, + }, + }, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: util.PointTo(intstr.FromInt32(route.Port)), + }, + }, + }, + }, + } + + r.AddResource(&networkPolicy) + } + ctxLog.Debug("Finished generating networkpolicy for routing", "routing", routing.Name) + return nil +} + +func getNetworkPolicyName(routing *skiperatorv1alpha1.Routing, targetApp string) string { + return fmt.Sprintf("%s-%s-istio-ingress", routing.Name, targetApp) +} diff --git a/pkg/resourcegenerator/pdb/pod_disruption_budget.go b/pkg/resourcegenerator/pdb/pod_disruption_budget.go new file mode 100644 index 00000000..970d9a35 --- /dev/null +++ b/pkg/resourcegenerator/pdb/pod_disruption_budget.go @@ -0,0 +1,71 @@ +package pdb + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/k8sfeatures" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in pod disruption budget", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate pod disruption budget") + return err + } + ctxLog.Debug("Attempting to generate pdb for application", "application", application.Name) + + pdb := policyv1.PodDisruptionBudget{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} + + if *application.Spec.EnablePDB { + var minReplicas uint + + replicas, err := skiperatorv1alpha1.GetStaticReplicas(application.Spec.Replicas) + if err != nil { + replicasStruct, err := skiperatorv1alpha1.GetScalingReplicas(application.Spec.Replicas) + if err != 
nil {
+ ctxLog.Error(err, "Failed to get replicas")
+ return err
+ } else {
+ minReplicas = replicasStruct.Min
+ }
+ } else {
+ minReplicas = replicas
+ }
+
+ pdb.Spec = policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: util.GetPodAppSelector(application.Name),
+ },
+ MinAvailable: determineMinAvailable(minReplicas),
+ }
+
+ if k8sfeatures.EnhancedPDBAvailable() {
+ pdb.Spec.UnhealthyPodEvictionPolicy = util.PointTo(policyv1.AlwaysAllow)
+ }
+ r.AddResource(&pdb)
+ }
+
+ return nil
+}
+
+func determineMinAvailable(replicasAvailable uint) *intstr.IntOrString {
+ var value intstr.IntOrString
+
+ if replicasAvailable > 1 {
+ value = intstr.FromString("50%")
+ } else {
+ value = intstr.FromInt(0)
+ }
+
+ return &value
+}
diff --git a/pkg/resourcegenerator/core/pod.go b/pkg/resourcegenerator/pod/pod.go
similarity index 96%
rename from pkg/resourcegenerator/core/pod.go
rename to pkg/resourcegenerator/pod/pod.go
index 503f1289..efd36e00 100644
--- a/pkg/resourcegenerator/core/pod.go
+++ b/pkg/resourcegenerator/pod/pod.go
@@ -1,4 +1,4 @@
-package core
+package pod
 
 import (
 "fmt"
@@ -11,6 +11,15 @@ import (
 v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+type SkiperatorTopologyKey string
+
+const (
+ // Hostname is the value populated by the Kubelet.
+ Hostname SkiperatorTopologyKey = "kubernetes.io/hostname"
+ // OnPremFailureDomain is populated to the underlying ESXi hostname by the GKE on VMware tooling.
+ OnPremFailureDomain SkiperatorTopologyKey = "onprem.gke.io/failure-domain-name"
+)
+
 type PodOpts struct {
 IstioEnabled bool
 }
diff --git a/pkg/resourcegenerator/podmonitor/pod_monitor.go b/pkg/resourcegenerator/podmonitor/pod_monitor.go
new file mode 100644
index 00000000..816ea602
--- /dev/null
+++ b/pkg/resourcegenerator/podmonitor/pod_monitor.go
@@ -0,0 +1,60 @@
+package podmonitor
+
+import (
+ "fmt"
+ skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+ "github.com/kartverket/skiperator/pkg/reconciliation"
+ "github.com/kartverket/skiperator/pkg/util"
+ pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "strings"
+)
+
+func Generate(r reconciliation.Reconciliation) error {
+ ctxLog := r.GetLogger()
+ ctxLog.Debug("Attempting to generate podmonitor for skipjob", "skipjob", r.GetSKIPObject().GetName())
+
+ if r.GetType() != reconciliation.JobType {
+ return fmt.Errorf("podmonitor only supports skipjob type, got %s", r.GetType())
+ }
+
+ skipJob := r.GetSKIPObject().(*skiperatorv1alpha1.SKIPJob)
+
+ if skipJob.Spec.Prometheus == nil {
+ return nil
+ }
+
+ podMonitor := pov1.PodMonitor{ObjectMeta: metav1.ObjectMeta{
+ Name: skipJob.Name + "-monitor",
+ Namespace: skipJob.Namespace,
+ Labels: map[string]string{"instance": "primary"},
+ }}
+
+ podMonitor.Spec = pov1.PodMonitorSpec{
+ Selector: metav1.LabelSelector{
+ MatchLabels: util.GetPodAppSelector(skipJob.Name),
+ },
+ NamespaceSelector: pov1.NamespaceSelector{
+ MatchNames: []string{skipJob.Namespace},
+ },
+ PodMetricsEndpoints: []pov1.PodMetricsEndpoint{
+ {
+ Path: util.IstioMetricsPath,
+ TargetPort: &util.IstioMetricsPortName,
+ },
+ },
+ }
+ if !skipJob.Spec.Prometheus.AllowAllMetrics {
+ podMonitor.Spec.PodMetricsEndpoints[0].MetricRelabelConfigs = []pov1.RelabelConfig{
+ {
+ Action: "drop",
+ Regex: strings.Join(util.DefaultMetricDropList, "|"),
+ SourceLabels: []pov1.LabelName{"__name__"},
+ },
+ }
+ }
+ r.AddResource(&podMonitor)
+
+ ctxLog.Debug("Finished generating podmonitor for skipjob", "skipjob", skipJob.GetName())
+ 
return nil +} diff --git a/pkg/resourcegenerator/resourceutils/generator/multigenerator.go b/pkg/resourcegenerator/resourceutils/generator/multigenerator.go new file mode 100644 index 00000000..358122c8 --- /dev/null +++ b/pkg/resourcegenerator/resourceutils/generator/multigenerator.go @@ -0,0 +1,41 @@ +package generator + +import ( + "fmt" + + "github.com/kartverket/skiperator/pkg/reconciliation" +) + +type genFunc = func(r reconciliation.Reconciliation) error + +type MultiGenerator struct { + generators map[reconciliation.ObjectType]genFunc +} + +func NewMulti() *MultiGenerator { + return &MultiGenerator{ + generators: map[reconciliation.ObjectType]genFunc{}, + } +} + +// Register will ensure that the supplied generator will be used for a given reconciliation object type. +func (g *MultiGenerator) Register(objectType reconciliation.ObjectType, generator genFunc) { + if generator == nil { + panic("generator cannot be nil") + } + + g.generators[objectType] = generator +} + +// Generate will look up the reconciliation object type and generate the resource using +// the appropriate generator function. +func (g *MultiGenerator) Generate(r reconciliation.Reconciliation, resourceType string) error { + generator, found := g.generators[r.GetType()] + if !found { + err := fmt.Errorf("unsupported type %s for resource %s", r.GetType(), resourceType) + r.GetLogger().Error(err, "failed to generate resource", "resourceType", resourceType, "reconciliationType", r.GetType()) + return err + } + + return generator(r) +} diff --git a/pkg/resourcegenerator/resourceutils/helpers.go b/pkg/resourcegenerator/resourceutils/helpers.go new file mode 100644 index 00000000..d7bd46cf --- /dev/null +++ b/pkg/resourcegenerator/resourceutils/helpers.go @@ -0,0 +1,18 @@ +package resourceutils + +import ( + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +func ShouldScaleToZero(jsonReplicas *apiextensionsv1.JSON) bool { + replicas, err := skiperatorv1alpha1.GetStaticReplicas(jsonReplicas) + if err == nil && replicas == 0 { + return true + } + replicasStruct, err := skiperatorv1alpha1.GetScalingReplicas(jsonReplicas) + if err == nil && (replicasStruct.Min == 0 || replicasStruct.Max == 0) { + return true + } + return false +} diff --git a/pkg/resourcegenerator/resourceutils/metadata.go b/pkg/resourcegenerator/resourceutils/metadata.go new file mode 100644 index 00000000..0be24404 --- /dev/null +++ b/pkg/resourcegenerator/resourceutils/metadata.go @@ -0,0 +1,100 @@ +package resourceutils + +import ( + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "golang.org/x/exp/maps" + "sigs.k8s.io/controller-runtime/pkg/client" + "strings" +) + +var ( + commonAnnotations = map[string]string{ + // Prevents Argo CD from deleting these resources and leaving the namespace + // in a deadlocked deleting state + // https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#no-prune-resources + "argocd.argoproj.io/sync-options": "Prune=false", + } +) + +func SetCommonAnnotations(object client.Object) { + annotations := object.GetAnnotations() + if len(annotations) == 0 { + annotations = make(map[string]string) + } + maps.Copy(annotations, commonAnnotations) + object.SetAnnotations(annotations) +} + +func SetApplicationLabels(object client.Object, app *skiperatorv1alpha1.Application) { + labels := object.GetLabels() + if len(labels) == 0 { + labels = make(map[string]string) + } + if app.Spec.Labels != nil { + 
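+ // User-specified labels are copied first; the default labels copied below take precedence on key conflicts.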
maps.Copy(labels, app.Spec.Labels)
+ }
+ maps.Copy(labels, app.GetDefaultLabels())
+ object.SetLabels(labels)
+
+ setResourceLabels(object, app)
+}
+
+func setResourceLabels(obj client.Object, app *skiperatorv1alpha1.Application) {
+ objectGroupVersionKind := obj.GetObjectKind().GroupVersionKind().Kind
+ resourceLabels, isPresent := getResourceLabels(app, objectGroupVersionKind)
+ if !isPresent {
+ return
+ }
+ labels := obj.GetLabels()
+ maps.Copy(labels, resourceLabels)
+ obj.SetLabels(labels)
+}
+
+func getResourceLabels(app *skiperatorv1alpha1.Application, resourceKind string) (map[string]string, bool) {
+ for k, v := range app.Spec.ResourceLabels {
+ if strings.EqualFold(k, resourceKind) {
+ return v, true
+ }
+ }
+ return nil, false
+}
+
+func FindResourceLabelErrors(app *skiperatorv1alpha1.Application, resources []client.Object) map[string]map[string]string {
+ // Work on a copy so the application's spec is not mutated while pruning matched kinds.
+ labelsWithNoMatch := make(map[string]map[string]string, len(app.Spec.ResourceLabels))
+ maps.Copy(labelsWithNoMatch, app.Spec.ResourceLabels)
+ for k := range labelsWithNoMatch {
+ for _, resource := range resources {
+ if strings.EqualFold(k, resource.GetObjectKind().GroupVersionKind().Kind) {
+ delete(labelsWithNoMatch, k)
+ }
+ }
+ }
+ return labelsWithNoMatch
+}
+
+func SetNamespaceLabels(object client.Object, skipns *skiperatorv1alpha1.SKIPNamespace) {
+ labels := object.GetLabels()
+ if len(labels) == 0 {
+ labels = make(map[string]string)
+ }
+ maps.Copy(labels, skipns.GetDefaultLabels())
+ object.SetLabels(labels)
+}
+
+func SetRoutingLabels(object client.Object, routing *skiperatorv1alpha1.Routing) {
+ labels := object.GetLabels()
+ if len(labels) == 0 {
+ labels = make(map[string]string)
+ }
+ maps.Copy(labels, routing.GetDefaultLabels())
+ object.SetLabels(labels)
+}
+
+// TODO Probably smart to move these SET functions to the controllers or types
+func SetSKIPJobLabels(object client.Object, skipJob *skiperatorv1alpha1.SKIPJob) {
+ labels := object.GetLabels()
+ if len(labels) == 0 {
+ labels = make(map[string]string)
+ }
+ maps.Copy(labels, skipJob.GetDefaultLabels())
+ object.SetLabels(labels)
+}
diff --git a/pkg/resourcegenerator/resourceutils/metadata_test.go b/pkg/resourcegenerator/resourceutils/metadata_test.go
new file mode 100644
index 00000000..13530a22
--- /dev/null
+++ b/pkg/resourcegenerator/resourceutils/metadata_test.go
@@ -0,0 +1,37 @@
+package resourceutils
+
+import (
+ skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ "testing"
+)
+
+func TestSetResourceLabels(t *testing.T) {
+ sa := &corev1.ServiceAccount{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: "testns",
+ },
+ }
+ // need to add gvk to find resource labels
+ AddGVK(scheme.Scheme, sa)
+
+ app := &skiperatorv1alpha1.Application{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "testapp",
+ Namespace: "testns",
+ Labels: map[string]string{"test": "test"},
+ },
+ Spec: skiperatorv1alpha1.ApplicationSpec{
+ ResourceLabels: map[string]map[string]string{"ServiceAccount": {"someLabel": "someValue"}, "OtherResource": {"otherLabel": "otherValue"}},
+ },
+ }
+
+ SetApplicationLabels(sa, app)
+ assert.True(t, len(sa.GetLabels()) == 6)
+ assert.True(t, sa.GetLabels()["someLabel"] == "someValue")
+ assert.Empty(t, sa.GetLabels()["otherLabel"])
+}
diff --git a/pkg/resourcegenerator/resourceutils/refs.go b/pkg/resourcegenerator/resourceutils/refs.go
new file mode 100644
index 00000000..ece1cd6f
--- /dev/null
+++ 
b/pkg/resourcegenerator/resourceutils/refs.go @@ -0,0 +1,21 @@ +package resourceutils + +import ( + certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +func SetOwnerReference(skiperatorObject client.Object, obj client.Object, scheme *runtime.Scheme) error { + switch obj.(type) { + //Certificates are created in istio-gateways namespace, so we cannot set ownerref + case *certmanagerv1.Certificate: + return nil + default: + if err := ctrlutil.SetControllerReference(skiperatorObject, obj, scheme); err != nil { + return err + } + } + return nil +} diff --git a/pkg/resourcegenerator/resourceutils/typemeta.go b/pkg/resourcegenerator/resourceutils/typemeta.go new file mode 100644 index 00000000..abe8292e --- /dev/null +++ b/pkg/resourcegenerator/resourceutils/typemeta.go @@ -0,0 +1,17 @@ +package resourceutils + +import ( + "fmt" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func AddGVK(scheme *runtime.Scheme, obj client.Object) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return fmt.Errorf("failed to get GVK for object, need gvk to proceed. type may not be added to schema: %w", err) + } + obj.GetObjectKind().SetGroupVersionKind(gvk) + return nil +} diff --git a/pkg/resourcegenerator/service/service.go b/pkg/resourcegenerator/service/service.go new file mode 100644 index 00000000..0649e293 --- /dev/null +++ b/pkg/resourcegenerator/service/service.go @@ -0,0 +1,94 @@ +package service + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/api/v1alpha1/podtypes" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "strings" +) + +const defaultPortName = "http" + +var defaultPrometheusPort = corev1.ServicePort{ + Name: util.IstioMetricsPortName.StrVal, + Protocol: corev1.ProtocolTCP, + Port: util.IstioMetricsPortNumber.IntVal, + TargetPort: util.IstioMetricsPortNumber, +} + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in service resource", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "failed to generate service resource") + return err + } + ctxLog.Debug("Attempting to create service for application", "application", application.Name) + + service := corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} + service.Labels = util.GetPodAppSelector(application.Name) + + ports := append(getAdditionalPorts(application.Spec.AdditionalPorts), getServicePort(application.Spec.Port, application.Spec.AppProtocol)) + if r.IsIstioEnabled() { + ports = append(ports, defaultPrometheusPort) + } + + service.Spec = corev1.ServiceSpec{ + Selector: util.GetPodAppSelector(application.Name), + Type: corev1.ServiceTypeClusterIP, + Ports: ports, + } + + ctxLog.Debug("created service manifest for application", "application", application.Name) + + r.AddResource(&service) + + 
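+ // The queued service is not applied here; the resource processor diffs and applies it after all generators have run.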
return nil +} + +func getAdditionalPorts(additionalPorts []podtypes.InternalPort) []corev1.ServicePort { + var ports []corev1.ServicePort + + for _, p := range additionalPorts { + ports = append(ports, corev1.ServicePort{ + Name: p.Name, + Port: p.Port, + Protocol: p.Protocol, + TargetPort: intstr.FromInt32(p.Port), + }) + } + + return ports +} + +func getServicePort(port int, appProtocol string) corev1.ServicePort { + var resolvedProtocol = corev1.ProtocolTCP + if strings.ToLower(appProtocol) == "udp" { + resolvedProtocol = corev1.ProtocolUDP + } + + var resolvedAppProtocol = appProtocol + if len(resolvedAppProtocol) == 0 { + resolvedAppProtocol = "http" + } else if port == 5432 { + // Legacy postgres hack + resolvedAppProtocol = "tcp" + } + + return corev1.ServicePort{ + Name: defaultPortName, + Protocol: resolvedProtocol, + AppProtocol: &resolvedAppProtocol, + Port: int32(port), + TargetPort: intstr.FromInt(port), + } +} diff --git a/pkg/resourcegenerator/serviceaccount/application.go b/pkg/resourcegenerator/serviceaccount/application.go new file mode 100644 index 00000000..3bae8f39 --- /dev/null +++ b/pkg/resourcegenerator/serviceaccount/application.go @@ -0,0 +1,46 @@ +package serviceaccount + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + "golang.org/x/exp/maps" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.ApplicationType, generateForApplication) +} + +func generateForApplication(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate service account for application", "application", r.GetSKIPObject().GetName()) + + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + return fmt.Errorf("failed to cast object to Application") + } + + serviceAccount := corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: application.Namespace, Name: application.Name}} + + if util.IsCloudSqlProxyEnabled(application.Spec.GCP) { + setCloudSqlAnnotations(&serviceAccount, application) + } + r.AddResource(&serviceAccount) + ctxLog.Debug("Finished generating service account for application", "application", application.Name) + return nil +} + +func setCloudSqlAnnotations(serviceAccount *corev1.ServiceAccount, application *skiperatorv1alpha1.Application) { + annotations := serviceAccount.GetAnnotations() + if len(annotations) == 0 { + annotations = make(map[string]string) + } + maps.Copy(annotations, map[string]string{ + "iam.gke.io/gcp-service-account": application.Spec.GCP.CloudSQLProxy.ServiceAccount, + }) + serviceAccount.SetAnnotations(annotations) +} diff --git a/pkg/resourcegenerator/serviceaccount/application_test.go b/pkg/resourcegenerator/serviceaccount/application_test.go new file mode 100644 index 00000000..a6801f57 --- /dev/null +++ b/pkg/resourcegenerator/serviceaccount/application_test.go @@ -0,0 +1,21 @@ +package serviceaccount + +import ( + "github.com/kartverket/skiperator/pkg/testutil" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "testing" +) + +func TestServiceAccountMinimalApp(t *testing.T) { + // Setup + r := testutil.GetTestMinimalAppReconciliation() + // Test + err := Generate(r) + + // Assert + sa := r.GetResources()[0].(*corev1.ServiceAccount) + assert.Nil(t, err) + assert.Equal(t, 1, len(r.GetResources())) + assert.Equal(t, "minimal", 
sa.Name) +} diff --git a/pkg/resourcegenerator/serviceaccount/service_account.go b/pkg/resourcegenerator/serviceaccount/service_account.go new file mode 100644 index 00000000..1341fbb1 --- /dev/null +++ b/pkg/resourcegenerator/serviceaccount/service_account.go @@ -0,0 +1,12 @@ +package serviceaccount + +import ( + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils/generator" +) + +var multiGenerator = generator.NewMulti() + +func Generate(r reconciliation.Reconciliation) error { + return multiGenerator.Generate(r, "ServiceAccount") +} diff --git a/pkg/resourcegenerator/serviceaccount/skipjob.go b/pkg/resourcegenerator/serviceaccount/skipjob.go new file mode 100644 index 00000000..75929549 --- /dev/null +++ b/pkg/resourcegenerator/serviceaccount/skipjob.go @@ -0,0 +1,30 @@ +package serviceaccount + +import ( + "fmt" + + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func init() { + multiGenerator.Register(reconciliation.JobType, generateForSKIPJob) +} + +func generateForSKIPJob(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + ctxLog.Debug("Attempting to generate service account for skipjob", "skipjob", r.GetSKIPObject().GetName()) + + skipJob, ok := r.GetSKIPObject().(*skiperatorv1alpha1.SKIPJob) + if !ok { + return fmt.Errorf("failed to cast object to skipjob") + } + + serviceAccount := corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: skipJob.Namespace, Name: skipJob.KindPostFixedName()}} + + r.AddResource(&serviceAccount) + ctxLog.Debug("Finished generating service account for skipjob", "skipjob", skipJob.Name) + return nil +} diff --git a/pkg/resourcegenerator/servicemonitor/service_monitor.go b/pkg/resourcegenerator/servicemonitor/service_monitor.go new file mode 100644 index 00000000..205ebb9e --- /dev/null +++ b/pkg/resourcegenerator/servicemonitor/service_monitor.go @@ -0,0 +1,67 @@ +package servicemonitor + +import ( + "fmt" + skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/util" + pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "strings" +) + +func Generate(r reconciliation.Reconciliation) error { + ctxLog := r.GetLogger() + if r.GetType() != reconciliation.ApplicationType { + return fmt.Errorf("unsupported type %s in service monitor", r.GetType()) + } + application, ok := r.GetSKIPObject().(*skiperatorv1alpha1.Application) + if !ok { + err := fmt.Errorf("failed to cast resource to application") + ctxLog.Error(err, "Failed to generate service monitor") + return err + } + ctxLog.Debug("Attempting to generate service monitor for application", "application", application.Name) + + serviceMonitor := pov1.ServiceMonitor{ObjectMeta: metav1.ObjectMeta{ + Namespace: application.Namespace, + Name: application.Name, + Labels: map[string]string{"instance": "primary"}, + }} + + if !r.IsIstioEnabled() { + return nil + } + + serviceMonitor.Spec = pov1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: util.GetPodAppSelector(application.Name), + }, + NamespaceSelector: pov1.NamespaceSelector{ + MatchNames: []string{application.Namespace}, + }, + Endpoints: []pov1.Endpoint{ + { + Path: util.IstioMetricsPath, + TargetPort: 
&util.IstioMetricsPortName,
+ MetricRelabelConfigs: []pov1.RelabelConfig{
+ {
+ Action: "drop",
+ Regex: strings.Join(util.DefaultMetricDropList, "|"),
+ SourceLabels: []pov1.LabelName{"__name__"},
+ },
+ },
+ },
+ },
+ }
+
+ // Remove MetricRelabelConfigs if AllowAllMetrics is set to true
+ if application.Spec.Prometheus != nil && application.Spec.Prometheus.AllowAllMetrics {
+ serviceMonitor.Spec.Endpoints[0].MetricRelabelConfigs = nil
+ }
+
+ ctxLog.Debug("Finished generating service monitor for application", "application", application.Name)
+
+ r.AddResource(&serviceMonitor)
+ return nil
+}
diff --git a/pkg/resourcegenerator/core/volumes.go b/pkg/resourcegenerator/volume/volumes.go
similarity index 99%
rename from pkg/resourcegenerator/core/volumes.go
rename to pkg/resourcegenerator/volume/volumes.go
index 29bb6c24..ca94f215 100644
--- a/pkg/resourcegenerator/core/volumes.go
+++ b/pkg/resourcegenerator/volume/volumes.go
@@ -1,4 +1,4 @@
-package core
+package volume
 
 import (
 "github.com/kartverket/skiperator/api/v1alpha1/podtypes"
diff --git a/pkg/resourceprocessor/crud.go b/pkg/resourceprocessor/crud.go
new file mode 100644
index 00000000..ded93142
--- /dev/null
+++ b/pkg/resourceprocessor/crud.go
@@ -0,0 +1,102 @@
+package resourceprocessor
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func (r *ResourceProcessor) create(ctx context.Context, obj client.Object) error {
+ createObj := obj.DeepCopyObject().(client.Object) //copy so we keep gvk
+ err := r.client.Create(ctx, createObj)
+ if err != nil && errors.IsAlreadyExists(err) {
+ if err = r.update(ctx, obj); err != nil {
+ return err
+ }
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *ResourceProcessor) update(ctx context.Context, resource client.Object) error {
+ existing := resource.DeepCopyObject().(client.Object)
+ if err := r.client.Get(ctx, client.ObjectKeyFromObject(resource), existing); err != nil {
+ if errors.IsNotFound(err) {
+ r.log.Info("Couldn't find object trying to update. Attempting create.", "kind", resource.GetObjectKind().GroupVersionKind().Kind, "name", resource.GetName())
+ return r.create(ctx, resource)
+ }
+ r.log.Error(err, "Failed to get object, for unknown reason")
+ return err
+ }
+ copyRequiredData(resource, existing)
+ if err := r.client.Update(ctx, resource); err != nil {
+ r.log.Error(err, "Failed to update object")
+ return err
+ }
+ return nil
+}
+
+func (r *ResourceProcessor) patch(ctx context.Context, newObj client.Object) error {
+ existing := newObj.DeepCopyObject().(client.Object)
+ if err := r.client.Get(ctx, client.ObjectKeyFromObject(newObj), existing); err != nil {
+ if errors.IsNotFound(err) {
+ r.log.Info("Couldn't find object trying to patch. Attempting create.", "kind", newObj.GetObjectKind().GroupVersionKind().Kind, "name", newObj.GetName())
+ return r.create(ctx, newObj)
+ }
+ r.log.Error(err, "Failed to get object, for unknown reason")
+ return err
+ }
+ preparePatch(newObj, existing)
+
+ //TODO move this to getDiffs?
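+ // Skipping no-op patches avoids reconcile churn on Deployments and Jobs, whose server-populated fields would otherwise always differ from the generated manifests.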
+ if !diffBetween(newObj, existing) {
+ r.log.Info("No diff between objects, not patching", "kind", newObj.GetObjectKind().GroupVersionKind().Kind, "name", newObj.GetName())
+ return nil
+ }
+
+ err := r.client.Patch(ctx, newObj, client.MergeFrom(existing))
+ if err != nil {
+ return fmt.Errorf("failed to patch object: %w", err)
+ }
+ return nil
+}
+
+func (r *ResourceProcessor) delete(ctx context.Context, resource client.Object) error {
+ err := r.client.Delete(ctx, resource)
+ if err != nil && errors.IsNotFound(err) {
+ return nil
+ }
+ return err
+}
+
+func (r *ResourceProcessor) listResourcesByLabels(ctx context.Context, namespace string, labels map[string]string, objList *[]client.Object) error {
+ selector := metav1.LabelSelector{MatchLabels: labels}
+ selectorString, err := metav1.LabelSelectorAsSelector(&selector)
+ if err != nil {
+ return fmt.Errorf("failed to convert label selector to selector string: %w", err)
+ }
+
+ listOpts := &client.ListOptions{
+ LabelSelector: selectorString,
+ Namespace: namespace,
+ }
+
+ for _, schema := range r.schemas {
+ if err := r.client.List(ctx, &schema, listOpts); err != nil {
+ return fmt.Errorf("failed to list resources: %w", err)
+ }
+ for _, resource := range schema.Items {
+ obj := resource.DeepCopyObject().(client.Object)
+ *objList = append(*objList, obj)
+ }
+ }
+
+ return nil
+}
+
+func (r *ResourceProcessor) getCertificates(ctx context.Context, labels map[string]string, objList *[]client.Object) error {
+ return r.listResourcesByLabels(ctx, "istio-gateways", labels, objList)
+}
diff --git a/pkg/resourceprocessor/diffs.go b/pkg/resourceprocessor/diffs.go
new file mode 100644
index 00000000..6aaa706f
--- /dev/null
+++ b/pkg/resourceprocessor/diffs.go
@@ -0,0 +1,117 @@
+package resourceprocessor
+
+import (
+ "fmt"
+ "github.com/kartverket/skiperator/pkg/reconciliation"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type resourceDiff struct {
+ shouldDelete []client.Object
+ shouldUpdate []client.Object
+ shouldPatch []client.Object
+ shouldCreate []client.Object
+}
+
+func (r *ResourceProcessor) getDiff(task reconciliation.Reconciliation) (*resourceDiff, error) {
+ liveObjects := make([]client.Object, 0)
+ labels := task.GetSKIPObject().GetDefaultLabels()
+
+ if labels == nil {
+ return nil, fmt.Errorf("labels are nil, can't process resources without labels")
+ }
+ if err := r.listResourcesByLabels(task.GetCtx(), getNamespace(task), labels, &liveObjects); err != nil {
+ return nil, fmt.Errorf("failed to list resources by labels: %w", err)
+ }
+ //TODO ugly as hell
+ certs := make([]client.Object, 0)
+ if err := r.getCertificates(task.GetCtx(), labels, &certs); err != nil {
+ return nil, fmt.Errorf("failed to get certificates: %w", err)
+ }
+ liveObjects = append(liveObjects, certs...)
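+ // Index both live and desired objects by namespaced name plus kind so the two sets can be compared directly.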
+ liveObjectsMap := make(map[string]client.Object) + for _, obj := range liveObjects { + liveObjectsMap[client.ObjectKeyFromObject(obj).String()+obj.GetObjectKind().GroupVersionKind().Kind] = obj + } + + newObjectsMap := make(map[string]client.Object) + for _, obj := range task.GetResources() { + newObjectsMap[client.ObjectKeyFromObject(obj).String()+(obj).GetObjectKind().GroupVersionKind().Kind] = obj + } + + diffs := &resourceDiff{ + shouldDelete: make([]client.Object, 0), + shouldUpdate: make([]client.Object, 0), + shouldPatch: make([]client.Object, 0), + shouldCreate: make([]client.Object, 0), + } + + // Determine resources to delete + for key, liveObj := range liveObjectsMap { + if shouldIgnoreObject(liveObj) { + continue + } + if _, exists := newObjectsMap[key]; !exists { + diffs.shouldDelete = append(diffs.shouldDelete, liveObj) + } + } + + for key, newObj := range newObjectsMap { + if liveObj, exists := liveObjectsMap[key]; exists { + if shouldIgnoreObject(liveObj) { + continue + } + if compareObject(liveObj, newObj) { + if requirePatch(newObj) { + diffs.shouldPatch = append(diffs.shouldPatch, newObj) + } else { + diffs.shouldUpdate = append(diffs.shouldUpdate, newObj) + } + } + } else { + diffs.shouldCreate = append(diffs.shouldCreate, newObj) + } + } + + return diffs, nil +} + +func compareObject(obj1, obj2 client.Object) bool { + if obj1.GetObjectKind().GroupVersionKind().Kind != obj2.GetObjectKind().GroupVersionKind().Kind { + return false + } + if obj1.GetObjectKind().GroupVersionKind().Group != obj2.GetObjectKind().GroupVersionKind().Group { + return false + } + if obj1.GetObjectKind().GroupVersionKind().Version != obj2.GetObjectKind().GroupVersionKind().Version { + return false + } + + if obj1.GetNamespace() != obj2.GetNamespace() { + return false + } + + if obj1.GetName() != obj2.GetName() { + return false + } + + return true +} + +func getNamespace(r reconciliation.Reconciliation) string { + if r.GetType() == reconciliation.NamespaceType { + return r.GetSKIPObject().GetName() + } + return r.GetSKIPObject().GetNamespace() +} + +func shouldIgnoreObject(obj client.Object) bool { + if obj.GetLabels()["skiperator.kartverket.no/ignore"] == "true" { + return true + } + if len(obj.GetOwnerReferences()) > 0 && obj.GetOwnerReferences()[0].Kind == "CronJob" { + return true + } + return false +} diff --git a/pkg/resourceprocessor/diffs_test.go b/pkg/resourceprocessor/diffs_test.go new file mode 100644 index 00000000..7d66cafe --- /dev/null +++ b/pkg/resourceprocessor/diffs_test.go @@ -0,0 +1,189 @@ +package resourceprocessor + +import ( + "context" + "github.com/kartverket/skiperator/api/v1alpha1" + "github.com/kartverket/skiperator/pkg/log" + "github.com/kartverket/skiperator/pkg/reconciliation" + "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" + "github.com/kartverket/skiperator/pkg/resourceschemas" + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestGetDiffForApplicationShouldCreateDelete(t *testing.T) { + scheme := runtime.NewScheme() + resourceschemas.AddSchemas(scheme) + mockClient := fake.NewClientBuilder().Build() + resourceProcessor := NewResourceProcessor(mockClient, resourceschemas.GetApplicationSchemas(scheme), scheme) + + ctx := context.TODO() + namespace := "test" + + application := &v1alpha1.Application{ + TypeMeta: metav1.TypeMeta{}, + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: namespace, + }, + } + liveSA := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "live-sa", + Namespace: namespace, + Labels: application.GetDefaultLabels(), + }, + } + newSA := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-sa", + Namespace: namespace, + Labels: application.GetDefaultLabels(), + }, + } + + ignoreLabels := application.GetDefaultLabels() + ignoreLabels["skiperator.kartverket.no/ignore"] = "true" + liveDeploymentDontDelete := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: namespace, + Labels: ignoreLabels, + }, + } + liveDeploymentIgnorePatchOrCreate := &v1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app2", + Namespace: namespace, + Labels: ignoreLabels, + }, + } + + // Create the live resource in the fake client + err := mockClient.Create(ctx, liveDeploymentDontDelete) + err = mockClient.Create(ctx, liveDeploymentIgnorePatchOrCreate) + err = mockClient.Create(ctx, liveSA) + assert.Nil(t, err) + r := reconciliation.NewApplicationReconciliation(context.TODO(), application, log.NewLogger(), false, nil, nil) + resourceutils.AddGVK(scheme, newSA) + resourceutils.AddGVK(scheme, liveDeploymentIgnorePatchOrCreate) + //build reconcile objects array + r.AddResource(newSA) + r.AddResource(liveDeploymentIgnorePatchOrCreate) + diffs, err := resourceProcessor.getDiff(r) + assert.Nil(t, err) + assert.Len(t, diffs.shouldDelete, 1) + assert.Len(t, diffs.shouldCreate, 1) + assert.Len(t, diffs.shouldUpdate, 0) + assert.Len(t, diffs.shouldPatch, 0) +} + +func TestCompareObjectShouldEqual(t *testing.T) { + sa1 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test", + }, + } + sa2 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test", + }, + } + + isEqual := compareObject(sa1, sa2) + assert.True(t, isEqual) +} + +func TestCompareObjectShouldNotEqualNamespace(t *testing.T) { + sa1 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test", + }, + } + sa2 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test2", + }, + } + + isEqual := compareObject(sa1, sa2) + assert.False(t, isEqual) +} + +func TestCompareObjectShouldNotEqualName(t *testing.T) { + sa1 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test", + }, + } + sa2 := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa2", + Namespace: "test", + }, + } + + isEqual := compareObject(sa1, sa2) + assert.False(t, isEqual) +} + +func TestCompareObjectShouldNotEqualType(t *testing.T) { + sa := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-sa", + Namespace: "test", + }, + } + configMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + 
ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cm",
+ Namespace: "test",
+ },
+ }
+
+ isEqual := compareObject(sa, configMap)
+ assert.False(t, isEqual)
+}
diff --git a/pkg/resourceprocessor/processor.go b/pkg/resourceprocessor/processor.go
new file mode 100644
index 00000000..b25d43c3
--- /dev/null
+++ b/pkg/resourceprocessor/processor.go
@@ -0,0 +1,68 @@
+package resourceprocessor
+
+import (
+ "github.com/kartverket/skiperator/api/v1alpha1"
+ "github.com/kartverket/skiperator/pkg/log"
+ "github.com/kartverket/skiperator/pkg/reconciliation"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type Processor interface {
+ Process(task reconciliation.Reconciliation) []error
+}
+
+type ResourceProcessor struct {
+ client client.Client
+ log log.Logger
+ schemas []unstructured.UnstructuredList
+ scheme *runtime.Scheme
+}
+
+func NewResourceProcessor(client client.Client, schemas []unstructured.UnstructuredList, scheme *runtime.Scheme) *ResourceProcessor {
+ l := log.NewLogger().WithName("ResourceProcessor")
+ return &ResourceProcessor{client: client, log: l, schemas: schemas, scheme: scheme}
+}
+
+func (r *ResourceProcessor) Process(task reconciliation.Reconciliation) []error {
+ if !hasGVK(task.GetResources()) {
+ return []error{v1alpha1.ErrNoGVK}
+ }
+ diffs, err := r.getDiff(task)
+ if err != nil {
+ return []error{err}
+ }
+ results := map[client.Object]error{}
+
+ for _, obj := range diffs.shouldDelete {
+ err = r.delete(task.GetCtx(), obj)
+ results[obj] = err
+ }
+
+ for _, obj := range diffs.shouldCreate {
+ err = r.create(task.GetCtx(), obj)
+ results[obj] = err
+ }
+
+ for _, obj := range diffs.shouldPatch {
+ err = r.patch(task.GetCtx(), obj)
+ results[obj] = err
+ }
+
+ for _, obj := range diffs.shouldUpdate {
+ err = r.update(task.GetCtx(), obj)
+ results[obj] = err
+ }
+
+ var errors []error
+ for obj, err := range results {
+ if err != nil {
+ task.GetSKIPObject().GetStatus().AddSubResourceStatus(obj, err.Error(), v1alpha1.ERROR)
+ errors = append(errors, err)
+ } else {
+ task.GetSKIPObject().GetStatus().AddSubResourceStatus(obj, "has finished synchronizing", v1alpha1.SYNCED)
+ }
+ }
+ return errors
+}
diff --git a/pkg/resourceprocessor/resource.go b/pkg/resourceprocessor/resource.go
new file mode 100644
index 00000000..c9685d55
--- /dev/null
+++ b/pkg/resourceprocessor/resource.go
@@ -0,0 +1,91 @@
+package resourceprocessor
+
+import (
+ "github.com/kartverket/skiperator/pkg/util"
+ "golang.org/x/exp/maps"
+ v1 "k8s.io/api/apps/v1"
+ batchv1 "k8s.io/api/batch/v1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func copyRequiredData(new client.Object, existing client.Object) {
+ new.SetResourceVersion(existing.GetResourceVersion())
+ new.SetUID(existing.GetUID())
+ new.SetCreationTimestamp(existing.GetCreationTimestamp())
+ new.SetSelfLink(existing.GetSelfLink())
+ new.SetOwnerReferences(existing.GetOwnerReferences())
+}
+
+// Patch if you care about status or if kubernetes does changes to the object after creation
+func requirePatch(obj client.Object) bool {
+ switch obj.(type) {
+ case *v1.Deployment:
+ return true
+ case *batchv1.Job:
+ return true
+ }
+ return false
+}
+
+func preparePatch(new client.Object, old client.Object) {
+ switch new.(type) {
+ case *v1.Deployment:
+ deployment := old.(*v1.Deployment)
+ definition := new.(*v1.Deployment)
+ if definition.Spec.Replicas == nil {
+ definition.Spec.Replicas = deployment.Spec.Replicas
+ }
+ // The command "kubectl rollout 
restart" puts an annotation on the deployment template in order to track + // rollouts of different replicasets. This annotation must not trigger a new reconcile, and a quick and easy + // fix is to just remove it from the map before hashing and checking the diff. + if _, rolloutIssued := deployment.Spec.Template.Annotations["kubectl.kubernetes.io/restartedAt"]; rolloutIssued { + delete(deployment.Spec.Template.Annotations, "kubectl.kubernetes.io/restartedAt") + } + case *batchv1.Job: + job := old.(*batchv1.Job) + definition := new.(*batchv1.Job) + maps.Copy(definition.Spec.Template.Labels, job.Spec.Template.Labels) //kubernetes adds labels on creation + definition.Spec.Selector = job.Spec.Selector //is set on creation + } +} + +func diffBetween(old client.Object, new client.Object) bool { + switch new.(type) { + case *v1.Deployment: + deployment := old.(*v1.Deployment) + definition := new.(*v1.Deployment) + deploymentHash := util.GetHashForStructs([]interface{}{&deployment.Spec, &deployment.Labels}) + deploymentDefinitionHash := util.GetHashForStructs([]interface{}{&definition.Spec, &definition.Labels}) + if deploymentHash != deploymentDefinitionHash { + return true + } + + // Same mechanism as "pod-template-hash" + if equality.Semantic.DeepEqual(deployment.DeepCopy().Spec, definition.DeepCopy().Spec) { + return false + } + + return true + + case *batchv1.Job: + job := old.(*batchv1.Job) + definition := new.(*batchv1.Job) + jobHash := util.GetHashForStructs([]interface{}{&job.Spec, &job.Labels}) + jobDefinitionHash := util.GetHashForStructs([]interface{}{&definition.Spec, &definition.Labels}) + if jobHash != jobDefinitionHash { + return true + } + } + return true +} + +func hasGVK(resources []client.Object) bool { + for _, obj := range resources { + gvk := (obj).GetObjectKind().GroupVersionKind().Kind + if gvk == "" { + return false + } + } + return true +} diff --git a/pkg/resourceprocessor/resource_test.go b/pkg/resourceprocessor/resource_test.go new file mode 100644 index 00000000..b06be85b --- /dev/null +++ b/pkg/resourceprocessor/resource_test.go @@ -0,0 +1,19 @@ +package resourceprocessor + +import ( + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "testing" +) + +func TestRequirePatch(t *testing.T) { + depl := &v1.Deployment{} + sa := &corev1.ServiceAccount{} + + deplShouldPatch := requirePatch(depl) + saShouldPatch := requirePatch(sa) + + assert.True(t, deplShouldPatch) + assert.False(t, saShouldPatch) +} diff --git a/pkg/resourceschemas/schemas.go b/pkg/resourceschemas/schemas.go new file mode 100644 index 00000000..bfcaf9d5 --- /dev/null +++ b/pkg/resourceschemas/schemas.go @@ -0,0 +1,108 @@ +package resourceschemas + +/* + * In order to keep the processor generic we need to supply the processor with a list of schemas that it can process. + * If we don't do this then the GVK will remain empty, and it will be impossible to compare resources. 
diff --git a/pkg/resourceschemas/schemas.go b/pkg/resourceschemas/schemas.go
new file mode 100644
index 00000000..bfcaf9d5
--- /dev/null
+++ b/pkg/resourceschemas/schemas.go
@@ -0,0 +1,108 @@
+package resourceschemas
+
+/*
+ * To keep the processor generic, we need to supply it with a list of schemas it can process.
+ * Without them the GVK remains empty, making it impossible to compare resources.
+ */
+import (
+	"fmt"
+	certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	nais_io_v1 "github.com/nais/liberator/pkg/apis/nais.io/v1"
+	pov1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	networkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
+	securityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"
+	appsv1 "k8s.io/api/apps/v1"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
+	networkingv1 "k8s.io/api/networking/v1"
+	policyv1 "k8s.io/api/policy/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	goclientscheme "k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+func AddSchemas(scheme *runtime.Scheme) {
+	utilruntime.Must(goclientscheme.AddToScheme(scheme))
+	utilruntime.Must(skiperatorv1alpha1.AddToScheme(scheme))
+	utilruntime.Must(autoscalingv2.AddToScheme(scheme))
+	utilruntime.Must(securityv1beta1.AddToScheme(scheme))
+	utilruntime.Must(networkingv1beta1.AddToScheme(scheme))
+	utilruntime.Must(certmanagerv1.AddToScheme(scheme))
+	utilruntime.Must(policyv1.AddToScheme(scheme))
+	utilruntime.Must(pov1.AddToScheme(scheme))
+	utilruntime.Must(nais_io_v1.AddToScheme(scheme))
+	utilruntime.Must(appsv1.AddToScheme(scheme))
+	utilruntime.Must(corev1.AddToScheme(scheme))
+}
+
+func addGVKToList(lists []client.ObjectList, scheme *runtime.Scheme) []unstructured.UnstructuredList {
+	listsWithGVKs := make([]unstructured.UnstructuredList, 0)
+	for _, list := range lists {
+		unstructuredList := unstructured.UnstructuredList{}
+		gvk, err := apiutil.GVKForObject(list, scheme)
+		if err != nil {
+			panic(fmt.Errorf("failed to get GVK for object, can't start without schemas: %w", err))
+		}
+		unstructuredList.SetGroupVersionKind(gvk)
+		listsWithGVKs = append(listsWithGVKs, unstructuredList)
+	}
+	return listsWithGVKs
+}
+
+func GetApplicationSchemas(scheme *runtime.Scheme) []unstructured.UnstructuredList {
+	return addGVKToList([]client.ObjectList{
+		&appsv1.DeploymentList{},
+		&corev1.ServiceList{},
+		&corev1.ConfigMapList{},
+		&networkingv1beta1.ServiceEntryList{},
+		&networkingv1beta1.GatewayList{},
+		&autoscalingv2.HorizontalPodAutoscalerList{},
+		&networkingv1beta1.VirtualServiceList{},
+		&securityv1beta1.PeerAuthenticationList{},
+		&corev1.ServiceAccountList{},
+		&policyv1.PodDisruptionBudgetList{},
+		&networkingv1.NetworkPolicyList{},
+		&securityv1beta1.AuthorizationPolicyList{},
+		&nais_io_v1.MaskinportenClientList{},
+		&nais_io_v1.IDPortenClientList{},
+		&pov1.ServiceMonitorList{},
+		&pov1.PodMonitorList{},
+		&certmanagerv1.CertificateList{},
+	}, scheme)
+}
+
+func GetJobSchemas(scheme *runtime.Scheme) []unstructured.UnstructuredList {
+	return addGVKToList([]client.ObjectList{
+		&batchv1.CronJobList{},
+		&batchv1.JobList{},
+		&networkingv1.NetworkPolicyList{},
+		&corev1.ServiceAccountList{},
+		&networkingv1beta1.ServiceEntryList{},
+		&corev1.ConfigMapList{},
+		&pov1.PodMonitorList{},
+	}, scheme)
+}
+
+func GetRoutingSchemas(scheme *runtime.Scheme) []unstructured.UnstructuredList {
+	return addGVKToList([]client.ObjectList{
+		&certmanagerv1.CertificateList{},
+		&networkingv1beta1.GatewayList{},
+		&networkingv1.NetworkPolicyList{},
+		&networkingv1beta1.VirtualServiceList{},
+	}, scheme)
+}
+
+func GetNamespaceSchemas(scheme *runtime.Scheme) []unstructured.UnstructuredList {
+	return addGVKToList([]client.ObjectList{
+		&corev1.NamespaceList{},
+		&corev1.ConfigMapList{},
+		&networkingv1.NetworkPolicyList{},
+		&networkingv1beta1.SidecarList{},
+		&corev1.SecretList{},
+	}, scheme)
+}
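
addGVKToList exists because typed Go objects carry no TypeMeta at runtime; the scheme is the only way to recover their GVK. A small standalone demonstration of that gap, using the same apiutil call as above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

func main() {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	svc := &corev1.Service{}
	// A freshly constructed typed object has an empty Kind, which is
	// why the processor needs a scheme before it can compare anything.
	fmt.Printf("before: %q\n", svc.GetObjectKind().GroupVersionKind().Kind) // ""

	gvk, err := apiutil.GVKForObject(svc, scheme)
	if err != nil {
		panic(err)
	}
	svc.GetObjectKind().SetGroupVersionKind(gvk)
	fmt.Printf("after:  %q\n", svc.GetObjectKind().GroupVersionKind().Kind) // "Service"
}
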
diff --git a/pkg/resourceschemas/schemas_test.go b/pkg/resourceschemas/schemas_test.go
new file mode 100644
index 00000000..f5e2e013
--- /dev/null
+++ b/pkg/resourceschemas/schemas_test.go
@@ -0,0 +1,37 @@
+package resourceschemas
+
+import (
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"testing"
+)
+
+func TestAddGVK(t *testing.T) {
+	// arrange
+	scheme := runtime.NewScheme()
+	assert.NoError(t, corev1.AddToScheme(scheme))
+	list := []client.ObjectList{&corev1.ServiceList{}}
+	// act
+	result := addGVKToList(list, scheme)
+	// assert
+	assert.NotEmpty(t, result)
+	assert.NotEmpty(t, result[0].GroupVersionKind().Kind)
+	assert.NotEmpty(t, result[0].GroupVersionKind().Version)
+	assert.Equal(t, schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceList"}, result[0].GroupVersionKind())
+}
+
+func TestGetApplicationSchemas(t *testing.T) {
+	// arrange
+	scheme := runtime.NewScheme()
+	AddSchemas(scheme)
+	// act
+	result := GetApplicationSchemas(scheme)
+	// assert
+	assert.NotEmpty(t, result)
+	assert.NotEmpty(t, result[0].GroupVersionKind().Kind)
+	assert.NotEmpty(t, result[0].GroupVersionKind().Version)
+	assert.Equal(t, schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DeploymentList"}, result[0].GroupVersionKind())
+}
diff --git a/pkg/testutil/reconciliation.go b/pkg/testutil/reconciliation.go
new file mode 100644
index 00000000..04b5b60a
--- /dev/null
+++ b/pkg/testutil/reconciliation.go
@@ -0,0 +1,33 @@
+package testutil
+
+import (
+	"context"
+	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
+	"github.com/kartverket/skiperator/pkg/log"
+	"github.com/kartverket/skiperator/pkg/reconciliation"
+	"golang.org/x/exp/maps"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func GetTestMinimalAppReconciliation() *reconciliation.ApplicationReconciliation {
+	application := &skiperatorv1alpha1.Application{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "minimal",
+			Namespace: "test",
+			Labels:    make(map[string]string),
+		},
+	}
+	application.Spec = skiperatorv1alpha1.ApplicationSpec{
+		Image: "image",
+		Port:  8080,
+	}
+	application.FillDefaultsSpec()
+	maps.Copy(application.Labels, application.GetDefaultLabels())
+	identityConfigMap := corev1.ConfigMap{}
+	identityConfigMap.Data = map[string]string{"workloadIdentityPool": "test-pool"}
+	ctx := context.TODO()
+	r := reconciliation.NewApplicationReconciliation(ctx, application, log.NewLogger(), false, nil, &identityConfigMap)
+
+	return r
+}
diff --git a/pkg/util/conditions.go b/pkg/util/conditions.go
deleted file mode 100644
index 9ef7f568..00000000
--- a/pkg/util/conditions.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package util
-
-import (
-	"context"
-	"fmt"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/log"
-	"time"
-)
-
-type ConditionsAware interface {
-	GetConditions() []metav1.Condition
-	SetConditions(conditions []metav1.Condition)
-}
-
-func AppendCondition(ctx context.Context, reconcilerClient client.Client, object client.Object,
-	typeName string, status
metav1.ConditionStatus, reason string, message string) error { - logger := log.FromContext(ctx) - - conditionsAware, conversionSuccessful := (object).(ConditionsAware) - if conversionSuccessful { - timeNow := metav1.Time{Time: time.Now()} - condition := metav1.Condition{Type: typeName, Status: status, Reason: reason, Message: message, LastTransitionTime: timeNow} - conditionsAware.SetConditions(append(conditionsAware.GetConditions(), condition)) - err := reconcilerClient.Status().Update(ctx, object) - if err != nil { - errMessage := "Custom resource status update failed" - logger.Info(errMessage) - return fmt.Errorf(errMessage) - } - - } else { - errMessage := "Status cannot be set, resource doesn't support conditions" - logger.Info(errMessage) - return fmt.Errorf(errMessage) - } - return nil -} diff --git a/pkg/util/constants.go b/pkg/util/constants.go index f36b4771..f7e47878 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -5,13 +5,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -var CommonAnnotations = map[string]string{ - // Prevents Argo CD from deleting these resources and leaving the namespace - // in a deadlocked deleting state - // https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#no-prune-resources - "argocd.argoproj.io/sync-options": "Prune=false", -} - const SkiperatorUser = int64(150) var ( diff --git a/pkg/util/digest.go b/pkg/util/digest.go index 765bd5b8..929662bc 100644 --- a/pkg/util/digest.go +++ b/pkg/util/digest.go @@ -10,24 +10,23 @@ import ( "sigs.k8s.io/kustomize/kyaml/yaml" ) -func ResolveImageTags(ctx context.Context, log logr.Logger, config *rest.Config, deployment *appsv1.Deployment) (*appsv1.Deployment, error) { +func ResolveImageTags(ctx context.Context, log logr.Logger, config *rest.Config, deployment *appsv1.Deployment) error { n, err := parseManifest(deployment) if err != nil { - return nil, err + return err } if err = resolve.ImageTags(ctx, log, config, n, []string{}); err != nil { - return nil, err + return err } b, _ := n.MarshalJSON() - var res appsv1.Deployment - err = json.Unmarshal(b, &res) + err = json.Unmarshal(b, &deployment) if err != nil { - return nil, err + return err } - return &res, nil + return nil } func parseManifest(deployment *appsv1.Deployment) (*yaml.RNode, error) { diff --git a/pkg/util/helperfunctions.go b/pkg/util/helperfunctions.go index 2a501148..6102fb57 100644 --- a/pkg/util/helperfunctions.go +++ b/pkg/util/helperfunctions.go @@ -7,7 +7,6 @@ import ( "github.com/mitchellh/hashstructure/v2" "github.com/nais/liberator/pkg/namegen" "github.com/r3labs/diff/v3" - "golang.org/x/exp/maps" "hash/fnv" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -22,6 +21,8 @@ import ( "unicode" ) +//TODO Clean up this file, move functions to more appropriate files + var internalPattern = regexp.MustCompile(`[^.]\.skip\.statkart\.no|[^.]\.kartverket-intern.cloud`) func IsInternal(hostname string) bool { @@ -94,15 +95,6 @@ func ErrDoPanic(err error, message string) { } } -func SetCommonAnnotations(object client.Object) { - annotations := object.GetAnnotations() - if len(annotations) == 0 { - annotations = make(map[string]string) - } - maps.Copy(annotations, CommonAnnotations) - object.SetAnnotations(annotations) -} - func PointTo[T any](x T) *T { return &x } diff --git a/pkg/util/reconciler.go b/pkg/util/reconciler.go deleted file mode 100644 index 8699c313..00000000 --- a/pkg/util/reconciler.go +++ /dev/null @@ -1,223 +0,0 @@ -package util - -import ( - "context" - "fmt" - 
"github.com/kartverket/skiperator/pkg/flags" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/kartverket/skiperator/api/v1alpha1/podtypes" - corev1 "k8s.io/api/core/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// ReconcilerBase is a base struct from which all reconcilers can be derived from. By doing so your reconcilers will also inherit a set of utility functions -// To inherit from reconciler just build your finalizer this way: -// -// type MyReconciler struct { -// util.ReconcilerBase -// ... other optional fields ... -// } -type ReconcilerBase struct { - apireader client.Reader - client client.Client - extensionsClient *apiextensionsclient.Clientset - scheme *runtime.Scheme - restConfig *rest.Config - recorder record.EventRecorder - features *flags.Features -} - -func NewReconcilerBase(client client.Client, extensionsClient *apiextensionsclient.Clientset, scheme *runtime.Scheme, restConfig *rest.Config, recorder record.EventRecorder, apireader client.Reader) ReconcilerBase { - return ReconcilerBase{ - apireader: apireader, - client: client, - extensionsClient: extensionsClient, - scheme: scheme, - restConfig: restConfig, - recorder: recorder, - features: flags.FeatureFlags, - } -} - -// NewReconcilerBase is a construction function to create a new ReconcilerBase. -func NewFromManager(mgr manager.Manager, recorder record.EventRecorder) ReconcilerBase { - extensionsClient, err := apiextensionsclient.NewForConfig(mgr.GetConfig()) - if err != nil { - ctrl.Log.Error(err, "could not create extensions client, won't be able to peek at CRDs") - } - - return NewReconcilerBase(mgr.GetClient(), extensionsClient, mgr.GetScheme(), mgr.GetConfig(), recorder, mgr.GetAPIReader()) -} - -// GetClient returns the underlying client -func (r *ReconcilerBase) GetClient() client.Client { - return r.client -} - -// GetApiExtensionsClient returns the underlying API Extensions client -func (r *ReconcilerBase) GetApiExtensionsClient() *apiextensionsclient.Clientset { - return r.extensionsClient -} - -// GetRestConfig returns the underlying rest config -func (r *ReconcilerBase) GetRestConfig() *rest.Config { - return r.restConfig -} - -// GetRecorder returns the underlying recorder -func (r *ReconcilerBase) GetRecorder() record.EventRecorder { - return r.recorder -} - -// GetScheme returns the scheme -func (r *ReconcilerBase) GetScheme() *runtime.Scheme { - return r.scheme -} - -func (r *ReconcilerBase) GetEgressServices(ctx context.Context, owner client.Object, accessPolicy *podtypes.AccessPolicy) ([]corev1.Service, error) { - var egressServices []corev1.Service - if accessPolicy == nil { - return egressServices, nil - } - - for _, outboundRule := range accessPolicy.Outbound.Rules { - namespaces := []string{} - - if outboundRule.Namespace != "" { - namespaces = []string{outboundRule.Namespace} - } else if outboundRule.NamespacesByLabel == nil { - if outboundRule.Namespace == "" { - namespaces = []string{owner.GetNamespace()} - } - - } else { - namespaceList := corev1.NamespaceList{} - - err := r.GetClient().List(ctx, &namespaceList, client.MatchingLabels(outboundRule.NamespacesByLabel)) - - if errors.IsNotFound(err) { - 
r.EmitWarningEvent(owner, "NoNamespaces", fmt.Sprintf("cannot find any namespaces")) - return egressServices, err - } else if err != nil { - return egressServices, err - } - - for _, namespace := range namespaceList.Items { - namespaces = append(namespaces, namespace.Name) - } - - } - - for _, namespace := range namespaces { - service := corev1.Service{} - - err := r.GetClient().Get(ctx, client.ObjectKey{ - Namespace: namespace, - Name: outboundRule.Application, - }, &service) - if errors.IsNotFound(err) { - r.EmitWarningEvent(owner, "MissingApplication", fmt.Sprintf("cannot find Application named %s in Namespace %s, egress rule will not be added", outboundRule.Application, outboundRule.Namespace)) - continue - } else if err != nil { - return egressServices, err - } - - egressServices = append(egressServices, service) - } - } - - return egressServices, nil -} - -func (r *ReconcilerBase) GetNamespaces(ctx context.Context, owner client.Object) (corev1.NamespaceList, error) { - namespaces := corev1.NamespaceList{} - - err := r.GetClient().List(ctx, &namespaces) - - if errors.IsNotFound(err) { - r.EmitWarningEvent(owner, "NoNamespaces", fmt.Sprintf("cannot find any namespaces")) - return namespaces, err - } else if err != nil { - return namespaces, err - } - - return namespaces, nil -} - -func (r *ReconcilerBase) IsIstioEnabledForNamespace(ctx context.Context, namespaceName string) bool { - namespace := corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespaceName, - }, - } - - err := r.GetClient().Get(ctx, client.ObjectKeyFromObject(&namespace), &namespace) - if err != nil { - return false - } - - v, exists := namespace.Labels[IstioRevisionLabel] - - return exists && len(v) > 0 -} - -func hasIgnoreLabel(obj client.Object) bool { - labels := obj.GetLabels() - return labels["skiperator.kartverket.no/ignore"] == "true" -} - -func (r *ReconcilerBase) ShouldReconcile(ctx context.Context, obj client.Object) (bool, error) { - copyObj := obj.DeepCopyObject().(client.Object) - err := r.GetClient().Get(ctx, client.ObjectKeyFromObject(copyObj), copyObj) - err = client.IgnoreNotFound(err) - - if err != nil { - return false, err - } - - shouldReconcile := !hasIgnoreLabel(copyObj) - - return shouldReconcile, nil -} - -func (r *ReconcilerBase) EmitWarningEvent(object runtime.Object, reason string, message string) { - r.GetRecorder().Event( - object, - corev1.EventTypeWarning, reason, - message, - ) -} - -func (r *ReconcilerBase) EmitNormalEvent(object runtime.Object, reason string, message string) { - r.GetRecorder().Event( - object, - corev1.EventTypeNormal, reason, - message, - ) -} - -func (r *ReconcilerBase) DeleteObjectIfExists(ctx context.Context, object client.Object) error { - err := client.IgnoreNotFound(r.GetClient().Delete(ctx, object)) - if err != nil { - return err - } - - return nil -} - -func DoNotRequeue() (reconcile.Result, error) { - return reconcile.Result{}, nil -} - -func RequeueWithError(err error) (reconcile.Result, error) { - return reconcile.Result{}, err -} diff --git a/tests/application/access-policy/advanced-assert.yaml b/tests/application/access-policy/advanced-assert.yaml index 885b3817..0ce23bd1 100644 --- a/tests/application/access-policy/advanced-assert.yaml +++ b/tests/application/access-policy/advanced-assert.yaml @@ -97,3 +97,33 @@ spec: - name: https number: 443 protocol: HTTPS +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: access-policy + namespace: access-policy-ns +spec: + image: image + port: 8080 + accessPolicy: 
+ inbound: + rules: + - application: access-policy-other + namespace: access-policy-other + outbound: + external: + - host: example.com + ports: + - name: http + port: 80 + protocol: HTTP + - host: foo.com + rules: + - application: access-policy-two + - application: access-policy-other + namespace: access-policy-other +status: + conditions: + - type: InternalRulesValid + status: "True" diff --git a/tests/application/access-policy/advanced-patch-assert.yaml b/tests/application/access-policy/advanced-patch-assert.yaml index ea2bfb39..d0f5965f 100644 --- a/tests/application/access-policy/advanced-patch-assert.yaml +++ b/tests/application/access-policy/advanced-patch-assert.yaml @@ -35,5 +35,5 @@ spec: matchLabels: app: access-policy-other ports: - - port: 8080 - protocol: TCP + - port: 8080 + protocol: TCP diff --git a/tests/application/access-policy/bad-policy-assert.yaml b/tests/application/access-policy/bad-policy-assert.yaml new file mode 100644 index 00000000..f12d5588 --- /dev/null +++ b/tests/application/access-policy/bad-policy-assert.yaml @@ -0,0 +1,81 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-app-same-namespace +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - application: doesnt-exist +status: + conditions: + - type: InternalRulesValid + status: "False" +--- +apiVersion: v1 +kind: Event +reason: InvalidAccessPolicy +source: + component: application-controller +involvedObject: + apiVersion: skiperator.kartverket.no/v1alpha1 + kind: Application + name: no-app-same-namespace +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-app-different-namespace +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - application: doesnt-exist-diff-ns + namespace: non-existing +status: + conditions: + - type: InternalRulesValid + status: "False" +--- +apiVersion: v1 +kind: Event +reason: InvalidAccessPolicy +source: + component: application-controller +involvedObject: + apiVersion: skiperator.kartverket.no/v1alpha1 + kind: Application + name: no-app-different-namespace +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-namespace-with-labels +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - namespacesByLabel: + test: dontexist + application: access-policy-other +status: + conditions: + - type: InternalRulesValid + status: "False" + +--- +apiVersion: v1 +kind: Event +reason: InvalidAccessPolicy +source: + component: application-controller +involvedObject: + apiVersion: skiperator.kartverket.no/v1alpha1 + kind: Application + name: no-namespace-with-labels diff --git a/tests/application/access-policy/bad-policy-error.yaml b/tests/application/access-policy/bad-policy-error.yaml new file mode 100644 index 00000000..6a69df3b --- /dev/null +++ b/tests/application/access-policy/bad-policy-error.yaml @@ -0,0 +1,32 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: no-app-same-namespace + labels: + app.kubernetes.io/managed-by: skiperator + application.skiperator.no/app: no-app-same-namespace + application.skiperator.no/app-name: no-app-same-namespace + application.skiperator.no/app-namespace: access-policy-ns + skiperator.kartverket.no/controller: application + annotations: + argocd.argoproj.io/sync-options: Prune=false + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + kind: Application + name: no-app-same-namespace + controller: true + blockOwnerDeletion: true 
+spec: + podSelector: + matchLabels: + app: no-app-same-namespace + egress: + - to: + - podSelector: + matchLabels: + app: doesnt-exist + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ($namespace) + policyTypes: + - Egress \ No newline at end of file diff --git a/tests/application/access-policy/bad-policy.yaml b/tests/application/access-policy/bad-policy.yaml new file mode 100644 index 00000000..efcc2509 --- /dev/null +++ b/tests/application/access-policy/bad-policy.yaml @@ -0,0 +1,38 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-app-same-namespace +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - application: doesnt-exist +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-app-different-namespace +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - application: doesnt-exist-diff-ns + namespace: non-existing +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: no-namespace-with-labels +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - namespacesByLabel: + test: dontexist + application: access-policy-other diff --git a/tests/application/access-policy/chainsaw-test.yaml b/tests/application/access-policy/chainsaw-test.yaml index 54e7d539..bfcd414d 100644 --- a/tests/application/access-policy/chainsaw-test.yaml +++ b/tests/application/access-policy/chainsaw-test.yaml @@ -23,6 +23,10 @@ spec: file: advanced-patch.yaml - assert: file: advanced-patch-assert.yaml - - - + - try : + - apply: + file: bad-policy.yaml + - assert: + file: bad-policy-assert.yaml + - error: + file: bad-policy-error.yaml diff --git a/tests/application/authorization-settings/patch-application-assert.yaml b/tests/application/authorization-settings/patch-application-assert.yaml index d09f1835..df556a73 100644 --- a/tests/application/authorization-settings/patch-application-assert.yaml +++ b/tests/application/authorization-settings/patch-application-assert.yaml @@ -20,26 +20,22 @@ spec: selector: matchLabels: app: allow-list -# This assertion does not work in the emulated control plane of Kuttl. See https://github.com/kudobuilder/kuttl/issues/471 -# for progress regarding this issue -# TODO Add this back when issue fixed -#--- -#apiVersion: security.istio.io/v1beta1 -#kind: AuthorizationPolicy -#metadata: -# name: allow-all-deny -#spec: -# action: DENY -# rules: -# - from: -# - source: -# namespaces: -# - istio-gateways -# to: -# - operation: -# paths: -# - /actuator* -# selector: -# matchLabels: -# app: allow-all - +--- +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + name: allow-all-deny +spec: + action: DENY + rules: + - from: + - source: + namespaces: + - istio-gateways + to: + - operation: + paths: + - /actuator* + selector: + matchLabels: + app: allow-all diff --git a/tests/application/authorization-settings/patch-application.yaml b/tests/application/authorization-settings/patch-application.yaml index 939348c8..92f3c2fb 100644 --- a/tests/application/authorization-settings/patch-application.yaml +++ b/tests/application/authorization-settings/patch-application.yaml @@ -16,4 +16,6 @@ metadata: name: allow-all spec: image: image - port: 8080 \ No newline at end of file + port: 8080 + authorizationSettings: null # if we don't set it to null, then it will just merge. 
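
The authorizationSettings: null comment above hinges on JSON merge-patch semantics (RFC 7386): a patch that omits a field merges the old value through, while an explicit null deletes the key. A standalone illustration using the evanphx/json-patch library, chosen here purely for demonstration; the test itself exercises the API server's own merge behavior:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"spec":{"port":8080,"authorizationSettings":{"allowList":["/actuator"]}}}`)

	// Omitting authorizationSettings leaves the old value in place...
	merged, err := jsonpatch.MergePatch(original, []byte(`{"spec":{"port":9090}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // authorizationSettings survives the merge

	// ...while an explicit null removes the key entirely.
	cleared, err := jsonpatch.MergePatch(original, []byte(`{"spec":{"authorizationSettings":null}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(cleared)) // authorizationSettings is gone
}
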
+ diff --git a/tests/application/custom-certificate/application-duplicate-ingress-assert.yaml b/tests/application/custom-certificate/application-duplicate-ingress-assert.yaml new file mode 100644 index 00000000..327b1fc6 --- /dev/null +++ b/tests/application/custom-certificate/application-duplicate-ingress-assert.yaml @@ -0,0 +1,137 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: custom-cert-duplicate + annotations: + argocd.argoproj.io/sync-options: "Prune=false" +spec: + selector: + matchLabels: + app: custom-cert-duplicate + template: + metadata: + annotations: + prometheus.io/scrape: "true" + argocd.argoproj.io/sync-options: "Prune=false" + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: custom-cert-duplicate + spec: + containers: + - name: custom-cert-duplicate + image: image + imagePullPolicy: Always + ports: + - containerPort: 8080 + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + privileged: false + readOnlyRootFilesystem: true + runAsGroup: 150 + runAsUser: 150 + runAsNonRoot: true + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + volumeMounts: + - mountPath: /tmp + name: tmp + imagePullSecrets: + - name: github-auth + securityContext: + fsGroup: 150 + supplementalGroups: + - 150 + seccompProfile: + type: RuntimeDefault + serviceAccountName: custom-cert-duplicate + volumes: + - emptyDir: {} + name: tmp + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - custom-cert-duplicate + matchLabelKeys: + - pod-template-hash + - maxSkew: 1 + topologyKey: "onprem.gke.io/failure-domain-name" + whenUnsatisfiable: ScheduleAnyway + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - custom-cert-duplicate + matchLabelKeys: + - pod-template-hash +--- +apiVersion: v1 +kind: Secret +metadata: + name: some-cert + namespace: istio-gateways +type: kubernetes.io/tls +--- +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: custom-cert-duplicate-ingress-dc2b250f77a411ad +spec: + selector: + app: istio-ingress-external + servers: + - hosts: + - test.kartverket.no + port: + name: http + number: 80 + protocol: HTTP + - hosts: + - test.kartverket.no + port: + name: https + number: 443 + protocol: HTTPS + tls: + credentialName: some-cert + mode: SIMPLE +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: custom-cert-duplicate-ingress +spec: + exportTo: + - . 
+ - istio-system + - istio-gateways + gateways: + - custom-cert-duplicate-ingress-dc2b250f77a411ad + hosts: + - test.kartverket.no + http: + - match: + - port: 80 + withoutHeaders: + ':path': + prefix: /.well-known/acme-challenge/ + name: redirect-to-https + redirect: + redirectCode: 308 + scheme: https + - name: default-app-route + route: + - destination: + host: custom-cert-duplicate + port: + number: 8080 diff --git a/tests/application/custom-certificate/application-duplicate-ingress-error.yaml b/tests/application/custom-certificate/application-duplicate-ingress-error.yaml new file mode 100644 index 00000000..682ecdfe --- /dev/null +++ b/tests/application/custom-certificate/application-duplicate-ingress-error.yaml @@ -0,0 +1,53 @@ +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: custom-cert-duplicate-ingress-db284ad1b14a59a0 +spec: + selector: + app: istio-ingress-external + servers: + - hosts: + - "test.kartverket.no+custom-cert" + port: + name: http + number: 80 + protocol: HTTP + - hosts: + - "test.kartverket.no+custom-cert" + port: + name: https + number: 443 + protocol: HTTPS + tls: + credentialName: some-cert + mode: SIMPLE +--- +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: custom-cert-duplicate-ingress +spec: + exportTo: + - . + - istio-system + - istio-gateways + gateways: + - custom-cert-duplicate-ingress-db284ad1b14a59a0 + hosts: + - "test.kartverket.no+custom-cert" + http: + - match: + - port: 80 + withoutHeaders: + ':path': + prefix: /.well-known/acme-challenge/ + name: redirect-to-https + redirect: + redirectCode: 308 + scheme: https + - name: default-app-route + route: + - destination: + host: custom-cert-duplicate + port: + number: 8080 diff --git a/tests/application/custom-certificate/application-duplicate-ingress.yaml b/tests/application/custom-certificate/application-duplicate-ingress.yaml new file mode 100644 index 00000000..7815d0ff --- /dev/null +++ b/tests/application/custom-certificate/application-duplicate-ingress.yaml @@ -0,0 +1,10 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: custom-cert-duplicate +spec: + image: image + port: 8080 + ingresses: + - test.kartverket.no + - test.kartverket.no+some-cert diff --git a/tests/application/custom-certificate/chainsaw-test.yaml b/tests/application/custom-certificate/chainsaw-test.yaml index 372b5f64..4c865fde 100644 --- a/tests/application/custom-certificate/chainsaw-test.yaml +++ b/tests/application/custom-certificate/chainsaw-test.yaml @@ -15,3 +15,9 @@ spec: - error: template: true file: generated-cert.yaml + - create: + file: application-duplicate-ingress.yaml + - assert: + file: application-duplicate-ingress-assert.yaml + - error: + file: application-duplicate-ingress-error.yaml diff --git a/tests/application/ignore-reconcile/remove-label-assert.yaml b/tests/application/ignore-reconcile/remove-label-assert.yaml index 1647df20..af0bba6f 100644 --- a/tests/application/ignore-reconcile/remove-label-assert.yaml +++ b/tests/application/ignore-reconcile/remove-label-assert.yaml @@ -2,8 +2,6 @@ apiVersion: networking.istio.io/v1beta1 kind: VirtualService metadata: name: ignore-reconcile-ingress - labels: - skiperator.kartverket.no/ignore: "false" spec: hosts: - test.com diff --git a/tests/application/minimal/application-assert.yaml b/tests/application/minimal/application-assert.yaml index 91daf679..32094590 100644 --- a/tests/application/minimal/application-assert.yaml +++ 
b/tests/application/minimal/application-assert.yaml @@ -2,13 +2,40 @@ apiVersion: v1 kind: ServiceAccount metadata: name: minimal + annotations: + argocd.argoproj.io/sync-options: "Prune=false" + labels: + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "application" + application.skiperator.no/app: minimal + application.skiperator.no/app-name: minimal + application.skiperator.no/app-namespace: ($namespace) + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Application + name: minimal + --- apiVersion: apps/v1 kind: Deployment metadata: name: minimal + labels: + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "application" + application.skiperator.no/app: minimal + application.skiperator.no/app-name: minimal + application.skiperator.no/app-namespace: ($namespace) annotations: argocd.argoproj.io/sync-options: "Prune=false" + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Application + name: minimal spec: selector: matchLabels: @@ -27,7 +54,8 @@ spec: image: image imagePullPolicy: Always ports: - - containerPort: 8080 + - name: main + containerPort: 8080 protocol: TCP securityContext: allowPrivilegeEscalation: false @@ -79,11 +107,33 @@ spec: - minimal matchLabelKeys: - pod-template-hash + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 25% + maxSurge: 25% + revisionHistoryLimit: 2 + progressDeadlineSeconds: 600 + --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: name: minimal + annotations: + argocd.argoproj.io/sync-options: "Prune=false" + labels: + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "application" + application.skiperator.no/app: minimal + application.skiperator.no/app-name: minimal + application.skiperator.no/app-namespace: ($namespace) + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Application + name: minimal spec: minReplicas: 2 maxReplicas: 5 @@ -103,6 +153,21 @@ apiVersion: v1 kind: Service metadata: name: minimal + annotations: + argocd.argoproj.io/sync-options: "Prune=false" + labels: + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "application" + application.skiperator.no/app: minimal + application.skiperator.no/app-name: minimal + application.skiperator.no/app-namespace: ($namespace) + app: minimal + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Application + name: minimal spec: selector: app: minimal @@ -117,6 +182,20 @@ apiVersion: security.istio.io/v1beta1 kind: PeerAuthentication metadata: name: minimal + annotations: + argocd.argoproj.io/sync-options: "Prune=false" + labels: + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "application" + application.skiperator.no/app: minimal + application.skiperator.no/app-name: minimal + application.skiperator.no/app-namespace: ($namespace) + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Application + name: minimal spec: selector: matchLabels: diff --git a/tests/application/service/application-assert.yaml b/tests/application/service/application-assert.yaml index 945006e1..c16e3c35 100644 --- a/tests/application/service/application-assert.yaml +++ 
b/tests/application/service/application-assert.yaml @@ -2,6 +2,8 @@ apiVersion: v1 kind: Service metadata: name: service-props + labels: + app: service-props spec: selector: app: service-props diff --git a/tests/application/subresource-status/application-error-assert.yaml b/tests/application/subresource-status/application-error-assert.yaml deleted file mode 100644 index 8aaeeb18..00000000 --- a/tests/application/subresource-status/application-error-assert.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: skiperator.kartverket.no/v1alpha1 -kind: Application -metadata: - finalizers: - - skip.statkart.no/finalizer - name: borked -spec: - enablePDB: true - image: image - port: 8080 - priority: medium - redirectToHTTPS: true - replicas: "2" - strategy: - type: RollingUpdate -status: - application: - message: One of the controllers is in a failed state - status: Error - controllers: - AuthorizationPolicy: - message: AuthorizationPolicy has finished synchronizing - status: Synced - Certificate: - message: Certificate has finished synchronizing - status: Synced - ConfigMap: - message: ConfigMap has finished synchronizing - status: Synced - EgressServiceEntry: - message: EgressServiceEntry has finished synchronizing - status: Synced - HorizontalPodAutoScaler: - message: HorizontalPodAutoScaler has finished synchronizing - status: Synced - IngressGateway: - message: IngressGateway has finished synchronizing - status: Synced - IngressVirtualService: - message: IngressVirtualService has finished synchronizing - status: Synced - NetworkPolicy: - message: NetworkPolicy has finished synchronizing - status: Synced - PeerAuthentication: - message: PeerAuthentication has finished synchronizing - status: Synced - PodDisruptionBudget: - message: 'json: cannot unmarshal string into Go value of type v1alpha1.Replicas' - status: Error - Service: - message: Service has finished synchronizing - status: Synced - ServiceAccount: - message: ServiceAccount has finished synchronizing - status: Synced diff --git a/tests/application/subresource-status/application-generate-error-assert.yaml b/tests/application/subresource-status/application-generate-error-assert.yaml new file mode 100644 index 00000000..1985b294 --- /dev/null +++ b/tests/application/subresource-status/application-generate-error-assert.yaml @@ -0,0 +1,22 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + finalizers: + - skip.statkart.no/finalizer + name: borked +spec: + enablePDB: true + image: image + port: 8080 + priority: medium + redirectToHTTPS: true + replicas: "2" + strategy: + type: RollingUpdate +status: + summary: + message: >- + failed to generate application resource: json: cannot unmarshal string + into Go value of type v1alpha1.Replicas + status: Error + diff --git a/tests/application/subresource-status/application-error.yaml b/tests/application/subresource-status/application-generate-error.yaml similarity index 53% rename from tests/application/subresource-status/application-error.yaml rename to tests/application/subresource-status/application-generate-error.yaml index b6d2546b..7b85e0f4 100644 --- a/tests/application/subresource-status/application-error.yaml +++ b/tests/application/subresource-status/application-generate-error.yaml @@ -1,3 +1,10 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: chainsaw-subresource-labels + labels: + istio.io/rev: "revision-1" +--- apiVersion: skiperator.kartverket.no/v1alpha1 kind: Application metadata: diff --git 
a/tests/application/subresource-status/application-resource-apply-error-assert.yaml b/tests/application/subresource-status/application-resource-apply-error-assert.yaml new file mode 100644 index 00000000..d3ba67a0 --- /dev/null +++ b/tests/application/subresource-status/application-resource-apply-error-assert.yaml @@ -0,0 +1,77 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + finalizers: + - skip.statkart.no/finalizer + name: badport +spec: + enablePDB: true + image: image + port: 80801 + priority: medium + redirectToHTTPS: true + replicas: + max: 5 + min: 2 + targetCpuUtilization: 80 + strategy: + type: RollingUpdate +status: + conditions: [] + subresources: + AuthorizationPolicy[badport-deny]: + message: AuthorizationPolicy has finished synchronizing + status: Synced + Certificate[chainsaw-subresource-labels-badport-ingress-214f400d5abb9c1c]: + message: Certificate has finished synchronizing + status: Synced + ConfigMap[badport-gcp-auth]: + message: ConfigMap has finished synchronizing + status: Synced + Deployment[badport]: + message: >- + Deployment Deployment.apps "badport" is invalid: + spec.template.spec.containers[0].ports[0].containerPort: Invalid value: + 80801: must be between 1 and 65535, inclusive + status: Error + Gateway[badport-ingress-214f400d5abb9c1c]: + message: Gateway has finished synchronizing + status: Synced + HorizontalPodAutoscaler[badport]: + message: HorizontalPodAutoscaler has finished synchronizing + status: Synced + NetworkPolicy[badport]: + message: >- + NetworkPolicy NetworkPolicy.networking.k8s.io "badport" is invalid: + [spec.ingress[0].ports[0].port: Invalid value: 80801: must be between 1 + and 65535, inclusive, spec.ingress[3].ports[0].port: Invalid value: + 80801: must be between 1 and 65535, inclusive] + status: Error + PeerAuthentication[badport]: + message: PeerAuthentication has finished synchronizing + status: Synced + PodDisruptionBudget[badport]: + message: PodDisruptionBudget has finished synchronizing + status: Synced + ServiceAccount[badport]: + message: ServiceAccount has finished synchronizing + status: Synced + ServiceEntry[badport-egress-56cd7aa901014e78]: + message: ServiceEntry has finished synchronizing + status: Synced + Service[badport]: + message: >- + Service Service "badport" is invalid: [spec.ports[0].port: Invalid + value: 80801: must be between 1 and 65535, inclusive, + spec.ports[0].targetPort: Invalid value: 80801: must be between 1 and + 65535, inclusive] + status: Error + VirtualService[badport-ingress]: + message: >- + VirtualService admission webhook "validation.istio.io" denied the + request: configuration is invalid: port number 80801 must be in the + range 1..65535 + status: Error + summary: + message: 'failed to process application resources, see subresource status: found 4 errors' + status: Error diff --git a/tests/application/subresource-status/application-resource-apply-error.yaml b/tests/application/subresource-status/application-resource-apply-error.yaml new file mode 100644 index 00000000..38ac987a --- /dev/null +++ b/tests/application/subresource-status/application-resource-apply-error.yaml @@ -0,0 +1,21 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: badport +spec: + image: image + port: 80801 + ingresses: + - hei.no + gcp: + auth: + serviceAccount: something@verdier.com + accessPolicy: + inbound: + rules: + - application: access-policy-other + outbound: + rules: + - application: access-policy-two + external: + - host: example.com diff 
--git a/tests/application/subresource-status/application-synced-assert.yaml b/tests/application/subresource-status/application-synced-assert.yaml index 58455c29..6176210e 100644 --- a/tests/application/subresource-status/application-synced-assert.yaml +++ b/tests/application/subresource-status/application-synced-assert.yaml @@ -17,49 +17,49 @@ spec: strategy: type: RollingUpdate status: - application: - message: All controllers synced + summary: + message: All subresources synced status: Synced - controllers: - AuthorizationPolicy: + subresources: + AuthorizationPolicy[working-deny]: message: AuthorizationPolicy has finished synchronizing status: Synced - Certificate: + Certificate[chainsaw-subresource-labels-working-ingress-214f400d5abb9c1c]: message: Certificate has finished synchronizing status: Synced - ConfigMap: + ConfigMap[working-gcp-auth]: message: ConfigMap has finished synchronizing status: Synced - Deployment: + Deployment[working]: message: Deployment has finished synchronizing status: Synced - EgressServiceEntry: - message: EgressServiceEntry has finished synchronizing + Gateway[working-ingress-214f400d5abb9c1c]: + message: Gateway has finished synchronizing status: Synced - HorizontalPodAutoScaler: - message: HorizontalPodAutoScaler has finished synchronizing + HorizontalPodAutoscaler[working]: + message: HorizontalPodAutoscaler has finished synchronizing status: Synced - IngressGateway: - message: IngressGateway has finished synchronizing - status: Synced - IngressVirtualService: - message: IngressVirtualService has finished synchronizing - status: Synced - NetworkPolicy: + NetworkPolicy[working]: message: NetworkPolicy has finished synchronizing status: Synced - PeerAuthentication: + PeerAuthentication[working]: message: PeerAuthentication has finished synchronizing status: Synced - PodDisruptionBudget: + PodDisruptionBudget[working]: message: PodDisruptionBudget has finished synchronizing status: Synced - Service: - message: Service has finished synchronizing - status: Synced - ServiceAccount: + ServiceAccount[working]: message: ServiceAccount has finished synchronizing status: Synced - ServiceMonitor: + ServiceEntry[working-egress-56cd7aa901014e78]: + message: ServiceEntry has finished synchronizing + status: Synced + ServiceMonitor[working]: message: ServiceMonitor has finished synchronizing - status: Synced \ No newline at end of file + status: Synced + Service[working]: + message: Service has finished synchronizing + status: Synced + VirtualService[working-ingress]: + message: VirtualService has finished synchronizing + status: Synced diff --git a/tests/application/subresource-status/application-synced.yaml b/tests/application/subresource-status/application-synced.yaml index 04c68e95..7d4910c9 100644 --- a/tests/application/subresource-status/application-synced.yaml +++ b/tests/application/subresource-status/application-synced.yaml @@ -1,3 +1,19 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: chainsaw-subresource-labels + labels: + istio.io/rev: "revision-1" +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: "gcp-identity-config" + namespace: "skiperator-system" +data: + workloadIdentityPool: "testPool" + identityProvider: "testProvider" +--- apiVersion: skiperator.kartverket.no/v1alpha1 kind: Application metadata: @@ -5,3 +21,17 @@ metadata: spec: image: image port: 8080 + ingresses: + - hei.no + gcp: + auth: + serviceAccount: something@verdier.com + accessPolicy: + inbound: + rules: + - application: access-policy-other + outbound: + rules: + - 
application: access-policy-two + external: + - host: example.com diff --git a/tests/application/subresource-status/chainsaw-test.yaml b/tests/application/subresource-status/chainsaw-test.yaml index ad6e964d..3ef4913d 100644 --- a/tests/application/subresource-status/chainsaw-test.yaml +++ b/tests/application/subresource-status/chainsaw-test.yaml @@ -6,14 +6,20 @@ spec: skip: false concurrent: true skipDelete: false + namespace: chainsaw-subresource-labels steps: - try: - - create: + - apply: file: application-synced.yaml - assert: file: application-synced-assert.yaml - try: - - create: - file: application-error.yaml + - apply: + file: application-generate-error.yaml - assert: - file: application-error-assert.yaml + file: application-generate-error-assert.yaml + - try: + - apply: + file: application-resource-apply-error.yaml + - assert: + file: application-resource-apply-error-assert.yaml diff --git a/tests/application/team-label/chainsaw-test.yaml b/tests/application/team-label/chainsaw-test.yaml index 8023e0f1..a908e0cb 100644 --- a/tests/application/team-label/chainsaw-test.yaml +++ b/tests/application/team-label/chainsaw-test.yaml @@ -8,7 +8,7 @@ spec: skipDelete: false steps: - try: - - create: + - apply: file: namespace.yaml - create: file: application.yaml @@ -20,7 +20,7 @@ spec: - assert: file: application-with-fixed-team-assert.yaml - try: - - create: + - apply: file: namespace-no-team.yaml - create: file: application-no-team.yaml diff --git a/tests/application/watched/certificate-assert.yaml b/tests/application/watched/certificate-assert.yaml new file mode 100644 index 00000000..5926ff11 --- /dev/null +++ b/tests/application/watched/certificate-assert.yaml @@ -0,0 +1,21 @@ + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + namespace: istio-gateways + name: watched-certificates-ingress-56cd7aa901014e78 +spec: + issuerRef: + kind: ClusterIssuer + name: cluster-issuer + secretName: watched-certificates-ingress-56cd7aa901014e78 + dnsNames: + - example.com + +--- + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + namespace: istio-gateways + name: watched-certificates-ingress-34888c0b0c2a4a2c diff --git a/tests/application/watched/certificate-errors.yaml b/tests/application/watched/certificate-errors.yaml new file mode 100644 index 00000000..dafd0f35 --- /dev/null +++ b/tests/application/watched/certificate-errors.yaml @@ -0,0 +1,15 @@ +# controller should clean up certificates + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + namespace: istio-gateways + name: watched-certificates-ingress-56cd7aa901014e78 + +--- + +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + namespace: istio-gateways + name: watched-certificates-ingress-34888c0b0c2a4a2c diff --git a/tests/application/watched/certificate.yaml b/tests/application/watched/certificate.yaml new file mode 100644 index 00000000..e9203130 --- /dev/null +++ b/tests/application/watched/certificate.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: watched +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: certificates + namespace: watched +spec: + image: image + port: 8080 + ingresses: + - example.com + - test.com + redirectToHTTPS: true diff --git a/tests/application/watched/chainsaw-test.yaml b/tests/application/watched/chainsaw-test.yaml new file mode 100644 index 00000000..43e16517 --- /dev/null +++ b/tests/application/watched/chainsaw-test.yaml @@ -0,0 +1,19 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test 
+metadata: + name: watched +spec: + skip: false + concurrent: true + skipDelete: false + steps: + - try: + - create: + file: certificate.yaml + - assert: + file: certificate-assert.yaml + - delete: + file: certificate.yaml + - try: + - error: + file: certificate-errors.yaml diff --git a/tests/cluster-config/gcp-identity-config.yaml b/tests/cluster-config/gcp-identity-config.yaml new file mode 100644 index 00000000..8852661a --- /dev/null +++ b/tests/cluster-config/gcp-identity-config.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "gcp-identity-config" + namespace: "skiperator-system" +data: + workloadIdentityPool: empty + identityProvider: empty diff --git a/tests/cluster-config/ns-exclusions-config.yaml b/tests/cluster-config/ns-exclusions-config.yaml new file mode 100644 index 00000000..521078e4 --- /dev/null +++ b/tests/cluster-config/ns-exclusions-config.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "namespace-exclusions" + namespace: "skiperator-system" +data: + default: "true" + istio-system: "true" + istio-gateways: "true" + cert-manager: "true" + kube-node-lease: "true" + kube-public: "true" + kube-system: "true" diff --git a/tests/config.yaml b/tests/config.yaml index cefa3e30..6c50e4b9 100644 --- a/tests/config.yaml +++ b/tests/config.yaml @@ -3,7 +3,7 @@ kind: Configuration metadata: name: configuration spec: - parallel: 10 + parallel: 15 timeouts: delete: 60s assert: 60s diff --git a/tests/namespace/default-deny/assert.yaml b/tests/namespace/default-deny/assert.yaml index a22b9362..3a89ccb7 100644 --- a/tests/namespace/default-deny/assert.yaml +++ b/tests/namespace/default-deny/assert.yaml @@ -2,6 +2,9 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: default-deny + labels: + app.kubernetes.io/managed-by: skiperator + skiperator.kartverket.no/controller: namespace spec: policyTypes: - Ingress @@ -56,11 +59,11 @@ spec: protocol: TCP - port: 4318 protocol: TCP - to: + to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: grafana-alloy podSelector: matchLabels: app.kubernetes.io/instance: alloy - app.kubernetes.io/name: alloy + app.kubernetes.io/name: alloy \ No newline at end of file diff --git a/tests/namespace/image-pull-secret/assert.yaml b/tests/namespace/image-pull-secret/assert.yaml index 10c0437e..454ca7a5 100644 --- a/tests/namespace/image-pull-secret/assert.yaml +++ b/tests/namespace/image-pull-secret/assert.yaml @@ -2,6 +2,9 @@ apiVersion: v1 kind: Secret metadata: name: github-auth + labels: + app.kubernetes.io/managed-by: skiperator + skiperator.kartverket.no/controller: namespace type: kubernetes.io/dockerconfigjson data: .dockerconfigjson: eyJhdXRocyI6eyJnaGNyLmlvIjp7ImF1dGgiOiIifX19Cg== \ No newline at end of file diff --git a/tests/namespace/sidecar/assert.yaml b/tests/namespace/sidecar/assert.yaml index 49b0626d..ee040b59 100644 --- a/tests/namespace/sidecar/assert.yaml +++ b/tests/namespace/sidecar/assert.yaml @@ -2,5 +2,8 @@ apiVersion: networking.istio.io/v1beta1 kind: Sidecar metadata: name: sidecar + labels: + app.kubernetes.io/managed-by: skiperator + skiperator.kartverket.no/controller: namespace spec: outboundTrafficPolicy: {} \ No newline at end of file diff --git a/tests/routing/routes/patch-routing-change-hostname-assert.yaml b/tests/routing/routes/patch-routing-change-hostname-assert.yaml index 6907f836..2f5dfa13 100644 --- a/tests/routing/routes/patch-routing-change-hostname-assert.yaml +++ b/tests/routing/routes/patch-routing-change-hostname-assert.yaml @@ -4,7 
+4,7 @@ metadata: labels: app.kubernetes.io/managed-by: skiperator skiperator.kartverket.no/controller: routing - skiperator.kartverket.no/source-namespace: chainsaw-routing-routes + skiperator.kartverket.no/source-namespace: ($namespace) name: chainsaw-routing-routes-app-paths-routing-ingress-b1dffede namespace: istio-gateways spec: diff --git a/tests/routing/routes/routing-assert.yaml b/tests/routing/routes/routing-assert.yaml index 6c420f21..1a69ce8e 100644 --- a/tests/routing/routes/routing-assert.yaml +++ b/tests/routing/routes/routing-assert.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/managed-by: skiperator skiperator.kartverket.no/controller: routing - skiperator.kartverket.no/source-namespace: chainsaw-routing-routes + skiperator.kartverket.no/source-namespace: ($namespace) name: chainsaw-routing-routes-app-paths-routing-ingress-b1dffede namespace: istio-gateways spec: diff --git a/tests/skipjob/access-policy-job/chainsaw-test.yaml b/tests/skipjob/access-policy-job/chainsaw-test.yaml index 124b051c..8053322b 100644 --- a/tests/skipjob/access-policy-job/chainsaw-test.yaml +++ b/tests/skipjob/access-policy-job/chainsaw-test.yaml @@ -18,3 +18,8 @@ spec: file: skipjob.yaml - assert: file: skipjob-assert.yaml + - try: + - apply: + file: skipjob-cron.yaml + - assert: + file: skipjob-cron-assert.yaml diff --git a/tests/skipjob/access-policy-job/skipjob-assert.yaml b/tests/skipjob/access-policy-job/skipjob-assert.yaml index 31be3cfa..37527dd3 100644 --- a/tests/skipjob/access-policy-job/skipjob-assert.yaml +++ b/tests/skipjob/access-policy-job/skipjob-assert.yaml @@ -2,12 +2,23 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: access-policy-job-skipjob + labels: + app.kubernetes.io/managed-by: skiperator + skiperator.kartverket.no/controller: skipjob + skiperator.kartverket.no/skipjob: 'true' + skiperator.kartverket.no/skipjobName: access-policy-job + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: SKIPJob + name: access-policy-job spec: egress: - - ports: - - port: 8080 - protocol: TCP - to: + - ports: + - port: 8080 + protocol: TCP + to: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: access-policy-job-ns @@ -53,3 +64,40 @@ spec: number: 80 protocol: HTTP resolution: DNS +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: access-policy-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" + accessPolicy: + outbound: + external: + - host: example.com + ports: + - name: http + port: 80 + protocol: HTTP + - host: foo.com + rules: + - application: minimal-application + ports: + - port: 8080 + protocol: TCP +status: + conditions: + - type: Failed + status: "False" + - type: Running + status: "True" + - type: Finished + status: "False" + - type: InternalRulesValid + status: "True" diff --git a/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml new file mode 100644 index 00000000..9fe3b98f --- /dev/null +++ b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml @@ -0,0 +1,68 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: access-policy-cron-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" + accessPolicy: + outbound: + external: + - host: example.com + ports: + - name: http + port: 80 + protocol: HTTP 
+ - host: foo.com + rules: + - application: minimal-application + ports: + - port: 8080 + protocol: TCP + cron: + schedule: "* * * * *" +status: + conditions: + - type: Failed + status: "False" + - type: Running + status: "True" + - type: Finished + status: "False" + - type: InternalRulesValid + status: "True" + +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: access-policy-cron-job-skipjob + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: SKIPJob + name: access-policy-cron-job +spec: + egress: + - ports: + - port: 8080 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: access-policy-job-ns + podSelector: + matchLabels: + app: minimal-application + podSelector: + matchLabels: + app: access-policy-cron-job-skipjob + policyTypes: + - Egress +--- \ No newline at end of file diff --git a/tests/skipjob/access-policy-job/skipjob-cron.yaml b/tests/skipjob/access-policy-job/skipjob-cron.yaml new file mode 100644 index 00000000..6503bb2b --- /dev/null +++ b/tests/skipjob/access-policy-job/skipjob-cron.yaml @@ -0,0 +1,25 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: access-policy-cron-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" + accessPolicy: + outbound: + rules: + - application: minimal-application + external: + - host: example.com + ports: + - name: http + port: 80 + protocol: HTTP + - host: foo.com + cron: + schedule: "* * * * *" diff --git a/tests/skipjob/access-policy-job/skipjob.yaml b/tests/skipjob/access-policy-job/skipjob.yaml index 2289cb4b..2838205d 100644 --- a/tests/skipjob/access-policy-job/skipjob.yaml +++ b/tests/skipjob/access-policy-job/skipjob.yaml @@ -20,4 +20,4 @@ spec: - name: http port: 80 protocol: HTTP - - host: foo.com \ No newline at end of file + - host: foo.com diff --git a/tests/skipjob/conditions/chainsaw-test.yaml b/tests/skipjob/conditions/chainsaw-test.yaml index e0a60e84..dd1eb36a 100644 --- a/tests/skipjob/conditions/chainsaw-test.yaml +++ b/tests/skipjob/conditions/chainsaw-test.yaml @@ -7,8 +7,9 @@ spec: concurrent: true skipDelete: false steps: - - try: + - try: - apply: file: skipjob.yaml - # - assert: - # file: skipjob-assert.yaml + - assert: + file: skipjob-assert.yaml + diff --git a/tests/skipjob/conditions/skipjob-assert.yaml b/tests/skipjob/conditions/skipjob-assert.yaml index b20094bc..dc060be3 100644 --- a/tests/skipjob/conditions/skipjob-assert.yaml +++ b/tests/skipjob/conditions/skipjob-assert.yaml @@ -1,37 +1,44 @@ - -#apiVersion: skiperator.kartverket.no/v1alpha1 -#kind: SKIPJob -#metadata: -# name: condition-finish -#status: -# conditions: -# - type: SKIPJobCreated -# status: "True" -# - type: Running -# status: "False" -# - type: Finished -# status: "True" -#--- -#apiVersion: skiperator.kartverket.no/v1alpha1 -#kind: SKIPJob -#metadata: -# name: condition-running -#status: -# conditions: -# - type: SKIPJobCreated -# status: "True" -# - type: Running -# status: "True" -#--- -#apiVersion: skiperator.kartverket.no/v1alpha1 -#kind: SKIPJob -#metadata: -# name: condition-fail -#status: -# conditions: -# - type: SKIPJobCreated -# status: "True" -# - type: Failed -# status: "True" -# - type: Running -# status: "False" \ No newline at end of file +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: condition-finish +status: + conditions: + - type: Failed + status: "False" + 
- type: Running + status: "False" + - type: Finished + status: "True" + - type: InternalRulesValid + status: "True" +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: condition-running +status: + conditions: + - type: Failed + status: "False" + - type: Running + status: "True" + - type: Finished + status: "False" + - type: InternalRulesValid + status: "True" +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: condition-fail +status: + conditions: + - type: Failed + status: "True" + - type: Running + status: "False" + - type: Finished + status: "False" + - type: InternalRulesValid + status: "False" diff --git a/tests/skipjob/conditions/skipjob.yaml b/tests/skipjob/conditions/skipjob.yaml index 3f01949b..5c1ef826 100644 --- a/tests/skipjob/conditions/skipjob.yaml +++ b/tests/skipjob/conditions/skipjob.yaml @@ -34,4 +34,11 @@ spec: command: - "perl" - "-wle" - - "exit 1" \ No newline at end of file + - "exit 1" + accessPolicy: + outbound: + rules: + - application: doesnt-exist-diff-ns + namespace: non-existing + job: + activeDeadlineSeconds: 1 \ No newline at end of file diff --git a/tests/skipjob/immutable-container/chainsaw-test.yaml b/tests/skipjob/immutable-container/chainsaw-test.yaml new file mode 100644 index 00000000..fe8e2f46 --- /dev/null +++ b/tests/skipjob/immutable-container/chainsaw-test.yaml @@ -0,0 +1,26 @@ +# spec.Container should be immutable after status has been set +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: immutable-container +spec: + skip: false + concurrent: true + skipDelete: false + steps: + - try: + - apply: + file: skipjob.yaml + - assert: + file: skipjob-assert.yaml + - try: + - patch: + file: skipjob-patch.yaml + expect: + - match: + apiVersion: skiperator.kartverket.no/v1alpha1 + kind: SKIPJob + check: + ($error != null): true + - error: + file: skipjob-patch-error.yaml diff --git a/tests/skipjob/immutable-container/skipjob-assert.yaml b/tests/skipjob/immutable-container/skipjob-assert.yaml new file mode 100644 index 00000000..467f10af --- /dev/null +++ b/tests/skipjob/immutable-container/skipjob-assert.yaml @@ -0,0 +1,15 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: minimal-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" +status: + summary: + status: Synced diff --git a/tests/skipjob/immutable-container/skipjob-patch-error.yaml b/tests/skipjob/immutable-container/skipjob-patch-error.yaml new file mode 100644 index 00000000..5030623c --- /dev/null +++ b/tests/skipjob/immutable-container/skipjob-patch-error.yaml @@ -0,0 +1,12 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: minimal-job +spec: + container: + image: "perl:5.34.2" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" diff --git a/tests/skipjob/immutable-container/skipjob-patch.yaml b/tests/skipjob/immutable-container/skipjob-patch.yaml new file mode 100644 index 00000000..5030623c --- /dev/null +++ b/tests/skipjob/immutable-container/skipjob-patch.yaml @@ -0,0 +1,12 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: minimal-job +spec: + container: + image: "perl:5.34.2" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" diff --git a/tests/skipjob/immutable-container/skipjob.yaml b/tests/skipjob/immutable-container/skipjob.yaml new file mode 100644 index 00000000..909cae41 --- 
/dev/null +++ b/tests/skipjob/immutable-container/skipjob.yaml @@ -0,0 +1,12 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: minimal-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" diff --git a/tests/skipjob/minimal-cron-job/skipjob-assert.yaml b/tests/skipjob/minimal-cron-job/skipjob-assert.yaml index debfc23d..479fe19e 100644 --- a/tests/skipjob/minimal-cron-job/skipjob-assert.yaml +++ b/tests/skipjob/minimal-cron-job/skipjob-assert.yaml @@ -2,11 +2,28 @@ apiVersion: v1 kind: ServiceAccount metadata: name: minimal-cron-job-skipjob + labels: + skiperator.kartverket.no/skipjob: "true" + skiperator.kartverket.no/skipjobName: minimal-cron-job + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "skipjob" --- apiVersion: batch/v1 kind: CronJob metadata: name: minimal-cron-job + labels: + app: minimal-cron-job-skipjob + skiperator.kartverket.no/skipjob: "true" + skiperator.kartverket.no/skipjobName: minimal-cron-job + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "skipjob" + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: SKIPJob + name: minimal-cron-job spec: schedule: "* * * * *" concurrencyPolicy: Allow diff --git a/tests/skipjob/minimal-job/skipjob-assert.yaml b/tests/skipjob/minimal-job/skipjob-assert.yaml index 93457421..c519a607 100644 --- a/tests/skipjob/minimal-job/skipjob-assert.yaml +++ b/tests/skipjob/minimal-job/skipjob-assert.yaml @@ -11,6 +11,8 @@ metadata: app: minimal-job-skipjob skiperator.kartverket.no/skipjob: "true" skiperator.kartverket.no/skipjobName: minimal-job + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "skipjob" spec: suspend: false backoffLimit: 6 @@ -20,6 +22,10 @@ spec: metadata: labels: job-name: minimal-job + skiperator.kartverket.no/skipjob: "true" + skiperator.kartverket.no/skipjobName: minimal-job + app.kubernetes.io/managed-by: "skiperator" + skiperator.kartverket.no/controller: "skipjob" spec: containers: - name: minimal-job-skipjob From a4616722cb73803a0b3a247170f5172d6be71d76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20Haram=20Nyg=C3=A5rd?= Date: Fri, 16 Aug 2024 09:41:58 +0200 Subject: [PATCH 2/4] Missing status when updating to new CRD blocks resource update (#509) * Missing status when updating to new CRD blocks resource update * tweak skipjob validation rule to allow dynamic ports for access policy * fix log Co-authored-by: Even Holthe --------- Co-authored-by: Even Holthe --- api/v1alpha1/skipjob_types.go | 2 +- .../skiperator.kartverket.no_skipjobs.yaml | 2 +- internal/controllers/application.go | 18 +++++--- internal/controllers/common/util.go | 27 ++++++++++++ internal/controllers/common/util_test.go | 44 +++++++++++++++++++ internal/controllers/skipjob.go | 21 +++++---- pkg/util/helperfunctions.go | 17 ------- 7 files changed, 98 insertions(+), 33 deletions(-) diff --git a/api/v1alpha1/skipjob_types.go b/api/v1alpha1/skipjob_types.go index 61acfa22..f020e1f3 100644 --- a/api/v1alpha1/skipjob_types.go +++ b/api/v1alpha1/skipjob_types.go @@ -39,7 +39,7 @@ type SKIPJobStatus struct { // A SKIPJob is either defined as a one-off or a scheduled job. If the Cron field is set for SKIPJob, it may not be removed. If the Cron field is unset, it may not be added. // The Container field of a SKIPJob is only mutable if the Cron field is set. 
If unset, you must delete your SKIPJob to change container settings.
 // +kubebuilder:validation:XValidation:rule="(has(oldSelf.spec.cron) && has(self.spec.cron)) || (!has(oldSelf.spec.cron) && !has(self.spec.cron))", message="After creation of a SKIPJob you may not remove the Cron field if it was previously present, or add it if it was previously omitted. Please delete the SKIPJob to change its nature from a one-off/scheduled job."
-// +kubebuilder:validation:XValidation:rule="(!has(self.status) || ((!has(self.spec.cron) && (oldSelf.spec.container == self.spec.container)) || has(self.spec.cron)))", message="The field Container is immutable for one-off jobs. Please delete your SKIPJob to change the containers settings."
+// +kubebuilder:validation:XValidation:rule="(size(self.status.subresources) == 0 || ((!has(self.spec.cron) && (oldSelf.spec.container == self.spec.container)) || has(self.spec.cron)))", message="The field Container is immutable for one-off jobs. Please delete your SKIPJob to change the containers settings."
 // SKIPJob is the Schema for the skipjobs API
 type SKIPJob struct {
 	metav1.TypeMeta   `json:",inline"`
diff --git a/config/crd/skiperator.kartverket.no_skipjobs.yaml b/config/crd/skiperator.kartverket.no_skipjobs.yaml
index cf72e1e0..1584d270 100644
--- a/config/crd/skiperator.kartverket.no_skipjobs.yaml
+++ b/config/crd/skiperator.kartverket.no_skipjobs.yaml
@@ -981,7 +981,7 @@ spec:
             && !has(self.spec.cron))
         - message: The field Container is immutable for one-off jobs. Please delete
             your SKIPJob to change the containers settings.
-          rule: (!has(self.status) || ((!has(self.spec.cron) && (oldSelf.spec.container
+          rule: (size(self.status.subresources) == 0 || ((!has(self.spec.cron) && (oldSelf.spec.container
             == self.spec.container)) || has(self.spec.cron)))
     served: true
     storage: true
diff --git a/internal/controllers/application.go b/internal/controllers/application.go
index 05a2f677..35acbb20 100644
--- a/internal/controllers/application.go
+++ b/internal/controllers/application.go
@@ -147,11 +147,22 @@ func (r *ApplicationReconciler) Reconcile(ctx context.Context, req reconcile.Req
 	r.setApplicationDefaults(application, ctx)
 
-	specDiff, err := util.GetObjectDiff(tmpApplication.Spec, application.Spec)
+	specDiff, err := common.GetObjectDiff(tmpApplication.Spec, application.Spec)
 	if err != nil {
 		return common.RequeueWithError(err)
 	}
 
+	statusDiff, err := common.GetObjectDiff(tmpApplication.Status, application.Status)
+	if err != nil {
+		return common.RequeueWithError(err)
+	}
+
+	if len(statusDiff) > 0 {
+		rLog.Info("Status has changed", "diff", statusDiff)
+		err = r.GetClient().Status().Update(ctx, application)
+		return reconcile.Result{Requeue: true}, err
+	}
+
 	// Finalizer check is due to a bug when updating using controller-runtime
 	// See https://github.com/kubernetes-sigs/controller-runtime/issues/2453
 	if len(specDiff) > 0 || (!ctrlutil.ContainsFinalizer(tmpApplication, applicationFinalizer) && ctrlutil.ContainsFinalizer(application, applicationFinalizer)) {
@@ -160,11 +171,6 @@ func (r *ApplicationReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return reconcile.Result{Requeue: true}, err
 	}
 
-	// TODO Removed status diff check here... why do we need that?
Causing endless reconcile because timestamps are different (which makes sense)
-	if err = r.GetClient().Status().Update(ctx, application); err != nil {
-		return common.RequeueWithError(err)
-	}
-
 	//Start the actual reconciliation
 	rLog.Debug("Starting reconciliation loop", "application", application.Name)
 	r.SetProgressingState(ctx, application, fmt.Sprintf("Application %v has started reconciliation loop", application.Name))
diff --git a/internal/controllers/common/util.go b/internal/controllers/common/util.go
index cc272a5c..1806bb77 100644
--- a/internal/controllers/common/util.go
+++ b/internal/controllers/common/util.go
@@ -1,10 +1,13 @@
 package common
 
 import (
+	"fmt"
 	skiperatorv1alpha1 "github.com/kartverket/skiperator/api/v1alpha1"
 	"github.com/kartverket/skiperator/api/v1alpha1/podtypes"
+	"github.com/r3labs/diff/v3"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"reflect"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 )
@@ -55,3 +58,27 @@ func GetInternalRulesCondition(obj skiperatorv1alpha1.SKIPObject, status metav1.
 		Message: message,
 	}
 }
+
+func GetObjectDiff[T any](a T, b T) (diff.Changelog, error) {
+	aKind := reflect.ValueOf(a).Kind()
+	bKind := reflect.ValueOf(b).Kind()
+	if aKind != bKind {
+		return nil, fmt.Errorf("objects to compare are not of the same kind: obj1: %v, obj2: %v", aKind, bKind)
+	}
+	changelog, err := diff.Diff(a, b)
+
+	changelog = filterOutStatusTimestamps(changelog)
+
+	if len(changelog) == 0 {
+		return nil, err
+	}
+
+	return changelog, nil
+}
+
+func filterOutStatusTimestamps(changelog diff.Changelog) diff.Changelog {
+	changelog = changelog.FilterOut([]string{"Summary", "TimeStamp"})
+	changelog = changelog.FilterOut([]string{"Conditions", ".*", "LastTransitionTime"})
+	changelog = changelog.FilterOut([]string{"SubResources", ".*", "TimeStamp"})
+	return changelog
+}
diff --git a/internal/controllers/common/util_test.go b/internal/controllers/common/util_test.go
index 1d32ea71..80cfeb2b 100644
--- a/internal/controllers/common/util_test.go
+++ b/internal/controllers/common/util_test.go
@@ -4,7 +4,9 @@ import (
 	"github.com/kartverket/skiperator/api/v1alpha1"
 	"github.com/kartverket/skiperator/pkg/testutil"
 	"github.com/stretchr/testify/assert"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"testing"
+	"time"
 )
 
 func TestShouldReconcile(t *testing.T) {
@@ -14,3 +16,45 @@ func TestShouldReconcile(t *testing.T) {
 	app.Labels["skiperator.kartverket.no/ignore"] = "true"
 	assert.False(t, ShouldReconcile(app))
 }
+
+func TestStatusDiffWithTimestamp(t *testing.T) {
+	status := &v1alpha1.SkiperatorStatus{
+		Summary: v1alpha1.Status{
+			Status:    v1alpha1.SYNCED,
+			Message:   "All subresources synced",
+			TimeStamp: time.Now().String(),
+		},
+		Conditions: []v1.Condition{
+			{
+				ObservedGeneration: 1,
+				LastTransitionTime: v1.Now(),
+			},
+		},
+		SubResources: map[string]v1alpha1.Status{
+			"test": {
+				Status:    v1alpha1.SYNCED,
+				Message:   "All subresources synced",
+				TimeStamp: time.Now().String(),
+			},
+		},
+	}
+
+	tmpStatus := status.DeepCopy()
+	status.Summary.TimeStamp = time.Now().String()
+	status.Conditions[0].LastTransitionTime = v1.Now()
+	status.SubResources["test"] = v1alpha1.Status{
+		Status:    v1alpha1.SYNCED,
+		Message:   "All subresources synced",
+		TimeStamp: time.Now().String(),
+	}
+
+	//assert that timestamps are in fact different
+	assert.NotEqual(t, tmpStatus.Summary.TimeStamp, status.Summary.TimeStamp)
+	assert.NotEqual(t, tmpStatus.Conditions[0].LastTransitionTime,
status.Conditions[0].LastTransitionTime) + assert.NotEqual(t, tmpStatus.SubResources["test"].TimeStamp, status.SubResources["test"].TimeStamp) + + //assert zero diff + diff, err := GetObjectDiff(tmpStatus, status) + assert.NoError(t, err) + assert.Equal(t, 0, len(diff)) +} diff --git a/internal/controllers/skipjob.go b/internal/controllers/skipjob.go index 3dd3bf76..20a55249 100644 --- a/internal/controllers/skipjob.go +++ b/internal/controllers/skipjob.go @@ -14,7 +14,6 @@ import ( "github.com/kartverket/skiperator/pkg/resourcegenerator/podmonitor" "github.com/kartverket/skiperator/pkg/resourcegenerator/resourceutils" "github.com/kartverket/skiperator/pkg/resourcegenerator/serviceaccount" - "github.com/kartverket/skiperator/pkg/util" istionetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" @@ -101,23 +100,29 @@ func (r *SKIPJobReconciler) Reconcile(ctx context.Context, req reconcile.Request return common.RequeueWithError(err) } - specDiff, err := util.GetObjectDiff(tmpSkipJob.Spec, skipJob.Spec) + specDiff, err := common.GetObjectDiff(tmpSkipJob.Spec, skipJob.Spec) if err != nil { return common.RequeueWithError(err) } + statusDiff, err := common.GetObjectDiff(tmpSkipJob.Status, skipJob.Status) + if err != nil { + return common.RequeueWithError(err) + } + + if len(statusDiff) > 0 { + rLog.Debug("Status has changed", "diff", statusDiff) + err = r.GetClient().Status().Update(ctx, skipJob) + return reconcile.Result{Requeue: true}, err + } + // If we update the SKIPJob initially on applied defaults before starting reconciling resources we allow all // updates to be visible even though the controllerDuties may take some time. if len(specDiff) > 0 { - err := r.GetClient().Update(ctx, skipJob) + err = r.GetClient().Update(ctx, skipJob) return reconcile.Result{Requeue: true}, err } - // TODO Removed status diff check here... why do we need that? 
Causing endless reconcile because timestamps are different (which makes sense)
-	if err = r.GetClient().Status().Update(ctx, skipJob); err != nil {
-		return common.RequeueWithError(err)
-	}
-
 	//Start the actual reconciliation
 	rLog.Debug("Starting reconciliation loop")
 	r.SetProgressingState(ctx, skipJob, fmt.Sprintf("SKIPJob %v has started reconciliation loop", skipJob.Name))
diff --git a/pkg/util/helperfunctions.go b/pkg/util/helperfunctions.go
index 6102fb57..b49cd2de 100644
--- a/pkg/util/helperfunctions.go
+++ b/pkg/util/helperfunctions.go
@@ -6,7 +6,6 @@ import (
 	"github.com/kartverket/skiperator/api/v1alpha1/podtypes"
 	"github.com/mitchellh/hashstructure/v2"
 	"github.com/nais/liberator/pkg/namegen"
-	"github.com/r3labs/diff/v3"
 	"hash/fnv"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -14,7 +13,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/client-go/tools/record"
-	"reflect"
 	"regexp"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"strings"
@@ -150,21 +148,6 @@ func EnsurePrefix(s string, prefix string) string {
 	return s
 }
 
-func GetObjectDiff[T any](a T, b T) (diff.Changelog, error) {
-	aKind := reflect.ValueOf(a).Kind()
-	bKind := reflect.ValueOf(b).Kind()
-	if aKind != bKind {
-		return nil, fmt.Errorf("The objects to compare are not the same, found obj1: %v, obj2: %v\n", aKind, bKind)
-	}
-	changelog, err := diff.Diff(a, b)
-
-	if len(changelog) == 0 {
-		return nil, err
-	}
-
-	return changelog, nil
-}
-
 func IsCloudSqlProxyEnabled(gcp *podtypes.GCP) bool {
 	return gcp != nil && gcp.CloudSQLProxy.Enabled
 }

From 28198d1fab83d540c1ca676a107a8a6f9d4c77f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20Haram=20Nyg=C3=A5rd?=
Date: Fri, 16 Aug 2024 11:51:52 +0200
Subject: [PATCH 3/4] fix: Autosync loop because we update access policy with
 ports (#510)

---
 internal/controllers/application.go           |  8 ++--
 internal/controllers/skipjob.go               |  7 ++--
 .../access-policy/advanced-error.yaml         | 35 +++++++++++++++++
 .../access-policy/chainsaw-test.yaml          |  2 +
 .../access-policy-job/chainsaw-test.yaml      |  4 ++
 .../access-policy-job/skipjob-assert.yaml     |  3 --
 .../skipjob-cron-assert.yaml                  |  3 --
 .../access-policy-job/skipjob-cron-error.yaml | 39 +++++++++++++++++++
 .../access-policy-job/skipjob-error.yaml      | 36 +++++++++++++++++
 9 files changed, 124 insertions(+), 13 deletions(-)
 create mode 100644 tests/application/access-policy/advanced-error.yaml
 create mode 100644 tests/skipjob/access-policy-job/skipjob-cron-error.yaml
 create mode 100644 tests/skipjob/access-policy-job/skipjob-error.yaml

diff --git a/internal/controllers/application.go b/internal/controllers/application.go
index 35acbb20..e2602e9e 100644
--- a/internal/controllers/application.go
+++ b/internal/controllers/application.go
@@ -171,6 +171,10 @@ func (r *ApplicationReconciler) Reconcile(ctx context.Context, req reconcile.Req
 		return reconcile.Result{Requeue: true}, err
 	}
 
+	//We try to feed the access policy with port values dynamically,
+	//if unsuccessful we just don't set ports, and rely on podselectors
+	r.UpdateAccessPolicy(ctx, application)
+
 	//Start the actual reconciliation
 	rLog.Debug("Starting reconciliation loop", "application", application.Name)
 	r.SetProgressingState(ctx, application, fmt.Sprintf("Application %v has started reconciliation loop", application.Name))
@@ -318,10 +322,6 @@ func (r *ApplicationReconciler) setApplicationDefaults(application *skiperatorv1
 	}
 
-	//We try to feed the access policy with port values dynamically,
-	//if unsuccessfull we just don't set ports, and rely on podselectors
-	r.UpdateAccessPolicy(ctx, application)
-
 	application.FillDefaultsStatus()
 }
diff --git a/internal/controllers/skipjob.go b/internal/controllers/skipjob.go
index 20a55249..74c7c370 100644
--- a/internal/controllers/skipjob.go
+++ b/internal/controllers/skipjob.go
@@ -123,6 +123,10 @@ func (r *SKIPJobReconciler) Reconcile(ctx context.Context, req reconcile.Request
 		return reconcile.Result{Requeue: true}, err
 	}
 
+	//We try to feed the access policy with port values dynamically,
+	//if unsuccessful we just don't set ports, and rely on podselectors
+	r.UpdateAccessPolicy(ctx, skipJob)
+
 	//Start the actual reconciliation
 	rLog.Debug("Starting reconciliation loop")
 	r.SetProgressingState(ctx, skipJob, fmt.Sprintf("SKIPJob %v has started reconciliation loop", skipJob.Name))
@@ -198,9 +202,6 @@ func (r *SKIPJobReconciler) setSKIPJobDefaults(ctx context.Context, skipJob *ski
 	}
 	resourceutils.SetSKIPJobLabels(skipJob, skipJob)
 	skipJob.FillDefaultStatus()
-	//We try to feed the access policy with port values dynamically,
-	//if unsuccessfull we just don't set ports, and rely on podselectors
-	r.UpdateAccessPolicy(ctx, skipJob)
 	return nil
 }
diff --git a/tests/application/access-policy/advanced-error.yaml b/tests/application/access-policy/advanced-error.yaml
new file mode 100644
index 00000000..46ba75c2
--- /dev/null
+++ b/tests/application/access-policy/advanced-error.yaml
@@ -0,0 +1,35 @@
+apiVersion: skiperator.kartverket.no/v1alpha1
+kind: Application
+metadata:
+  name: access-policy
+  namespace: access-policy-ns
+spec:
+  image: image
+  port: 8080
+  accessPolicy:
+    inbound:
+      rules:
+        - application: access-policy-other
+          namespace: access-policy-other
+    outbound:
+      external:
+        - host: example.com
+          ports:
+            - name: http
+              port: 80
+              protocol: HTTP
+        - host: foo.com
+      rules:
+        - application: access-policy-two
+          ports:
+            - port: 8080
+              protocol: TCP
+        - application: access-policy-other
+          namespace: access-policy-other
+          ports:
+            - port: 8080
+              protocol: TCP
+status:
+  conditions:
+    - type: InternalRulesValid
+      status: "True"
diff --git a/tests/application/access-policy/chainsaw-test.yaml b/tests/application/access-policy/chainsaw-test.yaml
index bfcd414d..0a321714 100644
--- a/tests/application/access-policy/chainsaw-test.yaml
+++ b/tests/application/access-policy/chainsaw-test.yaml
@@ -18,6 +18,8 @@ spec:
           file: advanced.yaml
       - assert:
           file: advanced-assert.yaml
+      - error:
+          file: advanced-error.yaml
     - try:
       - apply:
           file: advanced-patch.yaml
diff --git a/tests/skipjob/access-policy-job/chainsaw-test.yaml b/tests/skipjob/access-policy-job/chainsaw-test.yaml
index 8053322b..383a6c53 100644
--- a/tests/skipjob/access-policy-job/chainsaw-test.yaml
+++ b/tests/skipjob/access-policy-job/chainsaw-test.yaml
@@ -18,8 +18,12 @@ spec:
           file: skipjob.yaml
       - assert:
           file: skipjob-assert.yaml
+      - error:
+          file: skipjob-error.yaml
     - try:
       - apply:
          file: skipjob-cron.yaml
       - assert:
          file: skipjob-cron-assert.yaml
+      - error:
+          file: skipjob-cron-error.yaml
diff --git a/tests/skipjob/access-policy-job/skipjob-assert.yaml b/tests/skipjob/access-policy-job/skipjob-assert.yaml
index 37527dd3..5e2be8ad 100644
--- a/tests/skipjob/access-policy-job/skipjob-assert.yaml
+++ b/tests/skipjob/access-policy-job/skipjob-assert.yaml
@@ -88,9 +88,6 @@ spec:
       - host: foo.com
     rules:
       - application: minimal-application
-        ports:
-          - port: 8080
-            protocol: TCP
 status:
   conditions:
     - type: Failed
diff --git a/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml 
b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml
index 9fe3b98f..287679e4 100644
--- a/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml
+++ b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml
@@ -21,9 +21,6 @@ spec:
       - host: foo.com
     rules:
       - application: minimal-application
-        ports:
-          - port: 8080
-            protocol: TCP
   cron:
     schedule: "* * * * *"
 status:
diff --git a/tests/skipjob/access-policy-job/skipjob-cron-error.yaml b/tests/skipjob/access-policy-job/skipjob-cron-error.yaml
new file mode 100644
index 00000000..8317e348
--- /dev/null
+++ b/tests/skipjob/access-policy-job/skipjob-cron-error.yaml
@@ -0,0 +1,39 @@
+apiVersion: skiperator.kartverket.no/v1alpha1
+kind: SKIPJob
+metadata:
+  name: access-policy-cron-job
+spec:
+  container:
+    image: "perl:5.34.0"
+    command:
+      - "perl"
+      - "-Mbignum=bpi"
+      - "-wle"
+      - "print bpi(2000)"
+  accessPolicy:
+    outbound:
+      external:
+        - host: example.com
+          ports:
+            - name: http
+              port: 80
+              protocol: HTTP
+        - host: foo.com
+      rules:
+        - application: minimal-application
+          ports:
+            - port: 8080
+              protocol: TCP
+  cron:
+    schedule: "* * * * *"
+status:
+  conditions:
+    - type: Failed
+      status: "False"
+    - type: Running
+      status: "True"
+    - type: Finished
+      status: "False"
+    - type: InternalRulesValid
+      status: "True"
+
diff --git a/tests/skipjob/access-policy-job/skipjob-error.yaml b/tests/skipjob/access-policy-job/skipjob-error.yaml
new file mode 100644
index 00000000..6b151575
--- /dev/null
+++ b/tests/skipjob/access-policy-job/skipjob-error.yaml
@@ -0,0 +1,36 @@
+apiVersion: skiperator.kartverket.no/v1alpha1
+kind: SKIPJob
+metadata:
+  name: access-policy-job
+spec:
+  container:
+    image: "perl:5.34.0"
+    command:
+      - "perl"
+      - "-Mbignum=bpi"
+      - "-wle"
+      - "print bpi(2000)"
+  accessPolicy:
+    outbound:
+      external:
+        - host: example.com
+          ports:
+            - name: http
+              port: 80
+              protocol: HTTP
+        - host: foo.com
+      rules:
+        - application: minimal-application
+          ports:
+            - port: 8080
+              protocol: TCP
+status:
+  conditions:
+    - type: Failed
+      status: "False"
+    - type: Running
+      status: "True"
+    - type: Finished
+      status: "False"
+    - type: InternalRulesValid
+      status: "True"

From 40dd745d5668beb4b3e8e89995e6e5682c762adc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Martin=20Haram=20Nyg=C3=A5rd?=
Date: Mon, 19 Aug 2024 10:35:15 +0200
Subject: [PATCH 4/4] Add missing annotation for skipjobs and use service
 instead of application for dynamic ports (#514)

---
 internal/controllers/common/reconciler.go     | 19 ++++++----
 internal/controllers/skipjob.go               |  7 ++--
 .../access-policy-istio-assert.yaml           | 23 ++++++++++++
 .../access-policy/access-policy-istio.yaml    | 28 +++++++++++++++
 .../access-policy/chainsaw-test.yaml          |  5 +++
 .../skipjob/access-policy-job/app-istio.yaml  | 16 +++++++++
 .../access-policy-job/application.yaml        |  1 +
 .../access-policy-job/chainsaw-test.yaml      |  7 ++++
 .../netpol-istio-assert.yaml                  | 36 +++++++++++++++++++
 .../access-policy-job/netpol-istio.yaml       | 17 +++++++++
 .../access-policy-job/skipjob-assert.yaml     |  2 ++
 .../skipjob-cron-assert.yaml                  |  2 ++
 12 files changed, 152 insertions(+), 11 deletions(-)
 create mode 100644 tests/application/access-policy/access-policy-istio-assert.yaml
 create mode 100644 tests/application/access-policy/access-policy-istio.yaml
 create mode 100644 tests/skipjob/access-policy-job/app-istio.yaml
 create mode 100644 tests/skipjob/access-policy-job/netpol-istio-assert.yaml
 create mode 100644 tests/skipjob/access-policy-job/netpol-istio.yaml

diff --git a/internal/controllers/common/reconciler.go 
b/internal/controllers/common/reconciler.go index 57df9dbd..5f459212 100644 --- a/internal/controllers/common/reconciler.go +++ b/internal/controllers/common/reconciler.go @@ -184,13 +184,20 @@ func (r *ReconcilerBase) updateStatus(ctx context.Context, skipObj v1alpha1.SKIP } } -func (r *ReconcilerBase) getTargetApplication(ctx context.Context, appName string, namespace string) (*v1alpha1.Application, error) { - application := &v1alpha1.Application{} - if err := r.GetClient().Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, application); err != nil { +func (r *ReconcilerBase) getTargetApplicationPorts(ctx context.Context, appName string, namespace string) ([]networkingv1.NetworkPolicyPort, error) { + service := &corev1.Service{} + if err := r.GetClient().Get(ctx, types.NamespacedName{Name: appName, Namespace: namespace}, service); err != nil { return nil, fmt.Errorf("error when trying to get target application: %w", err) } - return application, nil + var servicePorts []networkingv1.NetworkPolicyPort + + for _, port := range service.Spec.Ports { + servicePorts = append(servicePorts, networkingv1.NetworkPolicyPort{ + Port: util.PointTo(intstr.FromInt32(port.Port)), + }) + } + return servicePorts, nil } func (r *ReconcilerBase) UpdateAccessPolicy(ctx context.Context, obj v1alpha1.SKIPObject) { @@ -225,11 +232,11 @@ func (r *ReconcilerBase) setPortsForRules(ctx context.Context, rules []podtypes. } namespace = namespaces.Items[0].Name } - targetApp, err := r.getTargetApplication(ctx, rule.Application, namespace) + targetAppPorts, err := r.getTargetApplicationPorts(ctx, rule.Application, namespace) if err != nil { return err } - rule.Ports = []networkingv1.NetworkPolicyPort{{Port: util.PointTo(intstr.FromInt32(int32(targetApp.Spec.Port)))}} + rule.Ports = targetAppPorts } return nil } diff --git a/internal/controllers/skipjob.go b/internal/controllers/skipjob.go index 74c7c370..37db84fb 100644 --- a/internal/controllers/skipjob.go +++ b/internal/controllers/skipjob.go @@ -151,7 +151,7 @@ func (r *SKIPJobReconciler) Reconcile(ctx context.Context, req reconcile.Request for _, f := range resourceGeneration { if err := f(reconciliation); err != nil { rLog.Error(err, "failed to generate skipjob resource") - //At this point we don't have the gvk of the resource yet, so we can't set subresource status. + // At this point we don't have the gvk of the resource yet, so we can't set subresource status. 
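+			// Hedged editorial note, kept as a Go comment to preserve the diff: the failure is
+			// instead surfaced on the SKIPJob's own status via SetErrorState below, since
+			// recording a per-subresource status entry requires the resource's GVK.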
r.SetErrorState(ctx, skipJob, err, "failed to generate skipjob resource", "ResourceGenerationFailure") return common.RequeueWithError(err) } @@ -208,13 +208,10 @@ func (r *SKIPJobReconciler) setSKIPJobDefaults(ctx context.Context, skipJob *ski func (r *SKIPJobReconciler) setResourceDefaults(resources []client.Object, skipJob *skiperatorv1alpha1.SKIPJob) error { for _, resource := range resources { - if err := resourceutils.AddGVK(r.GetScheme(), resource); err != nil { + if err := r.SetSubresourceDefaults(resources, skipJob); err != nil { return err } resourceutils.SetSKIPJobLabels(resource, skipJob) - if err := resourceutils.SetOwnerReference(skipJob, resource, r.GetScheme()); err != nil { - return err - } } return nil } diff --git a/tests/application/access-policy/access-policy-istio-assert.yaml b/tests/application/access-policy/access-policy-istio-assert.yaml new file mode 100644 index 00000000..b33ac96a --- /dev/null +++ b/tests/application/access-policy/access-policy-istio-assert.yaml @@ -0,0 +1,23 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: access-policy-to-istio-app +spec: + egress: + - ports: + - port: 8080 + protocol: TCP + - port: 15020 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ns-with-istio + podSelector: + matchLabels: + app: istio-application + podSelector: + matchLabels: + app: access-policy-to-istio-app + policyTypes: + - Egress diff --git a/tests/application/access-policy/access-policy-istio.yaml b/tests/application/access-policy/access-policy-istio.yaml new file mode 100644 index 00000000..3393fd19 --- /dev/null +++ b/tests/application/access-policy/access-policy-istio.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ns-with-istio + labels: + istio.io/rev: asm-stable +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: istio-application + namespace: ns-with-istio +spec: + image: image + port: 8080 +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: access-policy-to-istio-app +spec: + image: image + port: 8080 + accessPolicy: + outbound: + rules: + - application: istio-application + namespace: ns-with-istio diff --git a/tests/application/access-policy/chainsaw-test.yaml b/tests/application/access-policy/chainsaw-test.yaml index 0a321714..fefca27f 100644 --- a/tests/application/access-policy/chainsaw-test.yaml +++ b/tests/application/access-policy/chainsaw-test.yaml @@ -32,3 +32,8 @@ spec: file: bad-policy-assert.yaml - error: file: bad-policy-error.yaml + - try: + - apply: + file: access-policy-istio.yaml + - assert: + file: access-policy-istio-assert.yaml diff --git a/tests/skipjob/access-policy-job/app-istio.yaml b/tests/skipjob/access-policy-job/app-istio.yaml new file mode 100644 index 00000000..908b953e --- /dev/null +++ b/tests/skipjob/access-policy-job/app-istio.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: ns-with-istio + labels: + istio.io/rev: asm-stable +--- +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: Application +metadata: + name: istio-application + namespace: ns-with-istio +spec: + image: image + port: 8080 + diff --git a/tests/skipjob/access-policy-job/application.yaml b/tests/skipjob/access-policy-job/application.yaml index 8b0f99af..ca46586c 100644 --- a/tests/skipjob/access-policy-job/application.yaml +++ b/tests/skipjob/access-policy-job/application.yaml @@ -5,3 +5,4 @@ metadata: spec: image: image port: 8080 +--- diff --git 
a/tests/skipjob/access-policy-job/chainsaw-test.yaml b/tests/skipjob/access-policy-job/chainsaw-test.yaml index 383a6c53..810d08f3 100644 --- a/tests/skipjob/access-policy-job/chainsaw-test.yaml +++ b/tests/skipjob/access-policy-job/chainsaw-test.yaml @@ -27,3 +27,10 @@ spec: file: skipjob-cron-assert.yaml - error: file: skipjob-cron-error.yaml + - try: + - apply: + file: app-istio.yaml + - apply: + file: netpol-istio.yaml + - assert: + file: netpol-istio-assert.yaml diff --git a/tests/skipjob/access-policy-job/netpol-istio-assert.yaml b/tests/skipjob/access-policy-job/netpol-istio-assert.yaml new file mode 100644 index 00000000..759eb9d4 --- /dev/null +++ b/tests/skipjob/access-policy-job/netpol-istio-assert.yaml @@ -0,0 +1,36 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: istio-policy-job-skipjob + annotations: + argocd.argoproj.io/sync-options: "Prune=false" + labels: + app.kubernetes.io/managed-by: skiperator + skiperator.kartverket.no/controller: skipjob + skiperator.kartverket.no/skipjob: 'true' + skiperator.kartverket.no/skipjobName: istio-policy-job + ownerReferences: + - apiVersion: skiperator.kartverket.no/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: SKIPJob + name: istio-policy-job +spec: + egress: + - ports: + - port: 8080 + protocol: TCP + - port: 15020 + protocol: TCP + to: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: ns-with-istio + podSelector: + matchLabels: + app: istio-application + podSelector: + matchLabels: + app: istio-policy-job-skipjob + policyTypes: + - Egress diff --git a/tests/skipjob/access-policy-job/netpol-istio.yaml b/tests/skipjob/access-policy-job/netpol-istio.yaml new file mode 100644 index 00000000..20580997 --- /dev/null +++ b/tests/skipjob/access-policy-job/netpol-istio.yaml @@ -0,0 +1,17 @@ +apiVersion: skiperator.kartverket.no/v1alpha1 +kind: SKIPJob +metadata: + name: istio-policy-job +spec: + container: + image: "perl:5.34.0" + command: + - "perl" + - "-Mbignum=bpi" + - "-wle" + - "print bpi(2000)" + accessPolicy: + outbound: + rules: + - application: istio-application + namespace: ns-with-istio diff --git a/tests/skipjob/access-policy-job/skipjob-assert.yaml b/tests/skipjob/access-policy-job/skipjob-assert.yaml index 5e2be8ad..afa9d01d 100644 --- a/tests/skipjob/access-policy-job/skipjob-assert.yaml +++ b/tests/skipjob/access-policy-job/skipjob-assert.yaml @@ -2,6 +2,8 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: access-policy-job-skipjob + annotations: + argocd.argoproj.io/sync-options: "Prune=false" labels: app.kubernetes.io/managed-by: skiperator skiperator.kartverket.no/controller: skipjob diff --git a/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml index 287679e4..f6689493 100644 --- a/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml +++ b/tests/skipjob/access-policy-job/skipjob-cron-assert.yaml @@ -39,6 +39,8 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: name: access-policy-cron-job-skipjob + annotations: + argocd.argoproj.io/sync-options: "Prune=false" ownerReferences: - apiVersion: skiperator.kartverket.no/v1alpha1 blockOwnerDeletion: true