From 55535d543a46c79bc3a9e3b96502d8dbee2624b8 Mon Sep 17 00:00:00 2001 From: shreddedbacon Date: Mon, 15 Apr 2024 08:46:30 +1000 Subject: [PATCH] chore: restructure local dev to use metallb and certmanager --- Makefile | 213 ++-- .../developing-lagoon.md | 110 +- k3d-calico.config.yaml.tpl | 13 +- k3d.config.yaml.tpl | 9 +- .../k3d-seed-data/00-populate-kubernetes.gql | 967 ++++++++++++++++++ local-dev/k3d-seed-data/seed-users.sh | 52 + .../files/drupal8-mariadb-single/.lagoon.yml | 4 +- .../drush/aliases.drushrc.php | 2 +- .../files/drupal9-mariadb-single/.lagoon.yml | 4 +- 9 files changed, 1254 insertions(+), 120 deletions(-) create mode 100644 local-dev/k3d-seed-data/00-populate-kubernetes.gql create mode 100644 local-dev/k3d-seed-data/seed-users.sh diff --git a/Makefile b/Makefile index 2e268635a5..2d9b0c6bab 100644 --- a/Makefile +++ b/Makefile @@ -89,6 +89,14 @@ else PLATFORM_ARCH ?= linux/amd64 endif +# this enables the ssh portal and other related services to be exposed on a LoadBalancer for local development usage +LAGOON_SSH_PORTAL_LOADBALANCER ?= true + +HELM = $(realpath ./local-dev/helm) +KUBECTL = $(realpath ./local-dev/kubectl) +JQ = $(realpath ./local-dev/jq) +K3D = $(realpath ./local-dev/k3d) + ####### ####### Functions ####### @@ -249,8 +257,13 @@ build-ui-logs-development: # Wait for Keycloak to be ready (before this no API calls will work) .PHONY: wait-for-keycloak wait-for-keycloak: - $(info Waiting for Keycloak to be ready....) - grep -m 1 "Config of Keycloak done." <(docker-compose -p $(CI_BUILD_TAG) --compatibility logs -f keycloak 2>&1) + @$(info Waiting for Keycloak to be ready....) + @grep -m 1 "Config of Keycloak done." <(docker-compose -p $(CI_BUILD_TAG) --compatibility logs -f keycloak 2>&1) + @docker-compose -p $(CI_BUILD_TAG) cp ./local-dev/k3d-seed-data/seed-users.sh keycloak:/tmp/seed-users.sh \ + && docker-compose -p $(CI_BUILD_TAG) exec -it keycloak bash '/tmp/seed-users.sh' \ + && echo "You will be able to log in with these seed user email addresses and the passwords will be the same as the email address" \ + && echo "eg. 
maintainer@example.com has the password maintainer@example.com" \ + && echo "" # Define a list of which Lagoon Services are needed for running any deployment testing main-test-services = actions-handler broker logs2notifications api api-db api-redis keycloak keycloak-db ssh auth-server local-git local-api-data-watcher-pusher local-minio @@ -352,14 +365,17 @@ local-dev-yarn-stop: .PHONY: ui-development ui-development: build-ui-logs-development IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher ui keycloak keycloak-db broker api-redis + $(MAKE) wait-for-keycloak .PHONY: api-development api-development: build-ui-logs-development IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db local-api-data-watcher-pusher keycloak keycloak-db broker api-redis + $(MAKE) wait-for-keycloak .PHONY: ui-logs-development ui-logs-development: build-ui-logs-development IMAGE_REPO=$(CI_BUILD_TAG) docker-compose -p $(CI_BUILD_TAG) --compatibility up -d api api-db actions-handler local-api-data-watcher-pusher ui keycloak keycloak-db broker api-redis logs2notifications local-minio mailhog + $(MAKE) wait-for-keycloak ## CI targets @@ -371,9 +387,12 @@ STERN_VERSION = v2.6.1 CHART_TESTING_VERSION = v3.10.1 K3D_IMAGE = docker.io/rancher/k3s:v1.28.6-k3s2 TESTS = [nginx,api,features-kubernetes,bulk-deployment,features-kubernetes-2,features-variables,active-standby-kubernetes,tasks,drush,python,gitlab,github,bitbucket,services,workflows] -CHARTS_TREEISH = main +CHARTS_TREEISH = dev-restructure TASK_IMAGES = task-activestandby +# the name of the docker network to create +DOCKER_NETWORK = k3d + # Symlink the installed kubectl client if the correct version is already # installed, otherwise downloads it. 
local-dev/kubectl: @@ -438,45 +457,45 @@ endif .PHONY: helm/repos helm/repos: local-dev/helm # install repo dependencies required by the charts - ./local-dev/helm repo add harbor https://helm.goharbor.io - ./local-dev/helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx - ./local-dev/helm repo add stable https://charts.helm.sh/stable - ./local-dev/helm repo add bitnami https://charts.bitnami.com/bitnami - ./local-dev/helm repo add amazeeio https://amazeeio.github.io/charts/ - ./local-dev/helm repo add lagoon https://uselagoon.github.io/lagoon-charts/ - ./local-dev/helm repo add minio https://charts.min.io/ - ./local-dev/helm repo add nats https://nats-io.github.io/k8s/helm/charts/ - ./local-dev/helm repo update + $(HELM) repo add harbor https://helm.goharbor.io + $(HELM) repo add ingress-nginx https://kubernetes.github.io/ingress-nginx + $(HELM) repo add stable https://charts.helm.sh/stable + $(HELM) repo add bitnami https://charts.bitnami.com/bitnami + $(HELM) repo add amazeeio https://amazeeio.github.io/charts/ + $(HELM) repo add lagoon https://uselagoon.github.io/lagoon-charts/ + $(HELM) repo add minio https://charts.min.io/ + $(HELM) repo add nats https://nats-io.github.io/k8s/helm/charts/ + $(HELM) repo add metallb https://metallb.github.io/metallb + $(HELM) repo add jetstack https://charts.jetstack.io + $(HELM) repo update # stand up a k3d cluster configured appropriately for lagoon testing .PHONY: k3d/cluster k3d/cluster: local-dev/k3d - ./local-dev/k3d cluster list | grep -q "$(CI_BUILD_TAG)" && exit; \ - docker network create k3d || true \ + $(K3D) cluster list | grep -q "$(CI_BUILD_TAG)" && exit; \ + docker network create $(DOCKER_NETWORK) || true \ && export KUBECONFIG=$$(mktemp) \ K3DCONFIG=$$(mktemp ./k3dconfig.XXX) \ - K3D_NODE_IP=$$(docker run --rm --network k3d alpine ip -o addr show eth0 | sed -nE 's/.* ([0-9.]{7,})\/.*/\1/p') && \ - if [[ $(ARCH) == darwin ]]; then \ - K3D_NODE_IP=$$(echo $$K3D_NODE_IP | awk -F '.' '{$$4++;printf "%d.%d.%d.%d",$$1,$$2,$$3,$$4}'); \ - fi \ + && LAGOON_K3D_CIDR_BLOCK=$$(docker network inspect $(DOCKER_NETWORK) | $(JQ) '. [0].IPAM.Config[0].Subnet' | tr -d '"') \ + && export LAGOON_K3D_NETWORK=$$(echo $${LAGOON_K3D_CIDR_BLOCK%???} | awk -F'.' 
'{print $$1,$$2,$$3,240}' OFS='.') \ && chmod 644 $$KUBECONFIG \ $$([ $(USE_CALICO_CNI) != true ] && envsubst < ./k3d.config.yaml.tpl > $$K3DCONFIG) \ $$([ $(USE_CALICO_CNI) = true ] && envsubst < ./k3d-calico.config.yaml.tpl > $$K3DCONFIG) \ $$([ $(USE_CALICO_CNI) = true ] && wget -N https://k3d.io/$(K3D_VERSION)/usage/advanced/calico.yaml) \ - && ./local-dev/k3d cluster create $(CI_BUILD_TAG) --image $(K3D_IMAGE) --wait --timeout 120s --config=$$K3DCONFIG --kubeconfig-update-default --kubeconfig-switch-context \ + && $(K3D) cluster create $(CI_BUILD_TAG) --image $(K3D_IMAGE) --wait --timeout 120s --config=$$K3DCONFIG --kubeconfig-update-default --kubeconfig-switch-context \ && cp $$KUBECONFIG "kubeconfig.k3d.$(CI_BUILD_TAG)" \ && echo -e 'Interact with the cluster during the test run in Jenkins like so:\n' \ && echo "export KUBECONFIG=\$$(mktemp) && scp $$NODE_NAME:$$KUBECONFIG \$$KUBECONFIG && K3D_PORT=\$$(sed -nE 's/.+server:.+:([0-9]+)/\1/p' \$$KUBECONFIG) && ssh -fNL \$$K3D_PORT:127.0.0.1:\$$K3D_PORT $$NODE_NAME" \ && echo -e '\nOr running locally:\n' \ - && echo -e 'export KUBECONFIG=$$(./local-dev/k3d kubeconfig write $(CI_BUILD_TAG))\n' \ + && echo -e 'export KUBECONFIG=$$($(K3D) kubeconfig write $(CI_BUILD_TAG))\n' \ && echo -e 'kubectl ...\n' ifeq ($(ARCH), darwin) export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" && \ - if ! ifconfig lo0 | grep $$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}') -q; then sudo ifconfig lo0 alias $$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}'); fi + if ! ifconfig lo0 | grep $$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.addresses[0].address}') -q; then sudo ifconfig lo0 alias $$($(KUBECTL) get nodes -o jsonpath='{.items[0].status.addresses[0].address}'); fi docker rm --force $(CI_BUILD_TAG)-k3d-proxy-32080 || true docker run -d --name $(CI_BUILD_TAG)-k3d-proxy-32080 \ --publish 32080:32080 \ - --link k3d-$(CI_BUILD_TAG)-server-0:target --network k3d \ + --link k3d-$(CI_BUILD_TAG)-server-0:target --network $(DOCKER_NETWORK) \ alpine/socat -dd \ tcp-listen:32080,fork,reuseaddr tcp-connect:target:32080 endif @@ -487,28 +506,21 @@ K3D_TOOLS = k3d helm kubectl jq stern # install lagoon charts and run lagoon test suites in a k3d cluster .PHONY: k3d/test -k3d/test: k3d/cluster helm/repos $(addprefix local-dev/,$(K3D_TOOLS)) build - export CHARTSDIR=$$(mktemp -d ./lagoon-charts.XXX) \ - && ln -sfn "$$CHARTSDIR" lagoon-charts.k3d.lagoon \ - && git clone https://github.com/uselagoon/lagoon-charts.git "$$CHARTSDIR" \ - && cd "$$CHARTSDIR" \ - && git checkout $(CHARTS_TREEISH) \ - && export KUBECONFIG="$$(realpath ../kubeconfig.k3d.$(CI_BUILD_TAG))" \ - && export IMAGE_REGISTRY="registry.$$(../local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \ - && $(MAKE) install-registry HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) USE_CALICO_CNI=false \ - && cd .. 
&& $(MAKE) k3d/push-images && cd "$$CHARTSDIR" \ - && $(MAKE) fill-test-ci-values TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ - HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \ - JQ=$$(realpath ../local-dev/jq) \ +k3d/test: k3d/setup + export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" \ + && cd lagoon-charts.k3d.lagoon \ + && export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ + && $(MAKE) fill-test-ci-values DOCKER_NETWORK=$(DOCKER_NETWORK) TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ + HELM=$(HELM) KUBECTL=$(KUBECTL) JQ=$(JQ) \ OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=uselagoon/build-deploy-image:${BUILD_DEPLOY_IMAGE_TAG} \ - $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG)') \ - $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY)') \ - OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE=$$IMAGE_REGISTRY/task-activestandby:$(SAFE_BRANCH_NAME) \ - IMAGE_REGISTRY=$$IMAGE_REGISTRY \ - SKIP_INSTALL_REGISTRY=true \ + OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library/task-activestandby:$(SAFE_BRANCH_NAME)" \ + IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ + SKIP_ALL_DEPS=true \ LAGOON_FEATURE_FLAG_DEFAULT_ISOLATION_NETWORK_POLICY=enabled \ USE_CALICO_CNI=false \ + LAGOON_SSH_PORTAL_LOADBALANCER=$(LAGOON_SSH_PORTAL_LOADBALANCER) \ LAGOON_FEATURE_FLAG_DEFAULT_ROOTLESS_WORKLOAD=enabled \ + CLEAR_API_DATA=$(CLEAR_API_DATA) \ && docker run --rm --network host --name ct-$(CI_BUILD_TAG) \ --volume "$$(pwd)/test-suite-run.ct.yaml:/etc/ct/ct.yaml" \ --volume "$$(pwd):/workdir" \ @@ -528,17 +540,26 @@ k3d/setup: k3d/cluster helm/repos $(addprefix local-dev/,$(K3D_TOOLS)) build && cd "$$CHARTSDIR" \ && git checkout $(CHARTS_TREEISH) \ && export KUBECONFIG="$$(realpath ../kubeconfig.k3d.$(CI_BUILD_TAG))" \ - && export IMAGE_REGISTRY="registry.$$(../local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \ - && $(MAKE) install-registry HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \ - && cd .. && $(MAKE) -j6 k3d/push-images && cd "$$CHARTSDIR" \ - && $(MAKE) fill-test-ci-values TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ - HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \ - JQ=$$(realpath ../local-dev/jq) \ + && export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ + && $(MAKE) install-registry DOCKER_NETWORK=$(DOCKER_NETWORK) JQ=$(JQ) HELM=$(HELM) KUBECTL=$(KUBECTL) USE_CALICO_CNI=false \ + && cd .. 
&& $(MAKE) k3d/push-images JQ=$(JQ) HELM=$(HELM) KUBECTL=$(KUBECTL) && cd "$$CHARTSDIR" \ + && $(MAKE) fill-test-ci-values DOCKER_NETWORK=$(DOCKER_NETWORK) TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ + HELM=$(HELM) KUBECTL=$(KUBECTL) JQ=$(JQ) \ OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=uselagoon/build-deploy-image:${BUILD_DEPLOY_IMAGE_TAG} \ $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG)') \ $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY)') \ - OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE=$$IMAGE_REGISTRY/task-activestandby:$(SAFE_BRANCH_NAME) \ - IMAGE_REGISTRY=$$IMAGE_REGISTRY + OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library/task-activestandby:$(SAFE_BRANCH_NAME)" \ + IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ + SKIP_INSTALL_REGISTRY=true \ + LAGOON_FEATURE_FLAG_DEFAULT_ISOLATION_NETWORK_POLICY=enabled \ + USE_CALICO_CNI=false \ + LAGOON_SSH_PORTAL_LOADBALANCER=$(LAGOON_SSH_PORTAL_LOADBALANCER) \ + LAGOON_FEATURE_FLAG_DEFAULT_ROOTLESS_WORKLOAD=enabled + +# k3d/local-stack will deploy and seed a lagoon-core with a lagoon-remote and all basic services to get you going +# and will provide some initial seed data for a user to jump right in and start using lagoon +.PHONY: k3d/local-stack +k3d/local-stack: k3d/setup k3d/seed-data k3d/get-lagoon-details # k3d/local-dev-patch will build the services in LOCAL_DEV_SERVICES on your machine, and then use kubectl patch to mount the folders into Kubernetes # the deployments should be restarted to trigger any updated code changes @@ -547,14 +568,14 @@ k3d/setup: k3d/cluster helm/repos $(addprefix local-dev/,$(K3D_TOOLS)) build .PHONY: k3d/local-dev-patch k3d/local-dev-patch: export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" && \ - export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \ + export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ && for image in $(LOCAL_DEV_SERVICES); do \ echo "building $$image" \ && cd services/$$image && yarn install && yarn build && cd ../..; \ done \ && for image in $(LOCAL_DEV_SERVICES); do \ echo "patching lagoon-core-$$image" \ - && ./local-dev/kubectl --namespace lagoon patch deployment lagoon-core-$$image --patch-file ./local-dev/kubectl-patches/$$image.yaml; \ + && $(KUBECTL) --namespace lagoon-core patch deployment lagoon-core-$$image --patch-file ./local-dev/kubectl-patches/$$image.yaml; \ done ## Use local-dev-logging to deploy an Elasticsearch/Kibana cluster into docker compose and forward @@ -563,14 +584,14 @@ k3d/local-dev-patch: k3d/local-dev-logging: export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" \ && docker-compose -f local-dev/odfe-docker-compose.yml -p odfe up -d \ - && ./local-dev/helm upgrade --install --create-namespace \ + && $(HELM) upgrade --install --create-namespace \ --namespace lagoon-logs-concentrator \ --wait --timeout 15m \ --values ./local-dev/lagoon-logs-concentrator.values.yaml \ 
lagoon-logs-concentrator \ ./lagoon-charts.k3d.lagoon/charts/lagoon-logs-concentrator \ - && ./local-dev/helm dependency update ./lagoon-charts.k3d.lagoon/charts/lagoon-logging \ - && ./local-dev/helm upgrade --install --create-namespace --namespace lagoon-logging \ + && $(HELM) dependency update ./lagoon-charts.k3d.lagoon/charts/lagoon-logging \ + && $(HELM) upgrade --install --create-namespace --namespace lagoon-logging \ --wait --timeout 15m \ --values ./local-dev/lagoon-logging.values.yaml \ lagoon-logging \ @@ -583,37 +604,78 @@ k3d/local-dev-logging: # into the image registry and reinstalls the lagoon-core helm chart. .PHONY: k3d/dev k3d/dev: build - export KUBECONFIG="$$(realpath ./kubeconfig.k3d.$(CI_BUILD_TAG))" \ - && export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \ + @export KUBECONFIG="$$(realpath ./kubeconfig.k3d.$(CI_BUILD_TAG))" \ + && export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ && $(MAKE) k3d/push-images && cd lagoon-charts.k3d.lagoon \ - && $(MAKE) install-lagoon-core IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ - HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \ - JQ=$$(realpath ../local-dev/jq) \ + && $(MAKE) install-lagoon-core DOCKER_NETWORK=$(DOCKER_NETWORK) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \ + HELM=$(HELM) KUBECTL=$(KUBECTL) \ + JQ=$(JQ) \ OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=uselagoon/build-deploy-image:${BUILD_DEPLOY_IMAGE_TAG} \ $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGETAG)') \ $$([ $(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY) ] && echo 'OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY=$(OVERRIDE_BUILD_DEPLOY_CONTROLLER_IMAGE_REPOSITORY)') \ OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE=$$IMAGE_REGISTRY/task-activestandby:$(SAFE_BRANCH_NAME) \ - IMAGE_REGISTRY=$$IMAGE_REGISTRY + LAGOON_SSH_PORTAL_LOADBALANCER=$(LAGOON_SSH_PORTAL_LOADBALANCER) \ + IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" + @$(MAKE) k3d/get-lagoon-details # k3d/push-images pushes locally build images into the k3d cluster registry. IMAGES = $(K3D_SERVICES) $(LOCAL_DEV_SERVICES) $(TASK_IMAGES) .PHONY: k3d/push-images k3d/push-images: - export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" && \ - export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \ + @export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" && \ + export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \ && docker login -u admin -p Harbor12345 $$IMAGE_REGISTRY \ && for image in $(IMAGES); do \ docker tag $(CI_BUILD_TAG)/$$image $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME) \ && docker push $$IMAGE_REGISTRY/$$image:$(SAFE_BRANCH_NAME); \ done -# Use k3d/get-admin-creds to retrieve the admin JWT, Lagoon admin password, and the password for the lagoonadmin user. -# These credentials are re-created on every re-install of Lagoon Core. 
-.PHONY: k3d/get-admin-creds -k3d/get-admin-creds: - export KUBECONFIG="$$(realpath ./kubeconfig.k3d.$(CI_BUILD_TAG))" \ - && cd lagoon-charts.k3d.lagoon \ - && $(MAKE) get-admin-creds +# Use k3d/get-lagoon-details to retrieve information related to accessing the local k3d deployed lagoon and its services +.PHONY: k3d/get-lagoon-details +k3d/get-lagoon-details: + @export KUBECONFIG="$$(realpath ./kubeconfig.k3d.$(CI_BUILD_TAG))" && \ + echo "===============================" && \ + echo "Lagoon UI URL: http://lagoon-ui.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ + && echo "Lagoon API URL: http://lagoon-api.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/graphql" \ + && echo "Lagoon API admin legacy token: $$(docker run \ + -e JWTSECRET="$$($(KUBECTL) get secret -n lagoon-core lagoon-core-secrets -o jsonpath="{.data.JWTSECRET}" | base64 --decode)" \ + -e JWTAUDIENCE=api.dev \ + -e JWTUSER=localadmin \ + uselagoon/tests \ + python3 /ansible/tasks/api/admin_token.py)" \ + && echo "Lagoon webhook URL: http://lagoon-webhook.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ + && echo "SSH Core Service: $$($(KUBECTL) -n lagoon-core get services lagoon-core-ssh -o jsonpath='{.status.loadBalancer.ingress[0].ip}'):$$($(KUBECTL) -n lagoon-core get services lagoon-core-ssh -o jsonpath='{.spec.ports[0].port}')" \ + && echo "SSH Portal Service: $$($(KUBECTL) -n lagoon get services lagoon-remote-ssh-portal -o jsonpath='{.status.loadBalancer.ingress[0].ip}'):$$($(KUBECTL) -n lagoon get services lagoon-remote-ssh-portal -o jsonpath='{.spec.ports[0].port}')" \ + && echo "SSH Token Service: $$($(KUBECTL) -n lagoon-core get services lagoon-core-ssh-token -o jsonpath='{.status.loadBalancer.ingress[0].ip}'):$$($(KUBECTL) -n lagoon-core get services lagoon-core-ssh-token -o jsonpath='{.spec.ports[0].port}')" \ + && echo "Lagoon webhook URL: http://lagoon-webhook.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io" \ + && echo "Keycloak admin URL: http://lagoon-keycloak.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/auth" \ + && echo "Keycloak admin password: $$($(KUBECTL) get secret -n lagoon-core lagoon-core-keycloak -o jsonpath="{.data.KEYCLOAK_ADMIN_PASSWORD}" | base64 --decode)" \ + && echo "" \ + +# k3d/seed-data is a way to seed a lagoon-core deployed via k3d/setup. +# it is also called as part of k3d/local-stack though so should not need to be called directly. 
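+# A minimal usage sketch of the individual steps that k3d/local-stack chains together
+# (the seed step assumes the kubeconfig.k3d.$(CI_BUILD_TAG) file written by k3d/cluster exists):
+#   make k3d/setup
+#   make k3d/seed-data
+#   make k3d/get-lagoon-details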
+.PHONY: k3d/seed-data +k3d/seed-data: + @export KUBECONFIG="$$(realpath ./kubeconfig.k3d.$(CI_BUILD_TAG))" && \ + export LAGOON_LEGACY_ADMIN=$$(docker run \ + -e JWTSECRET="$$($(KUBECTL) get secret -n lagoon-core lagoon-core-secrets -o jsonpath="{.data.JWTSECRET}" | base64 --decode)" \ + -e JWTAUDIENCE=api.dev \ + -e JWTUSER=localadmin \ + uselagoon/tests \ + python3 /ansible/tasks/api/admin_token.py) && \ + echo "Loading API seed data" && \ + export SSH_PORTAL_HOST="$$($(KUBECTL) -n lagoon get services lagoon-remote-ssh-portal -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" && \ + export SSH_PORTAL_PORT="$$($(KUBECTL) -n lagoon get services lagoon-remote-ssh-portal -o jsonpath='{.spec.ports[0].port}')" && \ + export ROUTER_PATTERN="\$${project}.\$${environment}.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" && \ + export SEED_DATA=$$(envsubst < ./local-dev/k3d-seed-data/00-populate-kubernetes.gql | sed 's/"/\\"/g' | sed 's/\\n/\\\\n/g' | awk -F'\n' '{if(NR == 1) {printf $$0} else {printf "\\n"$$0}}') && \ + export SEED_DATA_JSON="{\"query\": \"$$SEED_DATA\"}" && \ + wget --quiet --header "Content-Type: application/json" --header "Authorization: bearer $${LAGOON_LEGACY_ADMIN}" "http://lagoon-api.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/graphql" --post-data "$$SEED_DATA_JSON" --content-on-error -O - && \ + echo "Loading API seed users" && \ + cat ./local-dev/k3d-seed-data/seed-users.sh | $(KUBECTL) -n lagoon-core exec -i $$($(KUBECTL) -n lagoon-core get pods -l app.kubernetes.io/component=lagoon-core-keycloak -o json | $(JQ) -r '.items[0].metadata.name') -- sh -c "cat > /tmp/seed-users.sh" \ + && $(KUBECTL) -n lagoon-core exec -it $$($(KUBECTL) -n lagoon-core get pods -l app.kubernetes.io/component=lagoon-core-keycloak -o json | $(JQ) -r '.items[0].metadata.name') -- bash '/tmp/seed-users.sh' \ + && echo "You will be able to log in with these seed user email addresses and the passwords will be the same as the email address" \ + && echo "eg. maintainer@example.com has the password maintainer@example.com" \ + && echo "" # Use k3d/port-forwards to create local ports for the UI (6060), API (7070) and Keycloak (8080). These ports will always # log in the foreground, so perform this command in a separate window/terminal. 
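# For example, run this in a second terminal and leave it in the foreground (the forwards are
# optional, since the ingress and LoadBalancer addresses printed by k3d/get-lagoon-details
# already expose the UI, API and Keycloak directly):
#   make k3d/port-forwards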
@@ -628,17 +690,18 @@ k3d/port-forwards:
 .PHONY: k3d/retest
 k3d/retest:
 	export KUBECONFIG="$$(pwd)/kubeconfig.k3d.$(CI_BUILD_TAG)" \
-	&& export IMAGE_REGISTRY="registry.$$(./local-dev/kubectl get nodes -o jsonpath='{.items[0].status.addresses[0].address}').nip.io:32080/library" \
 	&& cd lagoon-charts.k3d.lagoon \
-	&& $(MAKE) fill-test-ci-values TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \
-		HELM=$$(realpath ../local-dev/helm) KUBECTL=$$(realpath ../local-dev/kubectl) \
-		JQ=$$(realpath ../local-dev/jq) \
+	&& export IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \
+	&& $(MAKE) fill-test-ci-values DOCKER_NETWORK=$(DOCKER_NETWORK) TESTS=$(TESTS) IMAGE_TAG=$(SAFE_BRANCH_NAME) DISABLE_CORE_HARBOR=true \
+		HELM=$(HELM) KUBECTL=$(KUBECTL) \
+		JQ=$(JQ) \
 		OVERRIDE_BUILD_DEPLOY_DIND_IMAGE=uselagoon/build-deploy-image:${BUILD_DEPLOY_IMAGE_TAG} \
-		OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE=$$IMAGE_REGISTRY/task-activestandby:$(SAFE_BRANCH_NAME) \
-		IMAGE_REGISTRY=$$IMAGE_REGISTRY \
+		OVERRIDE_ACTIVE_STANDBY_TASK_IMAGE="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library/task-activestandby:$(SAFE_BRANCH_NAME)" \
+		IMAGE_REGISTRY="registry.$$($(KUBECTL) -n ingress-nginx get services ingress-nginx-controller -o jsonpath='{.status.loadBalancer.ingress[0].ip}').nip.io/library" \
 		SKIP_ALL_DEPS=true \
 		LAGOON_FEATURE_FLAG_DEFAULT_ISOLATION_NETWORK_POLICY=enabled \
 		USE_CALICO_CNI=false \
+		LAGOON_SSH_PORTAL_LOADBALANCER=$(LAGOON_SSH_PORTAL_LOADBALANCER) \
 		LAGOON_FEATURE_FLAG_DEFAULT_ROOTLESS_WORKLOAD=enabled \
 		CLEAR_API_DATA=$(CLEAR_API_DATA) \
 	&& docker run --rm --network host --name ct-$(CI_BUILD_TAG) \
 		--volume "$$(pwd)/test-suite-run.ct.yaml:/etc/ct/ct.yaml" \
 		--volume "$$(pwd):/workdir" \
@@ -651,7 +714,7 @@ k3d/retest:
 
 .PHONY: k3d/clean
 k3d/clean: local-dev/k3d
-	./local-dev/k3d cluster delete $(CI_BUILD_TAG)
+	$(K3D) cluster delete $(CI_BUILD_TAG)
 ifeq ($(ARCH), darwin)
 	docker rm --force $(CI_BUILD_TAG)-k3d-proxy-32080 || true
 endif
diff --git a/docs/contributing-to-lagoon/developing-lagoon.md b/docs/contributing-to-lagoon/developing-lagoon.md
index d884da247c..3ad96e66f5 100644
--- a/docs/contributing-to-lagoon/developing-lagoon.md
+++ b/docs/contributing-to-lagoon/developing-lagoon.md
@@ -41,10 +41,31 @@ We have provided a number of routines in the [Makefile](https://github.com/usela
 make -j8 build
 ```
 
+### Deploy a local Lagoon development stack without test suites
+
+The Makefile offers a command that lets you spin up Lagoon inside a local k3d cluster and explore its functionality.
+
+The following make command will create a k3d cluster and install Lagoon, along with all of the components necessary to get you up and running and ready to explore.
+
+```bash title="Deploy local stack"
+make k3d/local-stack
+```
+
+!!! warning
+    This can take some time to complete, as it installs a lot of components necessary to make Lagoon work. This includes things like ingress-nginx, Harbor, and all the additional services that make exploring Lagoon easy.
+
+At the end of the process, the command prints the details you need to log in to the UI or to use the API with tools like the Lagoon CLI.
+
+### Run the Lagoon test-suite
+
+If you're developing new functionality in Lagoon and want to make sure the tests complete, you can run the entire test suite using the following options:
+
 1. 
Start Lagoon test routine using the defaults in the Makefile \(all tests\). ```bash title="Start tests" -make kind/test +make k3d/test +# or use retest if you already have a local stack running +make k3d/retest ``` !!! warning @@ -52,12 +73,12 @@ make kind/test This process will: -1. Download the correct versions of the local development tools if not installed - `kind`, `kubectl`, `helm`, `jq`. +1. Download the correct versions of the local development tools if not installed - `k3d`, `kubectl`, `helm`, `jq`. 2. Update the necessary Helm repositories for Lagoon to function. 3. Ensure all of the correct images have been built in the previous step. -4. Create a local [KinD](https://kind.sigs.k8s.io/) cluster, which provisions an entire running Kubernetes cluster in a local Docker container. This cluster has been configured to talk to a provisioned image registry that we will be pushing the built Lagoon images to. It has also been configured to allow access to the host filesystem for local development. +4. Create a local K3D cluster, which provisions an entire running Kubernetes cluster in a local Docker container. This cluster has been configured to talk to a provisioned image registry that we will be pushing the built Lagoon images to. It has also been configured to allow access to the host filesystem for local development. 5. Clone Lagoon from [https://github.com/uselagoon/lagoon-charts](https://github.com/uselagoon/lagoon-charts) \(use the `CHARTS_TREEISH` variable in the Makefile to control which branch if needed\). -6. Install the Harbor Image registry into the KinD cluster and configure its ingress and access properly. +6. Install the Harbor Image registry into the K3D cluster and configure its ingress and access properly. 7. Docker will push the built images for Lagoon into the Harbor image registry. 8. It then uses the [Makefile from lagoon-charts](https://github.com/uselagoon/lagoon-charts/blob/main/Makefile) to perform the rest of the setup steps. 9. A suitable ingress controller is installed - we use the [NGINX Ingress Controller](https://kubernetes.github.io/ingress-nginx/). @@ -72,83 +93,101 @@ Ideally, all of the tests pass and it's all done! ### View the test progress and your local cluster -The test routine creates a local Kubeconfig file \(called `kubeconfig.kind.lagoon` in the root of the project, that can be used with a Kubernetes dashboard, viewer or CLI tool to access the local cluster. We use tools like [Lens](https://k8slens.dev/), [Octant](https://octant.dev/), [kubectl](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) or [Portainer](https://www.portainer.io/) in our workflows. Lagoon Core, Remote and Tests all build in the `Lagoon` namespace, and each environment creates its own namespace to run, so make sure to use the correct context when inspecting. +The test routine creates a local Kubeconfig file \(called `kubeconfig.k3d.lagoon` in the root of the project, that can be used with a Kubernetes dashboard, viewer or CLI tool to access the local cluster. We use tools like [Lens](https://k8slens.dev/), [Octant](https://octant.dev/), [kubectl](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) or [Portainer](https://www.portainer.io/) in our workflows. Lagoon Core and the tests build in the `lagoon-core` namespace, Remote is installed in the `Lagoon` namespace. Each lagoon test environment creates its own namespace to run, so make sure to use the correct context when inspecting. 
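+A quick way to confirm which namespaces exist and where the core services run (a sketch; the namespace names assume the default installation performed by these make targets):
+
+```bash title="List Lagoon namespaces"
+export KUBECONFIG="$(pwd)/kubeconfig.k3d.lagoon"
+kubectl get namespaces
+kubectl get pods --namespace lagoon-core
+```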
In order to use kubectl with the local cluster, you will need to use the correct Kubeconfig. This can be done for every command or it can be added to your preferred tool: -```bash title="kubeconfig.kind.lagoon" -KUBECONFIG=./kubeconfig.kind.lagoon kubectl get pods -n lagoon +```bash title="kubeconfig.k3d.lagoon" +KUBECONFIG=./kubeconfig.k3d.lagoon kubectl get pods -n lagoon ``` -The Helm charts used to build the local Lagoon are cloned into a local folder and symlinked to `lagoon-charts.kind.lagoon` where you can see the configuration. We'll cover how to make easy modifications later in this documentation. +The Helm charts used to build the local Lagoon are cloned into a local folder and symlinked to `lagoon-charts.k3d.lagoon` where you can see the configuration. We'll cover how to make easy modifications later in this documentation. ### Interact with your local Lagoon cluster The Makefile includes a few simple routines that will make interacting with the installed Lagoon simpler: +#### Port forwarding + +Clusters deployed by this makefile will provide loadbalancers and individual IPs, but if you choose to you can port-forward some services using the following + ```bash title="Create local ports" -make kind/port-forwards +make k3d/port-forwards ``` This will create local ports to expose the UI \(6060\), API \(7070\) and Keycloak \(8080\). Note that this logs to `stdout`, so it should be performed in a secondary terminal/window. -```bash title="Retrieve admin creds" -make kind/get-admin-creds -``` +#### Lagoon credentials/information This will retrieve the necessary credentials to interact with the Lagoon. +```bash title="Retrieve admin creds" +make k3d/get-lagoon-details +``` + * The JWT is an admin-scoped token for use as a bearer token with your local GraphQL client. [See more in our GraphQL documentation](../interacting/graphql.md). * There is a token for use with the "admin" user in Keycloak, who can access all users, groups, roles, etc. -* There is also a token for use with the "lagoonadmin" user in Lagoon, which can be allocated default groups, permissions, etc. + +#### Rebuild Lagoon core and push images + +This will re-push the images listed in `KIND_SERVICES` with the correct tag, and redeploy the lagoon-core chart. This is useful for testing small changes to Lagoon services, but does not support "live" development. You will need to rebuild these images locally first, e.g `rm build/api && make build/api`. ```bash title="Re-push images" -make kind/dev +make k3d/dev ``` -This will re-push the images listed in `KIND_SERVICES` with the correct tag, and redeploy the lagoon-core chart. This is useful for testing small changes to Lagoon services, but does not support "live" development. You will need to rebuild these images locally first, e.g `rm build/api && make build/api`. +#### Patch with local node.js + +This will build the typescript services, using your locally installed Node.js \(it should be >16.0\). It will then: ```bash title="Build typescript services" -make kind/local-dev-patch +make k3d/local-dev-patch ``` -This will build the typescript services, using your locally installed Node.js \(it should be >16.0\). It will then: * Mount the "dist" folders from the Lagoon services into the correct lagoon-core pods in Kubernetes * Redeploy the lagoon-core chart with the services running with `nodemon`watching the code for changes * This will facilitate "live" development on Lagoon. * Note that occasionally the pod in Kubernetes may require redeployment for a change to show. 
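+If a change doesn't show up, restarting the affected deployment usually picks it up. A rough sketch (the deployment name is only an example; the patched deployments follow the `lagoon-core-<service>` naming used by the patch target):
+
+```bash title="Force a redeploy"
+KUBECONFIG=./kubeconfig.k3d.lagoon kubectl --namespace lagoon-core rollout restart deployment lagoon-core-api
+```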
Clean any build artifacts from those services if you're rebuilding different branches with `git clean -dfx` as the dist folders are ignored by Git. +#### Install simple Logging support + +This will create a standalone OpenDistro for Elasticsearch cluster in your local Docker, and configure Lagoon to dispatch all logs \(Lagoon and project\) to it, using the configuration in [lagoon-logging](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-logging). + ```bash title="Initiate logging" -make kind/local-dev-logging +make k3d/local-dev-logging ``` -This will create a standalone OpenDistro for Elasticsearch cluster in your local Docker, and configure Lagoon to dispatch all logs \(Lagoon and project\) to it, using the configuration in [lagoon-logging](https://github.com/uselagoon/lagoon-charts/tree/main/charts/lagoon-logging). +#### Re run specific tests + +This will re-run a suite of tests \(defined in the `TESTS` variable\) against the existing cluster. It will re-push the images needed for tests \(tests, local-git, and the data-watcher-pusher\). You can specify tests to run by passing the TESTS variable inline. ```bash title="Re-run tests." -make kind/retest +make k3d/retest # OR -make kind/retest TESTS='[features-kubernetes]' +make k3d/retest TESTS='[features-kubernetes]' ``` -This will re-run a suite of tests \(defined in the `TESTS` variable\) against the existing cluster. It will re-push the images needed for tests \(tests, local-git, and the data-watcher-pusher\). You can specify tests to run by passing the TESTS variable inline. +If updating a test configuration, the tests image will need to be rebuilt and pushed, e.g `rm build/tests && make build/tests && make k3d/push-images IMAGES='tests' && make k3d/retest TESTS='[api]'` + +#### Push images -If updating a test configuration, the tests image will need to be rebuilt and pushed, e.g `rm build/tests && make build/tests && make kind/push-images IMAGES='tests' && make kind/retest TESTS='[api]'` +This will push all the images up to the image registry. Specifying `IMAGES` will tag and push specific images. ```bash title="Push all images" -make kind/push-images +make k3d/push-images # OR -make kind/push-images IMAGES='tests local-git' +make k3d/push-images IMAGES='tests local-git' ``` -This will push all the images up to the image registry. Specifying `IMAGES` will tag and push specific images. +#### Tear down + +This will remove the K3D Lagoon cluster from your local Docker. ```bash title="Remove cluster" -make kind/clean +make k3d/clean ``` -This will remove the KinD Lagoon cluster from your local Docker. - ### Ansible The Lagoon test uses Ansible to run the test suite. Each range of tests for a specific function has been split into its own routine. If you are performing development work locally, select which tests to run, and update the `$TESTS` variable in the Makefile to reduce the concurrent tests running. @@ -212,7 +251,14 @@ Here are some development scenarios and useful workflows for getting things done ### Add tests -1. Repeat the first step above. +An example + +1. Deploy the lagoon and run the test you're modifying. + +```bash title="Deploy Lagoon" +make k3d/test TESTS=[features-variables] +``` + 2. Edit `tests/tests/features-variables.yaml` and add a test case. 3. Rebuild the `tests` image. @@ -224,11 +270,11 @@ make -j8 build/tests 1. Push the new `tests` image into the cluster registry. ```bash title="Push test image" -make kind/push-images IMAGES=tests +make k3d/push-images IMAGES=tests ``` 1. Rerun the tests. 
```bash title="Re-run tests" -make kind/retest TESTS='[features-variables]' +make k3d/retest TESTS=[features-variables] ``` diff --git a/k3d-calico.config.yaml.tpl b/k3d-calico.config.yaml.tpl index 0a70948643..cf2a975c3d 100644 --- a/k3d-calico.config.yaml.tpl +++ b/k3d-calico.config.yaml.tpl @@ -14,19 +14,22 @@ registries: docker.io: endpoint: - "https://imagecache.amazeeio.cloud" - "registry.${K3D_NODE_IP}.nip.io:32080": + "registry.${LAGOON_K3D_NETWORK}.nip.io": endpoint: - - http://registry.${K3D_NODE_IP}.nip.io:32080 + - https://registry.${LAGOON_K3D_NETWORK}.nip.io configs: - "registry.${K3D_NODE_IP}.nip.io:32080": - tls: - insecure_skip_verify: true + "registry.${LAGOON_K3D_NETWORK}.nip.io": + tls: + insecure_skip_verify: true options: k3s: # options passed on to K3s itself extraArgs: # additional arguments passed to the `k3s server|agent` command; same as `--k3s-arg` - arg: --disable=traefik nodeFilters: - server:* + - arg: --disable=servicelb + nodeFilters: + - server:* - arg: --flannel-backend=none nodeFilters: - server:* diff --git a/k3d.config.yaml.tpl b/k3d.config.yaml.tpl index fb61e20821..984198351f 100644 --- a/k3d.config.yaml.tpl +++ b/k3d.config.yaml.tpl @@ -13,11 +13,11 @@ registries: docker.io: endpoint: - "https://imagecache.amazeeio.cloud" - "registry.${K3D_NODE_IP}.nip.io:32080": + "registry.${LAGOON_K3D_NETWORK}.nip.io": endpoint: - - http://registry.${K3D_NODE_IP}.nip.io:32080 + - https://registry.${LAGOON_K3D_NETWORK}.nip.io configs: - "registry.${K3D_NODE_IP}.nip.io:32080": + "registry.${LAGOON_K3D_NETWORK}.nip.io": tls: insecure_skip_verify: true options: @@ -26,6 +26,9 @@ options: - arg: --disable=traefik nodeFilters: - server:* + - arg: --disable=servicelb + nodeFilters: + - server:* - arg: --disable-network-policy nodeFilters: - server:* diff --git a/local-dev/k3d-seed-data/00-populate-kubernetes.gql b/local-dev/k3d-seed-data/00-populate-kubernetes.gql new file mode 100644 index 0000000000..e0838b4724 --- /dev/null +++ b/local-dev/k3d-seed-data/00-populate-kubernetes.gql @@ -0,0 +1,967 @@ +mutation PopulateApi { + + #### Populate API with lagoon-remote Kubernetes + CiLocalKubernetes: addKubernetes( + input: { + id: 2001 + name: "ci-local-control-k8s" + consoleUrl: "https://localhost:8443/" + routerPattern: "${ROUTER_PATTERN}.nip.io" + sshHost: "${SSH_PORTAL_HOST}" + sshPort: "${SSH_PORTAL_PORT}" + token: "${TOKEN}" + } + ) { + id + } + + ## Create some general users with specific role against lagoon-demo for permissions validations in local testing easily + UserExampleGuest: addUser( + input: { + email: "guest@example.com" + comment: "guest user" + } + ) { + id + } + + UserExampleReporter: addUser( + input: { + email: "reporter@example.com" + comment: "reporter user" + } + ) { + id + } + + UserExampleDeveloper: addUser( + input: { + email: "developer@example.com" + comment: "developer user" + } + ) { + id + } + + UserExampleMaintainer: addUser( + input: { + email: "maintainer@example.com" + comment: "maintainer user" + } + ) { + id + } + + UserExampleOwner: addUser( + input: { + email: "owner@example.com" + comment: "owner user" + } + ) { + id + } + + UserExamplePlatformOwner: addUser( + input: { + email: "platformowner@example.com" + comment: "platform owner user" + } + ) { + id + } + + LagoonDemoGroup: addGroup( + input: { + name: "lagoon-demo-group" + } + ) { + id + } + + LagoonDemoToGroup: addGroupsToProject( + input: { + project: { + name: "lagoon-demo" + } + groups: [ + { + name: "lagoon-demo-group" + } + ] + } + ) { + id + } + + UserExampleGuestGroup: 
addUserToGroup( + input: { + user: { + email:"guest@example.com" + } + group: { + name: "lagoon-demo-group" + } + role: GUEST + } + ) { + name + } + + UserExampleReporterGroup: addUserToGroup( + input: { + user: { + email:"reporter@example.com" + } + group: { + name: "lagoon-demo-group" + } + role: REPORTER + } + ) { + name + } + + UserExampleDeveloperGroup: addUserToGroup( + input: { + user: { + email:"developer@example.com" + } + group: { + name: "lagoon-demo-group" + } + role: DEVELOPER + } + ) { + name + } + + UserExampleMaintainerGroup: addUserToGroup( + input: { + user: { + email:"maintainer@example.com" + } + group: { + name: "lagoon-demo-group" + } + role: MAINTAINER + } + ) { + name + } + + UserExampleOwnerGroup: addUserToGroup( + input: { + user: { + email:"owner@example.com" + } + group: { + name: "lagoon-demo-group" + } + role: OWNER + } + ) { + name + } + + # Organizations + UIOrganization1: addOrganization(input: { + id: 1 + name: "lagoon-demo-organization" + friendlyName: "Lagoon Demo Organization" + description: "An organization for testing" + quotaProject: 5 + quotaEnvironment: 4 + quotaGroup: 10 + quotaNotification: 10 + }) { + id + name + quotaProject + quotaEnvironment + quotaGroup + quotaNotification + } + + UIOrganizationUser: addUser( + input: { + email: "orguser@example.com" + comment: "test user that will be in a group in an organization" + } + ) { + id + } + UIOrganizationViewer: addUser( + input: { + email: "orgviewer@example.com" + comment: "user that will be an organization viewer" + } + ) { + id + } + UIOrganizationOwner: addUser( + input: { + email: "orgowner@example.com" + comment: "user that will be an organization owner" + } + ) { + id + } + + UIOrganizationGroup: addGroupToOrganization( + input: { + name: "lagoon-demo-organization-group" + organization: 1} + ) { + id + } + + UIOrganizationUserToGroup: addUserToGroup( + input: { + user: { + email:"orguser@example.com" + } + group: { + name: "lagoon-demo-organization-group" + } + role: MAINTAINER + } + ) { + name + } + + UIOrganizationAddViewer: addUserToOrganization(input: {user: {email: "orgviewer@example.com"}, organization: 1}) { + id + } + + UIOrganizationAddOwner: addUserToOrganization(input: {user: {email: "orgowner@example.com"}, organization: 1, owner: true}) { + id + } + + UIOrganizationNotificationSlack: addNotificationSlack(input: {organization: 1, name: "slack-test", channel: "lobby", webhook: "http://slack.example.com/hooks/abcdefg"}) { + id + } + + UIOrganizationNotificationRocketChat: addNotificationRocketChat(input: {organization: 1, name: "rocketchat-test", channel: "lobby", webhook: "http://rocketchat.example.com/hooks/abcdefg"}) { + id + } + + UIOrganizationNotificationEmail: addNotificationEmail(input: {organization: 1, name: "email-test", emailAddress: "fake@example.com"}) { + id + } + + UIOrganizationNotificationWebhook: addNotificationWebhook(input: {organization: 1, name: "webhook-test", webhook: "http://webhook.example.com"}) { + id + } + + UIOrganizationNotificationTeams: addNotificationMicrosoftTeams(input: {organization: 1, name: "teams-test", webhook: "http://teams.example.com/hooks/sdgsgsgs"}) { + id + } + + AddUIKubernetesToOrganization: addDeployTargetToOrganization(input:{ + deployTarget: 2001 + organization: 1 + }) { + id + } + + UIProject2: addProject( + input: { + id: 180 + organization: 1 + name: "lagoon-demo-org" + availability: HIGH + openshift: 2001 + gitUrl: "https://github.com/lagoon-examples/drupal10-base" + productionEnvironment: "main" + problemsUi: 1 + factsUi: 
1 + } + ) { + id + } + + + UIOrganizationProjectGroup: addGroupsToProject( + input: { + project: { + name: "lagoon-demo-org" + } + groups: [ + { + name: "lagoon-demo-organization-group" + } + ] + } + ) { + id + } + + UIOrganizationProjectEnvironment1: addOrUpdateEnvironment( + input: { + id: 30 + name: "main" + project: 180 + deployType: BRANCH + deployBaseRef: "main" + environmentType: PRODUCTION + openshiftProjectName: "lagoon-demo-org-main" + } + ) { + id + } + + UIOrganizationProjectEnvironment1Update: updateEnvironment( + input: { + id: 30 + patch: { + route: "https://lagoondemoorg.example.org" + routes: "https://lagoondemoorg.example.org,https://nginx.main.lagoon-demo-org.ui-kubernetes.lagoon.sh" + } + } + ) { + id + } + UIOrganizationProjectEnvironment2: addOrUpdateEnvironment( + input: { + id: 31 + name: "staging" + project: 180 + deployType: BRANCH + deployBaseRef: "staging" + environmentType: DEVELOPMENT + openshiftProjectName: "lagoon-demo-org-staging" + } + ) { + id + } + UIOrganizationProjectEnvironment3: addOrUpdateEnvironment( + input: { + id: 32 + name: "development" + project: 180 + deployType: BRANCH + deployBaseRef: "development" + environmentType: DEVELOPMENT + openshiftProjectName: "lagoon-demo-org-development" + } + ) { + id + } + UIOrganizationProjectEnvironment4: addOrUpdateEnvironment( + input: { + id: 33 + name: "pr-15" + project: 180 + deployType: PULLREQUEST + deployBaseRef: "target" + deployHeadRef: "source" + deployTitle: "pr-15" + environmentType: DEVELOPMENT + openshiftProjectName: "lagoon-demo-org-pr-15" + } + ) { + id + } + + + + UICustomer1: addGroup( + input: { name: "lagoon-group" } + ) { + id + } + + UIProject1: addProject( + input: { + id: 18 + name: "lagoon-demo" + availability: HIGH + openshift: 2001 + gitUrl: "https://github.com/lagoon-examples/drupal10-base" + productionEnvironment: "main" + problemsUi: 1 + factsUi: 1 + } + ) { + id + } + + UIProject1Group5: addGroupsToProject( + input: { + project: { + name: "lagoon-demo" + } + groups: [ + { + name: "lagoon-group" + } + ] + } + ) { + id + } + + UIProject1Environment1: addOrUpdateEnvironment( + input: { + id: 3 + name: "main" + project: 18 + deployType: BRANCH + deployBaseRef: "main" + environmentType: PRODUCTION + openshiftProjectName: "lagoon-demo-main" + } + ) { + id + } + + UIProject1Environment1Update: updateEnvironment( + input: { + id: 3 + patch: { + route: "https://lagoondemo.example.org" + routes: "https://lagoondemo.example.org,https://nginx.main.lagoon-demo.ui-kubernetes.lagoon.sh" + } + } + ) { + id + } + + UIProject1Environment1Backup1: addBackup( + input: { + environment: 3 + source: "mariadb" + backupId: "e2e1d31b4a7dfc1687f469b6673f6bf2c0aabee0cc6b3f1bdbde710a9bc6280d" + created: "2023-10-14 00:33:16" + } + ) { + id + } + + UIProject1Environment1Backup1Restore: addRestore( + input: { + backupId: "e2e1d31b4a7dfc1687f469b6673f6bf2c0aabee0cc6b3f1bdbde710a9bc6280d" + execute: false + } + ) { + id + } + + UIProject1Environment1Backup2: addBackup( + input: { + environment: 3 + source: "files" + backupId: "e2e1d31b4a7dfc1687f469b6673f6bf2c0aabee0cc6b3f1bdbde710a9bc6280f" + created: "2023-10-14 00:33:16" + } + ) { + id + } + + UIProject1Environment1Backup2Restore: addRestore( + input: { + backupId: "e2e1d31b4a7dfc1687f469b6673f6bf2c0aabee0cc6b3f1bdbde710a9bc6280f" + status: SUCCESSFUL + restoreLocation: "http://172.17.0.1:9000/restores/lagoon-demo-main.tar.gz" + execute: false + } + ) { + id + } + + UIProject1Environment1Backup3: addBackup( + input: { + environment: 3 + source: "mariadb" + 
backupId: "e260f07c374e4a3319c5d46e688dab6f1eb23c3e61c166a37609d55762d2ee0b" + created: "2023-10-13 00:33:16" + } + ) { + id + } + + UIProject1Environment1Backup3Restore: addRestore( + input: { + backupId: "e260f07c374e4a3319c5d46e688dab6f1eb23c3e61c166a37609d55762d2ee0b" + status: FAILED + execute: false + } + ) { + id + } + + UIProject1Environment1Backup4: addBackup( + input: { + environment: 3 + source: "files" + backupId: "bf072a09e17726da54adc79936ec8745521993599d41211dfc9466dfd5bc32a5" + created: "2023-10-13 00:33:16" + } + ) { + id + } + + UIProject1Environment1addDeployment1: addDeployment( + input: { + name: "lagoon-build-123abc" + status: COMPLETE + remoteId: "86358316-a755-11ed-8206-032901f4c7e3" + environment: 3 + created: "2023-10-07 23:02:41" + started: "2023-10-07 23:03:41" + completed: "2023-10-07 23:20:41" + } + ) { + id + } + UIProject1Environment1addDeployment2: addDeployment( + input: { + name: "lagoon-build-def456" + remoteId: "85e36e3c-a755-11ed-abf6-df28d8a74109" + status: FAILED + environment: 3 + created: "2023-10-07 23:02:41" + started: "2023-10-07 23:03:41" + completed: "2023-10-07 23:20:41" + } + ) { + id + } + UIProject1Environment1addDeployment3: addDeployment( + input: { + name: "lagoon-build-7g8h9i" + remoteId: "84e1dc8a-a755-11ed-a37d-770f36aa3d4e" + status: RUNNING + environment: 3 + created: "2023-10-07 23:02:41" + started: "2023-10-07 23:03:41" + } + ) { + id + } + + UIProject1Environment1addTask1: addTask( + input: { + name: "Developer task" + id: 123 + remoteId: "5b21aff1-689c-41b7-80d7-6de9f5bff1f3" + status: FAILED + environment: 3 + service: "cli" + command: "site-status" + created: "2023-10-07 23:00:00" + started: "2023-10-07 23:00:10" + completed: "2023-10-07 23:00:20" + execute: false + } + ) { + id + } + + UIProject1Environment1addTask2: addTask( + input: { + name: "Maintainer task" + id: 124 + remoteId: "50573da4-f0dd-477a-9261-c4785ac2adff" + status: COMPLETE + environment: 3 + service: "cli" + command: "site-status" + created: "2023-10-07 23:01:00" + started: "2023-10-07 23:01:10" + completed: "2023-10-07 23:01:30" + execute: false + } + ) { + id + } + + UIProject1Environment1addTask3: addTask( + input: { + name: "Maintainer task" + id: 125 + remoteId: "e3869563-126d-4922-b6c4-36289a374edb" + status: PENDING + environment: 3 + service: "cli" + command: "site-status" + created: "2023-10-07 23:02:00" + started: "2023-10-07 23:02:10" + execute: false + } + ) { + id + } + + UIProjectAdvancedTask1: addAdvancedTaskDefinition( + input:{ + name: "Admin only task" + description: "A task that only admins can see and run (logs also only visible to admin)" + type: COMMAND + service: "cli" + command: "site-status" + permission: MAINTAINER + project: 18 + adminOnlyView: true + deployTokenInjection: true + projectKeyInjection: true + } + ){ + ... on AdvancedTaskDefinitionCommand { + id + name + description + service + command + } + } + + UIProjectAdvancedTask2: addAdvancedTaskDefinition( + input:{ + name: "Maintainer task" + description: "A task that only maintainers can run" + type: COMMAND + service: "cli" + command: "site-status" + permission: MAINTAINER + project: 18 + } + ){ + ... on AdvancedTaskDefinitionCommand { + id + name + description + service + command + } + } + + UIProjectAdvancedTask3: addAdvancedTaskDefinition( + input:{ + name: "Developer task" + description: "A task that developers can run" + type: COMMAND + service: "cli" + command: "site-status" + permission: DEVELOPER + project: 18 + } + ){ + ... 
on AdvancedTaskDefinitionCommand { + id + name + description + service + command + } + } + + UIProjectInvokeAdvancedTask1: invokeRegisteredTask( + advancedTaskDefinition: 1 + environment: 3 + ) { + id + } + + UIProjectUpdateAdvancedTask1: updateTask( + input:{ + id: 126 + patch: { + remoteId: "0ba18cc1-03e7-4dd6-b6e9-95c1ff54103a" + } + } + ) { + id + } + + UIProject1Environment1addServices1: addOrUpdateEnvironmentService( + input: { + environment: 3 + name: "cli" + type: "cli-persistent" + containers: [{name: "cli"}] + } + ){ + id + name + type + } + UIProject1Environment1addServices2: addOrUpdateEnvironmentService( + input: { + environment: 3 + name: "nginx" + type: "nginx-php-persistent" + containers: [{name: "nginx"},{name:"php"}] + } + ){ + id + name + type + } + UIProject1Environment1addServices3: addOrUpdateEnvironmentService( + input: { + environment: 3 + name: "mariadb" + type: "mariadb-single" + containers: [{name: "mariadb"}] + } + ){ + id + name + type + } + + UIProject1Environment1addStorage1: addOrUpdateStorageOnEnvironment(input:{ + environment: 3 + persistentStorageClaim: "nginx" + kibUsed: 200000 + }) { + id + kibUsed + } + + UIProject1Environment1addStorage2: addOrUpdateStorageOnEnvironment(input:{ + environment: 3 + persistentStorageClaim: "mariadb" + kibUsed: 200000 + }) { + id + kibUsed + } + + UIProject1Environment1addFacts: addFacts( + input: { + facts: [ + { + name: "lagoon-category" + value: "saas" + environment: 3 + source: "" + description: "Category of the site" + category: "Lagoon" + keyFact: true + }, + { + name: "drupal-core" + value: "9.0.1" + environment: 3 + source: "drush-pml" + description: "Drupal CMS version found on environment" + category: "Framework" + keyFact: true + }, + { + name: "php-version" + value: "8.0.3" + environment: 3 + source: "php-version" + description: "PHP version found on environment" + category: "Programming language" + keyFact: true + }, + { + name: "Lagoon" + value: "21.3.0" + environment: 3 + source: "env" + description: "Lagoon version" + category: "Platform" + keyFact: true + }, + { + name: "interesting-package" + value: "1.0.0" + environment: 3 + source: "local-dev" + description: "Description of interesting php package" + category: "Composer package" + }, + { + name: "npm-module" + value: "2.0.0" + environment: 3 + source: "local-dev" + description: "Description of node module" + category: "Node package" + }, + { + name: "site-code-status" + value: "200" + environment: 3 + source: "curl" + description: "Health check of site" + category: "Performance" + keyFact: true + } + ] + } + ) { + id + } + + UIProject1Environment1addFactReference1: addFactReference( + input: { + fid: 2 + name: "lagoondemo.example.org" + } + ) { + id + } + + UIProject1Environment1addFactReference2: addFactReference( + input: { + fid: 2 + name: "cli" + } + ) { + id + } + + UIProject1Environment2: addOrUpdateEnvironment( + input: { + id: 4 + name: "staging" + project: 18 + deployType: BRANCH + deployBaseRef: "staging" + environmentType: DEVELOPMENT + openshiftProjectName: "lagoon-demo-staging" + } + ) { + id + } + UIProject1Environment2Update: updateEnvironment( + input: { + id: 4 + patch: { + route: "https://nginx.staging.lagoon-demo.ui-kubernetes.lagoon.sh" + routes: "https://nginx.staging.lagoon-demo.ui-kubernetes.lagoon.sh" + } + } + ) { + id + } + + UIProject1Environment3: addOrUpdateEnvironment( + input: { + id: 5 + name: "dev" + project: 18 + deployType: BRANCH + deployBaseRef: "dev" + environmentType: DEVELOPMENT + openshiftProjectName: 
"lagoon-demo-dev" + } + ) { + id + } + UIProject1Environment3Update: updateEnvironment( + input: { + id: 5 + patch: { + route: "" # intentionally empty, for testing + routes: "" # intentionally empty, for testing + } + } + ) { + id + } + + UIProject1Environment3addFacts: addFacts( + input: { + facts: [ + { + name: "lagoon-category" + value: "saas" + environment: 5 + source: "" + description: "Category of the site" + category: "Lagoon" + keyFact: true + }, + { + name: "drupal-core" + value: "9.0.1" + environment: 5 + source: "drush-pml" + description: "Drupal CMS version found on environment" + category: "Framework" + keyFact: true + }, + { + name: "php-version" + value: "8.0.3" + environment: 5 + source: "php-version" + description: "PHP version found on environment" + category: "Programming language" + keyFact: true + }, + { + name: "Lagoon" + value: "21.3.0" + environment: 5 + source: "env" + description: "Lagoon version" + category: "Platform" + keyFact: true + }, + { + name: "interesting-package" + value: "1.0.0" + environment: 5 + source: "local-dev" + description: "Description of interesting php package" + category: "Composer package" + }, + { + name: "npm-module" + value: "2.0.0" + environment: 5 + source: "local-dev" + description: "Description of node module" + category: "Node package" + }, + { + name: "site-code-status" + value: "403" + environment: 5 + source: "curl" + description: "Health check of site" + category: "Performance" + keyFact: true + } + ] + } + ) { + id + } + + UIProject1Environment4: addOrUpdateEnvironment( + input: { + id: 6 + name: "pr-175" + project: 18 + deployType: PULLREQUEST + deployBaseRef: "target" + deployHeadRef: "source" + deployTitle: "pr-175" + environmentType: DEVELOPMENT + openshiftProjectName: "lagoon-demo-pr-175" + } + ) { + id + } + UIProject1Environment4Update: updateEnvironment( + input: { + id: 6 + patch: { + route: "https://nginx.pr-175.lagoon-demo.ui-kubernetes.lagoon.sh" + routes: "https://nginx.pr-175.lagoon-demo.ui-kubernetes.lagoon.sh" + } + } + ) { + id + } + +} diff --git a/local-dev/k3d-seed-data/seed-users.sh b/local-dev/k3d-seed-data/seed-users.sh new file mode 100644 index 0000000000..16074cee8c --- /dev/null +++ b/local-dev/k3d-seed-data/seed-users.sh @@ -0,0 +1,52 @@ +function is_keycloak_running { + local http_code=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/auth/admin/realms) + if [[ $http_code -eq 401 ]]; then + return 0 + else + return 1 + fi +} + +function configure_user_passwords { + + LAGOON_DEMO_USERS=("guest@example.com" "reporter@example.com" "developer@example.com" "maintainer@example.com" "owner@example.com") + LAGOON_DEMO_ORG_USERS=("orguser@example.com" "orgviewer@example.com" "orgowner@example.com" "platformowner@example.com") + + for i in ${LAGOON_DEMO_USERS[@]} + do + echo Configuring password for $i + /opt/keycloak/bin/kcadm.sh set-password --config $CONFIG_PATH --username $i -p $i --target-realm Lagoon + done + + for i in ${LAGOON_DEMO_ORG_USERS[@]} + do + echo Configuring password for $i + /opt/keycloak/bin/kcadm.sh set-password --config $CONFIG_PATH --username $i -p $i --target-realm Lagoon + done +} + +function configure_platformowner { + echo Configuring platform owner role + /opt/keycloak/bin/kcadm.sh add-roles --uusername platformowner@example.com --rolename platform-owner --config $CONFIG_PATH --target-realm Lagoon +} + +function configure_keycloak { + until is_keycloak_running; do + echo Keycloak still not running, waiting 5 seconds + sleep 5 + done + + # Set the config file path 
because $HOME/.keycloak/kcadm.config resolves to /opt/jboss/?/.keycloak/kcadm.config for some reason, causing it to fail + CONFIG_PATH=/tmp/kcadm.config + + echo Keycloak is running, proceeding with configuration + + /opt/keycloak/bin/kcadm.sh config credentials --config $CONFIG_PATH --server http://localhost:8080/auth --user $KEYCLOAK_ADMIN_USER --password $KEYCLOAK_ADMIN_PASSWORD --realm master + + configure_user_passwords + configure_platformowner + + echo "Config of Keycloak users done" +} + +configure_keycloak \ No newline at end of file diff --git a/tests/files/drupal8-mariadb-single/.lagoon.yml b/tests/files/drupal8-mariadb-single/.lagoon.yml index 35154b460a..51cc4dc07d 100644 --- a/tests/files/drupal8-mariadb-single/.lagoon.yml +++ b/tests/files/drupal8-mariadb-single/.lagoon.yml @@ -1,7 +1,7 @@ docker-compose-yaml: docker-compose.yml -endpoint: lagoon-core-ssh-token.lagoon.svc.cluster.local:2223 -api: lagoon-core-api.lagoon.svc.cluster.local:80 +endpoint: lagoon-core-ssh-token.lagoon-core.svc.cluster.local:2223 +api: lagoon-core-api.lagoon-core.svc.cluster.local:80 environment_variables: git_sha: 'true' diff --git a/tests/files/drupal8-mariadb-single/drush/aliases.drushrc.php b/tests/files/drupal8-mariadb-single/drush/aliases.drushrc.php index a8555e4ff9..e71b467dbd 100644 --- a/tests/files/drupal8-mariadb-single/drush/aliases.drushrc.php +++ b/tests/files/drupal8-mariadb-single/drush/aliases.drushrc.php @@ -4,7 +4,7 @@ * Don't change anything here, it's magic! */ -$aliasUrl = "http://lagoon-core-drush-alias.lagoon.svc.cluster.local:8080/aliases.drushrc.php.stub"; +$aliasUrl = "http://lagoon-core-drush-alias.lagoon-core.svc.cluster.local:8080/aliases.drushrc.php.stub"; $aliasCheckTimeout = 5; //do a head check against the alias stub file, report on failure diff --git a/tests/files/drupal9-mariadb-single/.lagoon.yml b/tests/files/drupal9-mariadb-single/.lagoon.yml index d70f4f8b47..4d0db996d3 100644 --- a/tests/files/drupal9-mariadb-single/.lagoon.yml +++ b/tests/files/drupal9-mariadb-single/.lagoon.yml @@ -1,7 +1,7 @@ docker-compose-yaml: docker-compose.yml -ssh: lagoon-core-ssh-token.lagoon.svc.cluster.local:2223 -api: http://lagoon-core-api.lagoon.svc:80/graphql +ssh: lagoon-core-ssh-token.lagoon-core.svc.cluster.local:2223 +api: http://lagoon-core-api.lagoon-core.svc:80/graphql environment_variables: git_sha: 'true'