From a49b55fb408f840a8887074a41c5ea1b28ff141c Mon Sep 17 00:00:00 2001 From: Claire Nollet Date: Tue, 19 Sep 2023 18:48:38 +0200 Subject: [PATCH 01/31] ci: :wrench: add eslint and commitlint --- .eslintignore | 29 + .eslintrc.js | 15 + .github/workflows/cache.yml | 35 + .github/workflows/cd.yml | 15 + .github/workflows/ci.yml | 35 + .github/workflows/lint.yml | 53 + .github/workflows/release.yml | 40 + .gitignore | 5 +- .husky/commit-msg | 4 + .husky/pre-commit | 4 + .lintstagedrc | 3 + README.md | 199 ++-- commitlint.config.cjs | 5 + package.json | 18 + pnpm-lock.yaml | 2088 +++++++++++++++++++++++++++++++++ 15 files changed, 2474 insertions(+), 74 deletions(-) create mode 100644 .eslintignore create mode 100644 .eslintrc.js create mode 100644 .github/workflows/cache.yml create mode 100644 .github/workflows/cd.yml create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/lint.yml create mode 100644 .github/workflows/release.yml create mode 100755 .husky/commit-msg create mode 100755 .husky/pre-commit create mode 100644 .lintstagedrc create mode 100644 commitlint.config.cjs create mode 100644 package.json create mode 100644 pnpm-lock.yaml diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 00000000..15a414e8 --- /dev/null +++ b/.eslintignore @@ -0,0 +1,29 @@ +# code +.vscode/* + +# modules +node_modules +**/node_modules + +# package +package.json + +# doc +README.md + +# github actions +!.github + +# TODO : Fix later +# socle/roles/harbor/templates/ca-cm.yaml +# 8:1 error Parsing error: Plain value cannot start with directive indicator character % + +# socle/roles/keycloak/templates/console-frontend-redirectUris.yaml +# 2:0 error Parsing error: Unexpected flow-map-start at node end + +# socle/roles/keycloak/templates/console-frontend-webOrigins.yaml +# 2:0 error Parsing error: Unexpected flow-map-start at node end + +roles/harbor/templates/ca-cm.yaml +roles/keycloak/templates/console-frontend-redirectUris.yaml 
+roles/keycloak/templates/console-frontend-webOrigins.yaml \ No newline at end of file diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 00000000..74745aa4 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,15 @@ +module.exports = { + extends: [ + "plugin:yml/standard", + ], + rules: { + "yml/no-empty-mapping-value": "off", + "yml/plain-scalar": "off", + }, + overrides: [ + { + files: ["*.yaml", "*.yml"], + parser: "yaml-eslint-parser", + }, + ], +} \ No newline at end of file diff --git a/.github/workflows/cache.yml b/.github/workflows/cache.yml new file mode 100644 index 00000000..75cf9fe8 --- /dev/null +++ b/.github/workflows/cache.yml @@ -0,0 +1,35 @@ +name: Clean cache + +on: + pull_request: + types: + - closed + workflow_dispatch: + +jobs: + cleanup: + runs-on: ubuntu-latest + steps: + - name: Check out code + uses: actions/checkout@v3 + + - name: Clean cache for closed branch + run: | + gh extension install actions/gh-actions-cache + + REPO=${{ github.repository }} + BRANCH="refs/pull/${{ github.event.pull_request.number }}/merge" + + echo "Fetching list of cache key" + cacheKeysForPR=$(gh actions-cache list -R $REPO -B $BRANCH -L 100 | cut -f 1 ) + + ## Setting this to not fail the workflow while deleting cache keys. + set +e + echo "Deleting caches..." 
+ for cacheKey in $cacheKeysForPR + do + gh actions-cache delete $cacheKey -R $REPO -B $BRANCH --confirm + done + echo "Done" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml new file mode 100644 index 00000000..4b401b83 --- /dev/null +++ b/.github/workflows/cd.yml @@ -0,0 +1,15 @@ +name: CD + +on: + push: + branches: + - main + +env: + REGISTRY: ghcr.io + NAMESPACE: "${{ github.repository }}" + PLATFORM: "linux/amd64,linux/arm64" + +jobs: + release: + uses: ./.github/workflows/release.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..4dc48883 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,35 @@ +name: CI + +on: + pull_request: + types: + - opened + - reopened + - synchronize + - ready_for_review + branches: + - "**" + workflow_dispatch: + +env: + NODE_VERSION: 18.17.1 + PNPM_VERSION: "8" + +jobs: + expose-vars: + runs-on: ubuntu-latest + if: ${{ !github.event.pull_request.draft }} + outputs: + NODE_VERSION: ${{ env.NODE_VERSION }} + PNPM_VERSION: ${{ env.PNPM_VERSION }} + steps: + - name: Exposing env vars + run: echo "Exposing env vars" + + lint: + uses: ./.github/workflows/lint.yml + needs: + - expose-vars + with: + NODE_VERSION: ${{ needs.expose-vars.outputs.NODE_VERSION }} + PNPM_VERSION: ${{ needs.expose-vars.outputs.PNPM_VERSION }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..2a08fa8c --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,53 @@ +name: Lint + +on: + workflow_call: + inputs: + NODE_VERSION: + required: false + type: string + PNPM_VERSION: + required: false + type: string + +jobs: + lint: + name: Setup project + runs-on: ubuntu-latest + steps: + - name: Checks-out repository + uses: actions/checkout@v3 + + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: "${{ inputs.NODE_VERSION }}" + check-latest: true + + - name: Install pnpm + 
uses: pnpm/action-setup@v2 + id: pnpm-install + with: + version: "${{ inputs.PNPM_VERSION }}" + run_install: false + + - name: Get pnpm store directory + id: pnpm-cache + run: | + echo "STORE_PATH=$(pnpm store path)" >> $GITHUB_ENV + + - name: Setup pnpm cache + uses: actions/cache@v3 + with: + path: | + ${{ env.STORE_PATH }} + /home/runner/.cache/Cypress + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('**/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + run: pnpm install --frozen-lockfile + + - name: Check lint error + run: pnpm run lint diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000..5e5cef6e --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,40 @@ +name: Release + +on: + workflow_call: + outputs: + release-created: + description: Has the releease been created + value: ${{ jobs.release.outputs.release-created }} + major-tag: + description: Major version tag + value: ${{ jobs.release.outputs.major-tag }} + minor-tag: + description: Minor version tag + value: ${{ jobs.release.outputs.minor-tag }} + patch-tag: + description: Patch version tag + value: ${{ jobs.release.outputs.patch-tag }} + +jobs: + release: + name: Create new release + runs-on: ubuntu-latest + outputs: + release-created: ${{ steps.release.outputs.release_created }} + major-tag: ${{ steps.release.outputs.major }} + minor-tag: ${{ steps.release.outputs.minor }} + patch-tag: ${{ steps.release.outputs.patch }} + steps: + - name: Checks-out repository + uses: actions/checkout@v3 + + - name: Pre release new version + uses: google-github-actions/release-please-action@v3 + id: release + with: + package-name: socle + release-type: node + default-branch: main + group-pull-request-title-pattern: release v${version} + token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 02342c97..4fa7b705 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,7 @@ vars.yaml # 
compiled filter_plugins -**/__pycache__/* \ No newline at end of file +**/__pycache__/* + +# node modules +node_modules \ No newline at end of file diff --git a/.husky/commit-msg b/.husky/commit-msg new file mode 100755 index 00000000..c160a771 --- /dev/null +++ b/.husky/commit-msg @@ -0,0 +1,4 @@ +#!/usr/bin/env sh +. "$(dirname -- "$0")/_/husky.sh" + +npx --no -- commitlint --edit ${1} diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100755 index 00000000..36af2198 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1,4 @@ +#!/bin/sh +. "$(dirname "$0")/_/husky.sh" + +npx lint-staged diff --git a/.lintstagedrc b/.lintstagedrc new file mode 100644 index 00000000..c929db74 --- /dev/null +++ b/.lintstagedrc @@ -0,0 +1,3 @@ +{ + "*.{yml, yaml}": "pnpm run format" +} \ No newline at end of file diff --git a/README.md b/README.md index 23980ec2..3716989c 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # Installation de la plateforme DSO ## Sommaire + - [Installation de la plateforme DSO](#installation-de-la-plateforme-dso) - [Sommaire](#sommaire) - [Introduction](#introduction) @@ -38,6 +39,9 @@ - [Gel de l'image](#gel-de-limage-2) - [Vault](#vault) - [Gel des images](#gel-des-images-1) + - [Les commandes de l'application](#les-commandes-de-lapplication) + - [Conventions](#conventions) + - [Contributions](#contributions) ## Introduction @@ -47,21 +51,22 @@ Les éléments déployés seront les suivants : | Outil | Site officiel | | --------------------------- | ---------------------------------------------------------------------------- | -| Argo CD | https://argo-cd.readthedocs.io | -| Cert-manager | https://cert-manager.io | -| Console Cloud π Native | https://github.com/cloud-pi-native/console | -| CloudNativePG | https://cloudnative-pg.io | -| GitLab | https://about.gitlab.com | -| GitLab Runner | https://docs.gitlab.com/runner | -| Harbor | https://goharbor.io | -| Keycloak | https://www.keycloak.org | -| Kubed | https://appscode.com/products/kubed | 
-| Sonatype Nexus Repository | https://www.sonatype.com/products/sonatype-nexus-repository | -| SonarQube Community Edition | https://www.sonarsource.com/open-source-editions/sonarqube-community-edition | -| SOPS | https://github.com/isindir/sops-secrets-operator | -| HashiCorp Vault | https://www.vaultproject.io | +| Argo CD | | +| Cert-manager | | +| Console Cloud π Native | | +| CloudNativePG | | +| GitLab | | +| GitLab Runner | | +| Harbor | | +| Keycloak | | +| Kubed | | +| Sonatype Nexus Repository | | +| SonarQube Community Edition | | +| SOPS | | +| HashiCorp Vault | | Certains outils peuvent prendre un peu de temps pour s'installer, par exemple Keycloak ou GitLab. + ## Prérequis Cette installation s'effectue dans un cluster OpenShift opérationnel et correctement démarré. @@ -71,6 +76,7 @@ Vous devrez disposer d'un **accès administrateur au cluster**. Vous aurez besoin d'une machine distincte du cluster, tournant sous GNU/Linux avec une distribution de la famille Debian ou Red Hat. Cette machine vous servira en tant qu'**environnement de déploiement** [Ansible control node](https://docs.ansible.com/ansible/latest/network/getting_started/basic_concepts.html#control-node). Elle nécessitera donc l'installation d'[Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html), et plus précisément du paquet **ansible**, pour disposer au moins de la commande `ansible-playbook` ainsi que de la collection [community.general](https://github.com/ansible-collections/community.general). Toujours sur votre environnement de déploiement, vous devrez : + - Clôner le présent [dépôt](https://github.com/cloud-pi-native/socle). - Disposer d'un fichier de configuration ```~/.kube/config``` paramétré avec les accès administrateur, pour l'appel à l'API du cluster (section users du fichier en question). 
@@ -87,6 +93,7 @@ Sinon vous devrez utiliser l'option `-K` (abréviation de l'option `--ask-become ```bash ansible-playbook -K admin-tools/install-requirements.yaml ``` + Pour information, le playbook `install-requirements.yaml` vous installera les éléments suivants **sur l'environnement de déploiement** : - Paquet requis pour l'installation des modules python : @@ -309,6 +316,7 @@ spec: ``` Les champs utilisables dans cette ressource de type **dsc** peuvent être décrits pour chaque outil à l'aide de la commande `kubectl explain`. Exemple avec argocd : + ``` kubectl explain dsc.spec.argocd ``` @@ -331,6 +339,7 @@ Voici les liens vers les documentations de chart helm pour les outils concernés ## Installation ### Lancement + Dès que votre [configuration](#configuration) est prête, c'est à dire que la ressource `dsc` par défaut `conf-dso` a bien été mise à jour, relancez la commande suivante : ```bash @@ -377,13 +386,14 @@ kubectl get dsc ma-dsc -o yaml Dès lors, il vous sera possible de déployer une nouvelle chaîne DSO dans ce cluster, en plus de celle existante. Pour cela, vous utiliserez l'[extra variable](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime) prévue à cet effet, nommée `dsc_cr` (pour DSO Socle Config Custom Resource). -Par exemple, si votre nouvelle ressource `dsc` se nomme `ma-dsc`, alors vous lancerez l'installation correspondante comme ceci : +Par exemple, si votre nouvelle ressource `dsc` se nomme `ma-dsc`, alors vous lancerez l'installation correspondante comme ceci : ```bash ansible-playbook install.yaml -e dsc_cr=ma-dsc ``` ## Récupération des secrets + Au moment de leur initialisation, certains outils stockent des secrets qui ne sont en principe plus disponibles ultérieurement. **Attention !** Pour garantir l'[idempotence](https://fr.wikipedia.org/wiki/Idempotence), ces secrets sont stockés dans plusieurs ressources du cluster. 
Supprimer ces ressources **indique à ansible qu'il doit réinitialiser les composants**. @@ -417,7 +427,9 @@ ansible-playbook admin-tools/get-credentials.yaml -e dsc_cr=ma-conf -t keycloak, **Remarque importante** : Il est **vivement encouragé** de conserver les valeurs qui vous sont fournies par le playbook « get-credentials.yaml ». Par exemple dans un fichier de base de données chiffré de type KeePass ou Bitwarden. Il est toutefois important de **ne pas les modifier ou les supprimer** sous peine de voir certains composants, par exemple Vault, être réinitialisés. ## Debug + ### Réinstallation + Si vous rencontrez des problèmes lors de l'éxécution du playbook, vous voudrez certainement relancer l'installation d'un ou plusieurs composants plutôt que d'avoir à tout réinstaller. Pour cela, vous pouvez utiliser les tags associés aux rôles dans le fichier « install.yaml ». @@ -439,6 +451,7 @@ ansible-playbook install.yaml -e dsc_cr=ma-dsc -t keycloak,console La BDD PostgreSQL du composant Keycloak est installée à l'aide de l'opérateur communautaire [CloudNativePG](https://cloudnative-pg.io/), via le role "cloudnativepg". Le playbook d'installation, en s'appuyant sur le role en question, s'assurera préalablement que cet opérateur n'est pas déjà installé dans le cluster. Il vérifiera pour cela la présence de deux éléments : + - L'API "postgresql.cnpg.io/v1". - La "MutatingWebhookConfiguration" nommée "cnpg-mutating-webhook-configuration". @@ -475,6 +488,7 @@ watch "kubectl get ns | grep 'mynamespace-'" ``` **Remarques importantes** : + - Par défaut le playbook de désinstallation, s'il est lancé sans aucun tag, ne supprimera pas les ressources suivantes : - **Kubed** déployé dans le namespace `openshift-infra`. - **Cert-manager** déployé dans le namespace `cert-manager`. @@ -515,8 +529,9 @@ Ceci est géré par divers paramètres que vous pourrez spécifier dans la resso Les sections suivantes détaillent comment procéder, outil par outil. 
**Remarques importantes** : - * Comme vu dans la section d'installation (sous-section [Déploiement de plusieurs forges DSO dans un même cluster](#déploiement-de-plusieurs-forges-dso-dans-un-même-cluster )), si vous utilisez votre propre ressource `dsc` de configuration, distincte de `conf-dso`, alors toutes les commandes `ansible-playbook` indiquées ci-dessous devront être complétées par l'[extra variable](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime) `dsc_cr` appropriée. - * Pour le gel des versions d'images, il est recommandé, si possible, de positionner un **tag d'image en adéquation avec la version du chart Helm utilisé**, c'est à dire d'utiliser le numéro "APP VERSION" retourné par la commande `helm search repo`. + +- Comme vu dans la section d'installation (sous-section [Déploiement de plusieurs forges DSO dans un même cluster](#déploiement-de-plusieurs-forges-dso-dans-un-même-cluster )), si vous utilisez votre propre ressource `dsc` de configuration, distincte de `conf-dso`, alors toutes les commandes `ansible-playbook` indiquées ci-dessous devront être complétées par l'[extra variable](https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_variables.html#defining-variables-at-runtime) `dsc_cr` appropriée. +- Pour le gel des versions d'images, il est recommandé, si possible, de positionner un **tag d'image en adéquation avec la version du chart Helm utilisé**, c'est à dire d'utiliser le numéro "APP VERSION" retourné par la commande `helm search repo`. ### Argo CD @@ -524,7 +539,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` d'Argo CD, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). 
-Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo argo-cd @@ -551,7 +566,7 @@ helm search repo argo-cd Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. -Pour connaître la liste des versions de charts helm d'Argo CD que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm d'Argo CD que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l argo-cd @@ -582,11 +597,12 @@ Puis de relancer l'installation d'Argo CD, laquelle mettra à jour la version du ```bash ansible-playbook install.yaml -t argocd ``` + #### Gel de l'image -En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image d'Argo CD de façon plus fine, en utilisant un tag dit "[immutable](https://docs.bitnami.com/kubernetes/infrastructure/argo-cd/configuration/understand-rolling-immutable-tags)" (**recommandé en production**). +En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image d'Argo CD de façon plus fine, en utilisant un tag dit "[immutable](https://docs.bitnami.com/kubernetes/infrastructure/argo-cd/configuration/understand-rolling-immutable-tags)" (**recommandé en production**). -Les différents tags utilisables pour l'image d'Argo CD sont disponibles ici : https://hub.docker.com/r/bitnami/argo-cd/tags +Les différents tags utilisables pour l'image d'Argo CD sont disponibles ici : Les tags dits "immutables" sont ceux qui possèdent un suffixe de type rXX, lequel correspond au numéro de révision. Ils pointent toujours vers la même image. 
Par exemple le tag "2.7.6-debian-11-r2" est un tag immutable. @@ -620,16 +636,17 @@ Puis relancer l'installation avec le tag `argocd` pour procéder au remplacement ansible-playbook install.yaml -t argocd ``` -Pour mémoire, les values utilisables sont disponibles ici : https://github.com/bitnami/charts/blob/main/bitnami/argo-cd/values.yaml +Pour mémoire, les values utilisables sont disponibles ici : + +Les releases d'Argo CD et leurs changelogs se trouvent ici : -Les releases d'Argo CD et leurs changelogs se trouvent ici : https://github.com/argoproj/argo-cd/releases ### Cert-manager **Attention !** Cert-manager est déployé dans le namespace "cert-manager", **commun à toutes les instances de la chaîne DSO**. Si vous modifiez sa version, ceci affectera toutes les instances DSO installées dans un même cluster. Ce n'est pas forcément génant, car un retour arrière sur la version est toujours possible, mais l'impact est à évaluer si votre cluster héberge un environnement de production. Le composant cert-manager est déployé directement via son manifest, téléchargé sur GitHub. -La liste des versions ("releases") est disponible ici : https://github.com/cert-manager/cert-manager/releases +La liste des versions ("releases") est disponible ici : Si vous utilisez la `dsc` par défaut nommée `conf-dso` c'est la release "v1.11.0" qui sera déployée. 
@@ -639,11 +656,13 @@ Pour déployer une autre version, il suffira d'éditer cette même `dsc`, de pr certmanager: version: v1.11.1 ``` + Il vous faudra ensuite appliquer le changement de configuration en utisant votre fichier de définition, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` + Puis relancer l'installation de cert-manager, laquelle procédera à la mise à jour de version sans coupure de service : ```bash @@ -658,7 +677,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` de CloudNativePG, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo cloudnative-pg @@ -685,7 +704,7 @@ helm search repo cloudnative-pg Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. -Pour connaître la liste des versions de charts helm de CloudNativePG que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm de CloudNativePG que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l cloudnative-pg @@ -712,6 +731,7 @@ Puis de relancer l'installation de CloudNativePG, laquelle mettra à jour la ver ```bash ansible-playbook install.yaml -t cloudnativepg ``` + #### Gel de l'image Il existe une correspondance biunivoque entre la version de chart utilisée et la version d'application ("APP VERSION") de l'opérateur. 
@@ -730,7 +750,7 @@ Le gel d'image de conteneur PostgreSQL est géré par l'installation du socle DS Le composant console est déployé directement via son manifest, téléchargé sur GitHub. -La liste des versions ("releases") est disponible ici : https://github.com/cloud-pi-native/console/releases +La liste des versions ("releases") est disponible ici : Si vous utilisez la `dsc` par défaut nommée `conf-dso` c'est la release "v4.1.0" qui sera déployée. @@ -749,11 +769,13 @@ Puis appliquer le changement de configuration, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` + Et relancer l'installation de la console, laquelle procédera à la mise à jour de version sans coupure de service : ```bash ansible-playbook install.yaml -t console ``` + ### GitLab Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par défaut sans modification, le rôle gitlab déploiera la dernière version **stable** de l'[opérateur GitLab](https://operatorhub.io/operator/gitlab-operator-kubernetes). @@ -775,12 +797,12 @@ Via cet opérateur, le rôle tentera de déployer par défaut la version 6.11.10 La version de GitLab installée est donc déjà figée via la version du chart utilisée, car il existe une correspondance biunivoque entre les deux. Les correspondances entre versions du chart et versions de GitLab sont fournies ici : -https://docs.gitlab.com/charts/installation/version_mappings.html + L'opérateur sera en capacité de proposer différentes versions du chart à l'installation. Pour connaître les versions de chart **utilisables**, il sera possible de se référer à la page suivante, exemple avec la branche 0.21 stable de l'opérateur : -https://gitlab.com/gitlab-org/cloud-native/gitlab-operator/-/blob/0-21-stable/CHART_VERSIONS + Ces versions de charts proposées par l'opérateur évolueront dans le temps, afin de tenir compte notamment des mises à jour de sécurité. 
@@ -798,7 +820,7 @@ Dans l'exemple ci-dessus, nous avons tenté une installation de GitLab avec la v Il nous faudra donc spécifier une version valide, en l'occurence 6.11.10 si nous voulons rester sur la branche 15.11 de GitLab au moment de l'installation, ou bien l'une des deux autres version supérieures proposées. Rappel : les correspondances entre versions du chart et versions de GitLab sont fournies ici : -https://docs.gitlab.com/charts/installation/version_mappings.html + Si vous souhaitez changer la version du chart helm utilisé, il vous suffira de relever le **numéro de version du chart** désiré **parmi ceux supportés par l'opérateur**, puis l'indiquer dans votre ressource `dsc` de configuration. @@ -830,7 +852,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` de Harbor, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo harbor/harbor @@ -857,7 +879,7 @@ helm search repo harbor/harbor Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. 
-Pour connaître la liste des versions de charts helm Harbor que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm Harbor que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l harbor/harbor @@ -880,11 +902,13 @@ Il vous suffit alors de mettre à jour votre configuration, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` + **Remarques importantes** : -* Il est fortement recommnandé de **sauvegarder votre base de données** avant de poursuivre, sauf s'il s'agit d'une première installation de Harbor, ou d'une [suppression complète](#un-ou-plusieurs-outils) suivie d'une réinstallation sans persistance des données. -* S'il s'agit d'un **upgrade** de version sans désinstallation préalable, il est également plutôt recommandé de réaliser cet upgrade **vers une version directement supérieure** et ainsi de suite, jusqu'à parvenir à la version désirée. Par exemple de "1.12.0" vers "1.12.1" puis vers "1.12.2". -* Le **downgrade** par mise à jour de la version du chart est source de problèmes. Il est susceptible de mal se passer et n'est donc pas recommandé. Mieux vaut désinstaller Harbor (cf. [désinstallation](#un-ou-plusieurs-outils)), puis procéder à sa réinstallation en spécifiant le numéro de version du chart souhaité, puis en important vos données sauvegardées. -* Fixer le numéro de version du chart Helm sera normalement suffisant pour fixer aussi le numéro de version des images associées. Le numéro de version de ces images sera celui visible dans la colonne "APP VERSION" de la commande `helm search repo -l harbor/harbor`. + +- Il est fortement recommnandé de **sauvegarder votre base de données** avant de poursuivre, sauf s'il s'agit d'une première installation de Harbor, ou d'une [suppression complète](#un-ou-plusieurs-outils) suivie d'une réinstallation sans persistance des données. 
+- S'il s'agit d'un **upgrade** de version sans désinstallation préalable, il est également plutôt recommandé de réaliser cet upgrade **vers une version directement supérieure** et ainsi de suite, jusqu'à parvenir à la version désirée. Par exemple de "1.12.0" vers "1.12.1" puis vers "1.12.2". +- Le **downgrade** par mise à jour de la version du chart est source de problèmes. Il est susceptible de mal se passer et n'est donc pas recommandé. Mieux vaut désinstaller Harbor (cf. [désinstallation](#un-ou-plusieurs-outils)), puis procéder à sa réinstallation en spécifiant le numéro de version du chart souhaité, puis en important vos données sauvegardées. +- Fixer le numéro de version du chart Helm sera normalement suffisant pour fixer aussi le numéro de version des images associées. Le numéro de version de ces images sera celui visible dans la colonne "APP VERSION" de la commande `helm search repo -l harbor/harbor`. Si vous avez bien pris connaissance des avertissements ci-dessus, vous pouvez maintenant relancer l'installation de Harbor, laquelle mettra à jour la version du chart et de l'application **avec coupure de service** : @@ -898,23 +922,24 @@ Pour fixer les versions d'images, voir ci-dessous. #### Gel des images -En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer les versions d'images de Harbor de façon plus fine (**recommandé en production**). +En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer les versions d'images de Harbor de façon plus fine (**recommandé en production**). Il sera ainsi possible de fixer l'image de chacun des composants. 
Les différents tags utilisables sont disponibles ici : -* nginx : https://hub.docker.com/r/goharbor/nginx-photon/tags -* portal : https://hub.docker.com/r/goharbor/harbor-portal/tags -* core : https://hub.docker.com/r/goharbor/harbor-core/tags -* jobservice : https://hub.docker.com/r/goharbor/harbor-jobservice/tags -* registry (registry) : https://hub.docker.com/r/goharbor/registry-photon/tags -* registry (controller) : https://hub.docker.com/r/goharbor/harbor-registryctl/tags -* trivy : https://hub.docker.com/r/goharbor/trivy-adapter-photon/tags -* notary (server) : https://hub.docker.com/r/goharbor/notary-server-photon/tags -* notary (signer) : https://hub.docker.com/r/goharbor/notary-signer-photon/tags -* database : https://hub.docker.com/r/goharbor/harbor-db/tags -* redis : https://hub.docker.com/r/goharbor/redis-photon/tags -* exporter : https://hub.docker.com/r/goharbor/harbor-exporter/tags + +- nginx : +- portal : +- core : +- jobservice : +- registry (registry) : +- registry (controller) : +- trivy : +- notary (server) : +- notary (signer) : +- database : +- redis : +- exporter : **Rappel** : Il est néanmoins recommandé, si possible, de positionner des tags d'images en adéquation avec la version du chart Helm utilisé, c'est à dire d'utiliser le numéro "APP VERSION" retourné par la commande `helm search repo -l harbor/harbor` vue précédemment. 
@@ -985,7 +1010,7 @@ Pour spécifier nos tags, il nous suffira d'éditer la ressource `dsc` de config tag: v2.8.2 ``` -Pour mémoire, les values utilisables sont disponibles et documentées ici : https://github.com/goharbor/harbor-helm/tree/master +Pour mémoire, les values utilisables sont disponibles et documentées ici : Lorsque vos values sont à jour avec les versions désirées, appliquez le changement en utilisant votre fichier de définition, exemple : @@ -1013,7 +1038,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` de Keycloak, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo bitnami/keycloak @@ -1040,7 +1065,7 @@ helm search repo bitnami/keycloak Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. 
-Pour connaître la liste des versions de charts helm de Keycloak que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm de Keycloak que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l bitnami/keycloak @@ -1068,11 +1093,12 @@ Puis de relancer l'installation de Keycloak, laquelle mettra à jour la version ```bash ansible-playbook install.yaml -t keycloak ``` + #### Gel de l'image Keycloak En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image de Keycloak de façon plus fine, en utilisant un tag dit "[immutable](https://docs.bitnami.com/kubernetes/apps/keycloak/configuration/understand-rolling-immutable-tags/)" (**recommandé en production**). -Les différents tags utilisables pour l'image de Keycloak sont disponibles ici : https://hub.docker.com/r/bitnami/keycloak/tags +Les différents tags utilisables pour l'image de Keycloak sont disponibles ici : Les tags dits "immutables" sont ceux qui possèdent un suffixe de type rXX, lequel correspond au numéro de révision. Ils pointent toujours vers la même image. Par exemple le tag "19.0.3-debian-11-r22" est un tag immutable. @@ -1102,9 +1128,10 @@ Puis relancer l'installation avec le tag `keycloak` pour procéder au remplaceme ansible-playbook install.yaml -t keycloak ``` -Pour mémoire, les values utilisables sont disponibles ici : https://github.com/bitnami/charts/blob/main/bitnami/keycloak/values.yaml +Pour mémoire, les values utilisables sont disponibles ici : + +Les release notes de Keycloak se trouvent ici : -Les release notes de Keycloak se trouvent ici : https://github.com/keycloak/keycloak/releases #### Gel de l'image PostgreSQL pour Keycloak Tel qu'il est déployé, Keycloak s'appuie sur un cluster de base de donnée PostgreSQL géré par l'opérateur CloudNativePG. 
@@ -1117,7 +1144,7 @@ Il est toutefois possible et **recommandé en production** de fixer la version d Pour cela, nous utiliserons l'un des tags d'image immutables proposés par CloudNativePG. -Les tags en question sont disponibles ici : https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql +Les tags en question sont disponibles ici : Pour spécifier un tel tag, il nous suffira d'éditer la ressource `dsc` de configuration (par défaut ce sera la `dsc` nommée `conf-dso`) et d'indiquer le tag souhaité au niveau du paramètre `postgreSQLimageName`. Exemple : @@ -1152,11 +1179,11 @@ ansible-playbook install.yaml -t keycloak **Attention !** Kubed est déployé dans le namespace "openshift-infra", **commun à toutes les instances de la chaîne DSO**. Si vous modifiez sa version, ceci affectera toutes les instances DSO installées dans un même cluster. Ce n'est pas forcément génant, car un retour arrière sur la version est toujours possible, mais l'impact est à évaluer si votre cluster héberge un environnement de production. -Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par défaut sans modification, le rôle confSyncer qui sert à installer Kubed déploie par défaut la dernière version du [chart helm ](https://github.com/appscode/charts/tree/master/stable/kubed) disponible dans le cache des dépôts helm de l'utilisateur. +Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par défaut sans modification, le rôle confSyncer qui sert à installer Kubed déploie par défaut la dernière version du [chart helm](https://github.com/appscode/charts/tree/master/stable/kubed) disponible dans le cache des dépôts helm de l'utilisateur. Ceci est lié au fait que le paramètre de configuration `chartVersion` de Kubed, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). 
-Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo kubed @@ -1183,7 +1210,7 @@ helm search repo kubed Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. -Pour connaître la liste des versions de charts helm de Kubed que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm de Kubed que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l kubed @@ -1209,6 +1236,7 @@ Puis de relancer l'installation de Kubed, laquelle mettra à jour la version du ```bash ansible-playbook install.yaml -t kubed ``` + **Remarque importante** : Le numéro de version du chart Helm est corrélé à celui de l'image utilisée pour l'application, de sorte que fixer ce numéro de version fixe aussi celui de l'image. ### Sonatype Nexus Repository @@ -1217,7 +1245,7 @@ Le composant nexus est installé directement via le manifest de deployment "nexu Si vous utilisez la `dsc` par défaut nommée `conf-dso` c'est l'image "3.56.0" qui sera déployée. -Les tags d'images utilisables sont disponibles ici : https://hub.docker.com/r/sonatype/nexus3/tags +Les tags d'images utilisables sont disponibles ici : Pour déployer une autre version, il suffira d'éditer la `dsc`, de préférence avec le fichier YAML que vous avez initialement utilisé pendant l'installation, puis modifier la section suivante en y indiquant la version d'image désirée au niveau du paramètre **imageTag**. 
Exemple : @@ -1234,6 +1262,7 @@ Puis appliquer le changement de configuration, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` + Et relancer l'installation de nexus, laquelle procédera à la mise à jour de version, **avec coupure de service** : ```bash @@ -1246,7 +1275,7 @@ Le composant sonarqube est installé directement via le manifest de deployment " Si vous utilisez la `dsc` par défaut nommée `conf-dso` c'est l'image "9.9-community" qui sera déployée. -Les tags d'images utilisables pour l'édition community sont disponibles ici : https://hub.docker.com/_/sonarqube/tags?name=community +Les tags d'images utilisables pour l'édition community sont disponibles ici : Pour déployer une autre version, il suffira d'éditer la `dsc`, de préférence avec le fichier YAML que vous avez initialement utilisé pendant l'installation, puis modifier la section suivante en y indiquant la version d'image désirée au niveau du paramètre **imageTag**. Exemple : @@ -1262,6 +1291,7 @@ Puis appliquer le changement de configuration, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` + Et relancer l'installation de sonarqube, laquelle procédera à la mise à jour de version **avec coupure de service** : ```bash @@ -1274,7 +1304,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` de SOPS, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo sops/sops-secrets-operator @@ -1301,7 +1331,7 @@ helm search repo sops/sops-secrets-operator Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. 
-Pour connaître la liste des versions de charts helm de SOPS que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm de SOPS que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l sops/sops-secrets-operator @@ -1333,7 +1363,7 @@ Pour fixer la version d'image, voir ci-dessous. #### Gel de l'image -En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image de SOPS de façon plus fine (**recommandé en production**). +En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image de SOPS de façon plus fine (**recommandé en production**). Pour spécifier cette version d'image, il nous suffira d'éditer la ressource `dsc` de configuration (par défaut ce sera la `dsc` nommée `conf-dso`) et de surcharger les "values" correspondantes du chart helm, en ajoutant celles dont nous avons besoin. Exemple : @@ -1346,9 +1376,9 @@ Pour spécifier cette version d'image, il nous suffira d'éditer la ressource `d tag: 0.9.1 ``` -Pour mémoire, les values utilisables sont disponibles et documentées ici : https://github.com/isindir/sops-secrets-operator/tree/master/chart/helm3/sops-secrets-operator +Pour mémoire, les values utilisables sont disponibles et documentées ici : -Les numéros de version de chart Helm et d'image se trouvent ici : https://github.com/isindir/sops-secrets-operator/blob/master/README.md#versioning +Les numéros de version de chart Helm et d'image se trouvent ici : S'agissant de l'image, ces numéros correspondent à la colonne "Operator". 
@@ -1365,6 +1395,7 @@ Lorsque vos values ont été actualisées, avec la version d'image désirée, ap ```bash kubectl apply -f ma-conf-dso.yaml ``` + Puis relancez l'installation avec le tag `sops` pour procéder à la mise à jour et au gel de l'image : ```bash @@ -1379,7 +1410,7 @@ Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par d Ceci est lié au fait que le paramètre de configuration `chartVersion` de Vault, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : ```bash helm search repo hashicorp/vault @@ -1406,7 +1437,7 @@ helm search repo hashicorp/vault Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. -Pour connaître la liste des versions de charts helm de Vault que vous pouvez maintenant installer, utilisez la commande suivante : +Pour connaître la liste des versions de charts helm de Vault que vous pouvez maintenant installer, utilisez la commande suivante : ```bash helm search repo -l hashicorp/vault @@ -1441,15 +1472,17 @@ Pour fixer les versions d'images, voir ci-dessous. #### Gel des images -En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer les versions d'images de Vault de façon plus fine (**recommandé en production**). +En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer les versions d'images de Vault de façon plus fine (**recommandé en production**). Il sera ainsi possible de fixer l'image : -* du Vault Agent Sidecar Injector (via le repository hashicorp/vault-k8s), -* du Vault Agent (via le repository hashicorp/vault). 
+ +- du Vault Agent Sidecar Injector (via le repository hashicorp/vault-k8s), +- du Vault Agent (via le repository hashicorp/vault). Les différents tags d'images utilisables sont disponibles ici : -* Pour le Vault Agent Sidecar Injector : https://hub.docker.com/r/hashicorp/vault-k8s/tags -* Pour le Vault Agent : https://hub.docker.com/r/hashicorp/vault/tags + +- Pour le Vault Agent Sidecar Injector : +- Pour le Vault Agent : Pour spécifier nos tags, il nous suffira d'éditer la ressource `dsc` de configuration (par défaut ce sera la `dsc` nommée `conf-dso`) et de surcharger les "values" correspondantes du chart helm, en ajoutant celles dont nous avons besoin. Exemple : @@ -1477,7 +1510,7 @@ Pour spécifier nos tags, il nous suffira d'éditer la ressource `dsc` de config **Remarque importante** : Dans la section `server` de vos values, le paramètre `updateStrategyType` doit impérativement être présent et positionné sur "RollingUpdate" pour que l'image du serveur Vault puisse se mettre à jour avec le tag que vous avez indiqué. -Pour mémoire, les values utilisables sont disponibles et documentées ici : https://developer.hashicorp.com/vault/docs/platform/k8s/helm/configuration +Pour mémoire, les values utilisables sont disponibles et documentées ici : Lorsque vos values sont à jour avec les versions désirées, appliquez le changement en utilisant votre fichier de définition, exemple : @@ -1508,3 +1541,23 @@ ansible-playbook install.yaml -t vault ``` Puis revérifiez l'état du vault-0 qui devrait maintenant être déployé comme attendu. + +### Les commandes de l'application + +```shell +# Lancer la vérification syntaxique +pnpm install && pnpm run lint + +# Lancer le formattage du code +pnpm install && pnpm run format +``` + +## Conventions + +Cf. [Conventions - MIOM Fabrique Numérique](https://projets-ts-fabnum.netlify.app/conventions/nommage.html). 
+ +## Contributions + +Les commits doivent suivre la spécification des [Commits Conventionnels](https://www.conventionalcommits.org/en/v1.0.0/), il est possible d'ajouter l'[extension VSCode](https://github.com/vivaxy/vscode-conventional-commits) pour faciliter la création des commits. + +Une PR doit être faite avec une branche à jour avec la branche `develop` en rebase (et sans merge) avant demande de fusion, et la fusion doit être demandée dans `develop`. diff --git a/commitlint.config.cjs b/commitlint.config.cjs new file mode 100644 index 00000000..e4a28e09 --- /dev/null +++ b/commitlint.config.cjs @@ -0,0 +1,5 @@ +module.exports = { + extends: [ + '@commitlint/config-conventional' + ] +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..0d74bd20 --- /dev/null +++ b/package.json @@ -0,0 +1,18 @@ +{ + "name": "dso-socle", + "version": "1.0.0", + "private": true, + "scripts": { + "format": "eslint ./ --ext .js,.yaml,.yml --fix", + "lint": "eslint ./ --ext .js,.yaml,.yml" + }, + "devDependencies": { + "@commitlint/cli": "^17.7.1", + "@commitlint/config-conventional": "^17.7.0", + "husky": "^8.0.3", + "lint-staged": "^14.0.1", + "eslint": "^8.49.0", + "eslint-plugin-yml": "^1.9.0", + "yaml-eslint-parser": "^1.2.2" + } +} \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100644 index 00000000..58aa4f2a --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,2088 @@ +lockfileVersion: "6.0" + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +devDependencies: + "@commitlint/cli": + specifier: ^17.7.1 + version: 17.7.1 + "@commitlint/config-conventional": + specifier: ^17.7.0 + version: 17.7.0 + eslint: + specifier: ^8.49.0 + version: 8.49.0 + eslint-plugin-yml: + specifier: ^1.9.0 + version: 1.9.0(eslint@8.49.0) + husky: + specifier: ^8.0.3 + version: 8.0.3 + lint-staged: + specifier: ^14.0.1 + version: 14.0.1 + yaml-eslint-parser: + specifier: ^1.2.2 + version: 1.2.2 + +packages: + + 
/@aashutoshrathi/word-wrap@1.2.6: + resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==} + engines: {node: ">=0.10.0"} + dev: true + + /@babel/code-frame@7.22.13: + resolution: {integrity: sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==} + engines: {node: ">=6.9.0"} + dependencies: + "@babel/highlight": 7.22.20 + chalk: 2.4.2 + dev: true + + /@babel/helper-validator-identifier@7.22.20: + resolution: {integrity: sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A==} + engines: {node: ">=6.9.0"} + dev: true + + /@babel/highlight@7.22.20: + resolution: {integrity: sha512-dkdMCN3py0+ksCgYmGG8jKeGA/8Tk+gJwSYYlFGxG5lmhfKNoAy004YpLxpS1W2J8m/EK2Ew+yOs9pVRwO89mg==} + engines: {node: ">=6.9.0"} + dependencies: + "@babel/helper-validator-identifier": 7.22.20 + chalk: 2.4.2 + js-tokens: 4.0.0 + dev: true + + /@commitlint/cli@17.7.1: + resolution: {integrity: sha512-BCm/AT06SNCQtvFv921iNhudOHuY16LswT0R3OeolVGLk8oP+Rk9TfQfgjH7QPMjhvp76bNqGFEcpKojxUNW1g==} + engines: {node: ">=v14"} + hasBin: true + dependencies: + "@commitlint/format": 17.4.4 + "@commitlint/lint": 17.7.0 + "@commitlint/load": 17.7.1 + "@commitlint/read": 17.5.1 + "@commitlint/types": 17.4.4 + execa: 5.1.1 + lodash.isfunction: 3.0.9 + resolve-from: 5.0.0 + resolve-global: 1.0.0 + yargs: 17.7.2 + transitivePeerDependencies: + - "@swc/core" + - "@swc/wasm" + dev: true + + /@commitlint/config-conventional@17.7.0: + resolution: {integrity: sha512-iicqh2o6et+9kWaqsQiEYZzfLbtoWv9uZl8kbI8EGfnc0HeGafQBF7AJ0ylN9D/2kj6txltsdyQs8+2fTMwWEw==} + engines: {node: ">=v14"} + dependencies: + conventional-changelog-conventionalcommits: 6.1.0 + dev: true + + /@commitlint/config-validator@17.6.7: + resolution: {integrity: sha512-vJSncmnzwMvpr3lIcm0I8YVVDJTzyjy7NZAeXbTXy+MPUdAr9pKyyg7Tx/ebOQ9kqzE6O9WT6jg2164br5UdsQ==} + engines: {node: ">=v14"} + dependencies: + 
"@commitlint/types": 17.4.4 + ajv: 8.12.0 + dev: true + + /@commitlint/ensure@17.6.7: + resolution: {integrity: sha512-mfDJOd1/O/eIb/h4qwXzUxkmskXDL9vNPnZ4AKYKiZALz4vHzwMxBSYtyL2mUIDeU9DRSpEUins8SeKtFkYHSw==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/types": 17.4.4 + lodash.camelcase: 4.3.0 + lodash.kebabcase: 4.1.1 + lodash.snakecase: 4.1.1 + lodash.startcase: 4.4.0 + lodash.upperfirst: 4.3.1 + dev: true + + /@commitlint/execute-rule@17.4.0: + resolution: {integrity: sha512-LIgYXuCSO5Gvtc0t9bebAMSwd68ewzmqLypqI2Kke1rqOqqDbMpYcYfoPfFlv9eyLIh4jocHWwCK5FS7z9icUA==} + engines: {node: ">=v14"} + dev: true + + /@commitlint/format@17.4.4: + resolution: {integrity: sha512-+IS7vpC4Gd/x+uyQPTAt3hXs5NxnkqAZ3aqrHd5Bx/R9skyCAWusNlNbw3InDbAK6j166D9asQM8fnmYIa+CXQ==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/types": 17.4.4 + chalk: 4.1.2 + dev: true + + /@commitlint/is-ignored@17.7.0: + resolution: {integrity: sha512-043rA7m45tyEfW7Zv2vZHF++176MLHH9h70fnPoYlB1slKBeKl8BwNIlnPg4xBdRBVNPaCqvXxWswx2GR4c9Hw==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/types": 17.4.4 + semver: 7.5.4 + dev: true + + /@commitlint/lint@17.7.0: + resolution: {integrity: sha512-TCQihm7/uszA5z1Ux1vw+Nf3yHTgicus/+9HiUQk+kRSQawByxZNESeQoX9ujfVd3r4Sa+3fn0JQAguG4xvvbA==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/is-ignored": 17.7.0 + "@commitlint/parse": 17.7.0 + "@commitlint/rules": 17.7.0 + "@commitlint/types": 17.4.4 + dev: true + + /@commitlint/load@17.7.1: + resolution: {integrity: sha512-S/QSOjE1ztdogYj61p6n3UbkUvweR17FQ0zDbNtoTLc+Hz7vvfS7ehoTMQ27hPSjVBpp7SzEcOQu081RLjKHJQ==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/config-validator": 17.6.7 + "@commitlint/execute-rule": 17.4.0 + "@commitlint/resolve-extends": 17.6.7 + "@commitlint/types": 17.4.4 + "@types/node": 20.4.7 + chalk: 4.1.2 + cosmiconfig: 8.3.6(typescript@5.2.2) + cosmiconfig-typescript-loader: 
4.4.0(@types/node@20.4.7)(cosmiconfig@8.3.6)(ts-node@10.9.1)(typescript@5.2.2) + lodash.isplainobject: 4.0.6 + lodash.merge: 4.6.2 + lodash.uniq: 4.5.0 + resolve-from: 5.0.0 + ts-node: 10.9.1(@types/node@20.4.7)(typescript@5.2.2) + typescript: 5.2.2 + transitivePeerDependencies: + - "@swc/core" + - "@swc/wasm" + dev: true + + /@commitlint/message@17.4.2: + resolution: {integrity: sha512-3XMNbzB+3bhKA1hSAWPCQA3lNxR4zaeQAQcHj0Hx5sVdO6ryXtgUBGGv+1ZCLMgAPRixuc6en+iNAzZ4NzAa8Q==} + engines: {node: ">=v14"} + dev: true + + /@commitlint/parse@17.7.0: + resolution: {integrity: sha512-dIvFNUMCUHqq5Abv80mIEjLVfw8QNuA4DS7OWip4pcK/3h5wggmjVnlwGCDvDChkw2TjK1K6O+tAEV78oxjxag==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/types": 17.4.4 + conventional-changelog-angular: 6.0.0 + conventional-commits-parser: 4.0.0 + dev: true + + /@commitlint/read@17.5.1: + resolution: {integrity: sha512-7IhfvEvB//p9aYW09YVclHbdf1u7g7QhxeYW9ZHSO8Huzp8Rz7m05aCO1mFG7G8M+7yfFnXB5xOmG18brqQIBg==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/top-level": 17.4.0 + "@commitlint/types": 17.4.4 + fs-extra: 11.1.1 + git-raw-commits: 2.0.11 + minimist: 1.2.8 + dev: true + + /@commitlint/resolve-extends@17.6.7: + resolution: {integrity: sha512-PfeoAwLHtbOaC9bGn/FADN156CqkFz6ZKiVDMjuC2N5N0740Ke56rKU7Wxdwya8R8xzLK9vZzHgNbuGhaOVKIg==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/config-validator": 17.6.7 + "@commitlint/types": 17.4.4 + import-fresh: 3.3.0 + lodash.mergewith: 4.6.2 + resolve-from: 5.0.0 + resolve-global: 1.0.0 + dev: true + + /@commitlint/rules@17.7.0: + resolution: {integrity: sha512-J3qTh0+ilUE5folSaoK91ByOb8XeQjiGcdIdiB/8UT1/Rd1itKo0ju/eQVGyFzgTMYt8HrDJnGTmNWwcMR1rmA==} + engines: {node: ">=v14"} + dependencies: + "@commitlint/ensure": 17.6.7 + "@commitlint/message": 17.4.2 + "@commitlint/to-lines": 17.4.0 + "@commitlint/types": 17.4.4 + execa: 5.1.1 + dev: true + + /@commitlint/to-lines@17.4.0: + resolution: {integrity: 
sha512-LcIy/6ZZolsfwDUWfN1mJ+co09soSuNASfKEU5sCmgFCvX5iHwRYLiIuoqXzOVDYOy7E7IcHilr/KS0e5T+0Hg==} + engines: {node: ">=v14"} + dev: true + + /@commitlint/top-level@17.4.0: + resolution: {integrity: sha512-/1loE/g+dTTQgHnjoCy0AexKAEFyHsR2zRB4NWrZ6lZSMIxAhBJnmCqwao7b4H8888PsfoTBCLBYIw8vGnej8g==} + engines: {node: ">=v14"} + dependencies: + find-up: 5.0.0 + dev: true + + /@commitlint/types@17.4.4: + resolution: {integrity: sha512-amRN8tRLYOsxRr6mTnGGGvB5EmW/4DDjLMgiwK3CCVEmN6Sr/6xePGEpWaspKkckILuUORCwe6VfDBw6uj4axQ==} + engines: {node: ">=v14"} + dependencies: + chalk: 4.1.2 + dev: true + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: ">=12"} + dependencies: + "@jridgewell/trace-mapping": 0.3.9 + dev: true + + /@eslint-community/eslint-utils@4.4.0(eslint@8.49.0): + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + dependencies: + eslint: 8.49.0 + eslint-visitor-keys: 3.4.3 + dev: true + + /@eslint-community/regexpp@4.8.1: + resolution: {integrity: sha512-PWiOzLIUAjN/w5K17PoF4n6sKBw0gqLHPhywmYHP4t1VFQQVYeb1yWsJwnMVEMl3tUHME7X/SJPZLmtG7XBDxQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: true + + /@eslint/eslintrc@2.1.2: + resolution: {integrity: sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.6.1 + globals: 13.21.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@eslint/js@8.49.0: + resolution: {integrity: 
sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /@humanwhocodes/config-array@0.11.11: + resolution: {integrity: sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==} + engines: {node: ">=10.10.0"} + dependencies: + "@humanwhocodes/object-schema": 1.2.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: ">=12.22"} + dev: true + + /@humanwhocodes/object-schema@1.2.1: + resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==} + dev: true + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: ">=6.0.0"} + dev: true + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + dev: true + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + "@jridgewell/resolve-uri": 3.1.1 + "@jridgewell/sourcemap-codec": 1.4.15 + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: ">= 8"} + dependencies: + "@nodelib/fs.stat": 2.0.5 + run-parallel: 1.2.0 + dev: true + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: ">= 8"} + dev: true + + 
/@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: ">= 8"} + dependencies: + "@nodelib/fs.scandir": 2.1.5 + fastq: 1.15.0 + dev: true + + /@tsconfig/node10@1.0.9: + resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + dev: true + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + dev: true + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + dev: true + + /@tsconfig/node16@1.0.4: + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + dev: true + + /@types/minimist@1.2.2: + resolution: {integrity: sha512-jhuKLIRrhvCPLqwPcx6INqmKeiA5EWrsCOPhrlFSrbrmU4ZMPjj5Ul/oLCMDO98XRUIwVm78xICz4EPCektzeQ==} + dev: true + + /@types/node@20.4.7: + resolution: {integrity: sha512-bUBrPjEry2QUTsnuEjzjbS7voGWCc30W0qzgMf90GPeDGFRakvrz47ju+oqDAKCXLUCe39u57/ORMl/O/04/9g==} + dev: true + + /@types/normalize-package-data@2.4.1: + resolution: {integrity: sha512-Gj7cI7z+98M282Tqmp2K5EIsoouUEzbBJhQQzDE3jSIRk6r9gsz0oUokqIUR4u1R3dMHo0pDHM7sNOHyhulypw==} + dev: true + + /JSONStream@1.3.5: + resolution: {integrity: sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==} + hasBin: true + dependencies: + jsonparse: 1.3.1 + through: 2.3.8 + dev: true + + /acorn-jsx@5.3.2(acorn@8.10.0): + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.10.0 + dev: true + + /acorn-walk@8.2.0: + resolution: {integrity: 
sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: ">=0.4.0"} + dev: true + + /acorn@8.10.0: + resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==} + engines: {node: ">=0.4.0"} + hasBin: true + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + dev: true + + /ajv@8.12.0: + resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js: 4.4.1 + dev: true + + /ansi-escapes@5.0.0: + resolution: {integrity: sha512-5GFMVX8HqE/TB+FuBJGuO5XG0WrsA6ptUqoODaT/n9mmUaZFkqnBueB4leqGBCmrUHnCnC4PCZTCd0E7QQ83bA==} + engines: {node: ">=12"} + dependencies: + type-fest: 1.4.0 + dev: true + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: ">=8"} + dev: true + + /ansi-regex@6.0.1: + resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==} + engines: {node: ">=12"} + dev: true + + /ansi-styles@3.2.1: + resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} + engines: {node: ">=4"} + dependencies: + color-convert: 1.9.3 + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: ">=8"} + dependencies: + color-convert: 2.0.1 + dev: true + + /ansi-styles@6.2.1: + resolution: {integrity: 
sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} + engines: {node: ">=12"} + dev: true + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + dev: true + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: true + + /array-ify@1.0.0: + resolution: {integrity: sha512-c5AMf34bKdvPhQ7tBGhqkgKNUzMr4WUs+WDtC2ZUGOUncbxKMTvqxYctiseW3+L4bA8ec+GcZ6/A/FW4m8ukng==} + dev: true + + /arrify@1.0.1: + resolution: {integrity: sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==} + engines: {node: ">=0.10.0"} + dev: true + + /balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: ">=8"} + dependencies: + fill-range: 7.0.1 + dev: true + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: ">=6"} + dev: true + + /camelcase-keys@6.2.2: + resolution: {integrity: sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==} + engines: {node: ">=8"} + dependencies: + camelcase: 5.3.1 + map-obj: 4.3.0 + quick-lru: 4.0.1 + dev: true + + /camelcase@5.3.1: + resolution: {integrity: sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==} + engines: {node: ">=6"} + 
dev: true + + /chalk@2.4.2: + resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==} + engines: {node: ">=4"} + dependencies: + ansi-styles: 3.2.1 + escape-string-regexp: 1.0.5 + supports-color: 5.5.0 + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: ">=10"} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: true + + /chalk@5.3.0: + resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + dev: true + + /cli-cursor@4.0.0: + resolution: {integrity: sha512-VGtlMu3x/4DOtIUwEkRezxUZ2lBacNJCHash0N0WeZDBS+7Ux1dm3XWAgWYxLJFMMdOeXMHXorshEFhbMSGelg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + restore-cursor: 4.0.0 + dev: true + + /cli-truncate@3.1.0: + resolution: {integrity: sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + slice-ansi: 5.0.0 + string-width: 5.1.2 + dev: true + + /cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: ">=12"} + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + dev: true + + /color-convert@1.9.3: + resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} + dependencies: + color-name: 1.1.3 + dev: true + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: ">=7.0.0"} + dependencies: + color-name: 1.1.4 + dev: true + + /color-name@1.1.3: + resolution: {integrity: 
sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==} + dev: true + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + dev: true + + /colorette@2.0.20: + resolution: {integrity: sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==} + dev: true + + /commander@11.0.0: + resolution: {integrity: sha512-9HMlXtt/BNoYr8ooyjjNRdIilOTkVJXB+GhxMTtOKwk0R4j4lS4NpjuqmRxroBfnfTSHQIHQB7wryHhXarNjmQ==} + engines: {node: ">=16"} + dev: true + + /compare-func@2.0.0: + resolution: {integrity: sha512-zHig5N+tPWARooBnb0Zx1MFcdfpyJrfTJ3Y5L+IFvUm8rM74hHz66z0gw0x4tijh5CorKkKUCnW82R2vmpeCRA==} + dependencies: + array-ify: 1.0.0 + dot-prop: 5.3.0 + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + dev: true + + /conventional-changelog-angular@6.0.0: + resolution: {integrity: sha512-6qLgrBF4gueoC7AFVHu51nHL9pF9FRjXrH+ceVf7WmAfH3gs+gEYOkvxhjMPjZu57I4AGUGoNTY8V7Hrgf1uqg==} + engines: {node: ">=14"} + dependencies: + compare-func: 2.0.0 + dev: true + + /conventional-changelog-conventionalcommits@6.1.0: + resolution: {integrity: sha512-3cS3GEtR78zTfMzk0AizXKKIdN4OvSh7ibNz6/DPbhWWQu7LqE/8+/GqSodV+sywUR2gpJAdP/1JFf4XtN7Zpw==} + engines: {node: ">=14"} + dependencies: + compare-func: 2.0.0 + dev: true + + /conventional-commits-parser@4.0.0: + resolution: {integrity: sha512-WRv5j1FsVM5FISJkoYMR6tPk07fkKT0UodruX4je86V4owk451yjXAKzKAPOs9l7y59E2viHUS9eQ+dfUA9NSg==} + engines: {node: ">=14"} + hasBin: true + dependencies: + JSONStream: 1.3.5 + is-text-path: 1.0.1 + meow: 8.1.2 + split2: 3.2.2 + dev: true + + /cosmiconfig-typescript-loader@4.4.0(@types/node@20.4.7)(cosmiconfig@8.3.6)(ts-node@10.9.1)(typescript@5.2.2): + resolution: {integrity: 
sha512-BabizFdC3wBHhbI4kJh0VkQP9GkBfoHPydD0COMce1nJ1kJAB3F2TmJ/I7diULBKtmEWSwEbuN/KDtgnmUUVmw==} + engines: {node: ">=v14.21.3"} + peerDependencies: + "@types/node": "*" + cosmiconfig: ">=7" + ts-node: ">=10" + typescript: ">=4" + dependencies: + "@types/node": 20.4.7 + cosmiconfig: 8.3.6(typescript@5.2.2) + ts-node: 10.9.1(@types/node@20.4.7)(typescript@5.2.2) + typescript: 5.2.2 + dev: true + + /cosmiconfig@8.3.6(typescript@5.2.2): + resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} + engines: {node: ">=14"} + peerDependencies: + typescript: ">=4.9.5" + peerDependenciesMeta: + typescript: + optional: true + dependencies: + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + path-type: 4.0.0 + typescript: 5.2.2 + dev: true + + /create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + dev: true + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: ">= 8"} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + dev: true + + /dargs@7.0.0: + resolution: {integrity: sha512-2iy1EkLdlBzQGvbweYRFxmFath8+K7+AKB0TlhHWkNuH+TmovaMH/Wp7V7R4u7f4SnX3OgLsU9t1NI9ioDnUpg==} + engines: {node: ">=8"} + dev: true + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: ">=6.0"} + peerDependencies: + supports-color: "*" + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + dev: true + + /decamelize-keys@1.1.1: + resolution: {integrity: sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==} + engines: {node: ">=0.10.0"} + dependencies: + decamelize: 1.2.0 + map-obj: 1.0.1 + dev: true + + /decamelize@1.2.0: + 
resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: ">=0.10.0"} + dev: true + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + dev: true + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: ">=0.3.1"} + dev: true + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: ">=6.0.0"} + dependencies: + esutils: 2.0.3 + dev: true + + /dot-prop@5.3.0: + resolution: {integrity: sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==} + engines: {node: ">=8"} + dependencies: + is-obj: 2.0.0 + dev: true + + /eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + dev: true + + /emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + dev: true + + /emoji-regex@9.2.2: + resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + dev: true + + /error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + dependencies: + is-arrayish: 0.2.1 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: ">=6"} + dev: true + + /escape-string-regexp@1.0.5: + resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==} + engines: {node: ">=0.8.0"} + dev: true + + 
/escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: ">=10"} + dev: true + + /eslint-plugin-yml@1.9.0(eslint@8.49.0): + resolution: {integrity: sha512-ayuC57WyVQ5+QZ02y62GiB//5+zsiyzUGxUX/mrhLni+jfsKA4KoITjkbR65iUdjjhWpyTJHPcAIFLKQIOwgsw==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ">=6.0.0" + dependencies: + debug: 4.3.4 + eslint: 8.49.0 + lodash: 4.17.21 + natural-compare: 1.4.0 + yaml-eslint-parser: 1.2.2 + transitivePeerDependencies: + - supports-color + dev: true + + /eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + dev: true + + /eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /eslint@8.49.0: + resolution: {integrity: sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + "@eslint-community/eslint-utils": 4.4.0(eslint@8.49.0) + "@eslint-community/regexpp": 4.8.1 + "@eslint/eslintrc": 2.1.2 + "@eslint/js": 8.49.0 + "@humanwhocodes/config-array": 0.11.11 + "@humanwhocodes/module-importer": 1.0.1 + "@nodelib/fs.walk": 1.2.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.21.0 + graphemer: 1.4.0 + ignore: 5.2.4 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 
3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.3 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.10.0 + acorn-jsx: 5.3.2(acorn@8.10.0) + eslint-visitor-keys: 3.4.3 + dev: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: ">=0.10"} + dependencies: + estraverse: 5.3.0 + dev: true + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: ">=4.0"} + dependencies: + estraverse: 5.3.0 + dev: true + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: ">=4.0"} + dev: true + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: ">=0.10.0"} + dev: true + + /eventemitter3@5.0.1: + resolution: {integrity: sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==} + dev: true + + /execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: ">=10"} + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + dev: true + + /execa@7.2.0: + resolution: {integrity: 
sha512-UduyVP7TLB5IcAQl+OzLyLcS/l32W/GLg+AhHJ+ow40FOk2U3SAllPwR44v4vmdFwIWqpdwxxpQbF1n5ta9seA==} + engines: {node: ^14.18.0 || ^16.14.0 || >=18.0.0} + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 4.3.1 + is-stream: 3.0.0 + merge-stream: 2.0.0 + npm-run-path: 5.1.0 + onetime: 6.0.0 + signal-exit: 3.0.7 + strip-final-newline: 3.0.0 + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: true + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: true + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + dev: true + + /fastq@1.15.0: + resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==} + dependencies: + reusify: 1.0.4 + dev: true + + /file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.1.0 + dev: true + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: ">=8"} + dependencies: + to-regex-range: 5.0.1 + dev: true + + /find-up@4.1.0: + resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} + engines: {node: ">=8"} + dependencies: + locate-path: 5.0.0 + path-exists: 4.0.0 + dev: true + + /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: ">=10"} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + dev: true 
+ + /flat-cache@3.1.0: + resolution: {integrity: sha512-OHx4Qwrrt0E4jEIcI5/Xb+f+QmJYNj2rrK8wiIdQOIrB9WrrJL8cjZvXdXuBTkkEwEqLycb5BeZDV1o2i9bTew==} + engines: {node: ">=12.0.0"} + dependencies: + flatted: 3.2.9 + keyv: 4.5.3 + rimraf: 3.0.2 + dev: true + + /flatted@3.2.9: + resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==} + dev: true + + /fs-extra@11.1.1: + resolution: {integrity: sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==} + engines: {node: ">=14.14"} + dependencies: + graceful-fs: 4.2.11 + jsonfile: 6.1.0 + universalify: 2.0.0 + dev: true + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + dev: true + + /function-bind@1.1.1: + resolution: {integrity: sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==} + dev: true + + /get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + dev: true + + /get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: ">=10"} + dev: true + + /git-raw-commits@2.0.11: + resolution: {integrity: sha512-VnctFhw+xfj8Va1xtfEqCUD2XDrbAPSJx+hSrE5K7fGdjZruW7XV+QOrN7LF/RJyvspRiD2I0asWsxFp0ya26A==} + engines: {node: ">=10"} + hasBin: true + dependencies: + dargs: 7.0.0 + lodash: 4.17.21 + meow: 8.1.2 + split2: 3.2.2 + through2: 4.0.2 + dev: true + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: ">=10.13.0"} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob@7.2.3: + resolution: {integrity: 
sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /global-dirs@0.1.1: + resolution: {integrity: sha512-NknMLn7F2J7aflwFOlGdNIuCDpN3VGoSoB+aap3KABFWbHVn1TCgFC+np23J8W2BiZbjfEw3BFBycSMv1AFblg==} + engines: {node: ">=4"} + dependencies: + ini: 1.3.8 + dev: true + + /globals@13.21.0: + resolution: {integrity: sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==} + engines: {node: ">=8"} + dependencies: + type-fest: 0.20.2 + dev: true + + /graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + dev: true + + /graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + dev: true + + /hard-rejection@2.1.0: + resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==} + engines: {node: ">=6"} + dev: true + + /has-flag@3.0.0: + resolution: {integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} + engines: {node: ">=4"} + dev: true + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: ">=8"} + dev: true + + /has@1.0.3: + resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} + engines: {node: ">= 0.4.0"} + dependencies: + function-bind: 1.1.1 + dev: true + + /hosted-git-info@2.8.9: + resolution: {integrity: sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==} + dev: true + + /hosted-git-info@4.1.0: + resolution: {integrity: 
sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==} + engines: {node: ">=10"} + dependencies: + lru-cache: 6.0.0 + dev: true + + /human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: ">=10.17.0"} + dev: true + + /human-signals@4.3.1: + resolution: {integrity: sha512-nZXjEF2nbo7lIw3mgYjItAfgQXog3OjJogSbKa2CQIIvSGWcKgeJnQlNXip6NglNzYH45nSRiEVimMvYL8DDqQ==} + engines: {node: ">=14.18.0"} + dev: true + + /husky@8.0.3: + resolution: {integrity: sha512-+dQSyqPh4x1hlO1swXBiNb2HzTDN1I2IGLQx1GrBuiqFJfoMrnZWwVmatvSiO+Iz8fBUnf+lekwNo4c2LlXItg==} + engines: {node: ">=14"} + hasBin: true + dev: true + + /ignore@5.2.4: + resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} + engines: {node: ">= 4"} + dev: true + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: ">=6"} + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: ">=0.8.19"} + dev: true + + /indent-string@4.0.0: + resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==} + engines: {node: ">=8"} + dev: true + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + dev: true + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + dev: true + + /ini@1.3.8: + resolution: {integrity: 
sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} + dev: true + + /is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + dev: true + + /is-core-module@2.13.0: + resolution: {integrity: sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==} + dependencies: + has: 1.0.3 + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: ">=0.10.0"} + dev: true + + /is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: ">=8"} + dev: true + + /is-fullwidth-code-point@4.0.0: + resolution: {integrity: sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==} + engines: {node: ">=12"} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: ">=0.10.0"} + dependencies: + is-extglob: 2.1.1 + dev: true + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: ">=0.12.0"} + dev: true + + /is-obj@2.0.0: + resolution: {integrity: sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==} + engines: {node: ">=8"} + dev: true + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: ">=8"} + dev: true + + /is-plain-obj@1.1.0: + resolution: {integrity: sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==} + engines: {node: ">=0.10.0"} + dev: 
true + + /is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: ">=8"} + dev: true + + /is-stream@3.0.0: + resolution: {integrity: sha512-LnQR4bZ9IADDRSkvpqMGvt/tEJWclzklNgSw48V5EAaAeDd6qGvN8ei6k5p0tvxSR171VmGyHuTiAOfxAbr8kA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /is-text-path@1.0.1: + resolution: {integrity: sha512-xFuJpne9oFz5qDaodwmmG08e3CawH/2ZV8Qqza1Ko7Sk8POWbkRdwIoAWVhqvq0XeUzANEhKo2n0IXUGBm7A/w==} + engines: {node: ">=0.10.0"} + dependencies: + text-extensions: 1.9.0 + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + dev: true + + /js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + dev: true + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + dev: true + + /json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + dev: true + + /json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + dev: true + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + dev: true + + /json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + dev: true + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + dev: true + 
+ /jsonfile@6.1.0: + resolution: {integrity: sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==} + dependencies: + universalify: 2.0.0 + optionalDependencies: + graceful-fs: 4.2.11 + dev: true + + /jsonparse@1.3.1: + resolution: {integrity: sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==} + engines: {"0": node >= 0.2.0} + dev: true + + /keyv@4.5.3: + resolution: {integrity: sha512-QCiSav9WaX1PgETJ+SpNnx2PRRapJ/oRSXM4VO5OGYGSjrxbKPVFVhB3l2OCbLCk329N8qyAtsJjSjvVBWzEug==} + dependencies: + json-buffer: 3.0.1 + dev: true + + /kind-of@6.0.3: + resolution: {integrity: sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==} + engines: {node: ">=0.10.0"} + dev: true + + /levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: ">= 0.8.0"} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /lilconfig@2.1.0: + resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} + engines: {node: ">=10"} + dev: true + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + dev: true + + /lint-staged@14.0.1: + resolution: {integrity: sha512-Mw0cL6HXnHN1ag0mN/Dg4g6sr8uf8sn98w2Oc1ECtFto9tvRF7nkXGJRbx8gPlHyoR0pLyBr2lQHbWwmUHe1Sw==} + engines: {node: ^16.14.0 || >=18.0.0} + hasBin: true + dependencies: + chalk: 5.3.0 + commander: 11.0.0 + debug: 4.3.4 + execa: 7.2.0 + lilconfig: 2.1.0 + listr2: 6.6.1 + micromatch: 4.0.5 + pidtree: 0.6.0 + string-argv: 0.3.2 + yaml: 2.3.1 + transitivePeerDependencies: + - enquirer + - supports-color + dev: true + + /listr2@6.6.1: + resolution: {integrity: sha512-+rAXGHh0fkEWdXBmX+L6mmfmXmXvDGEKzkjxO+8mP3+nI/r/CWznVBvsibXdxda9Zz0OW2e2ikphN3OwCT/jSg==} + 
engines: {node: ">=16.0.0"} + peerDependencies: + enquirer: ">= 2.3.0 < 3" + peerDependenciesMeta: + enquirer: + optional: true + dependencies: + cli-truncate: 3.1.0 + colorette: 2.0.20 + eventemitter3: 5.0.1 + log-update: 5.0.1 + rfdc: 1.3.0 + wrap-ansi: 8.1.0 + dev: true + + /locate-path@5.0.0: + resolution: {integrity: sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==} + engines: {node: ">=8"} + dependencies: + p-locate: 4.1.0 + dev: true + + /locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: ">=10"} + dependencies: + p-locate: 5.0.0 + dev: true + + /lodash.camelcase@4.3.0: + resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} + dev: true + + /lodash.isfunction@3.0.9: + resolution: {integrity: sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw==} + dev: true + + /lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + dev: true + + /lodash.kebabcase@4.1.1: + resolution: {integrity: sha512-N8XRTIMMqqDgSy4VLKPnJ/+hpGZN+PHQiJnSenYqPaVV/NCqEogTnAdZLQiGKhxX+JCs8waWq2t1XHWKOmlY8g==} + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: true + + /lodash.mergewith@4.6.2: + resolution: {integrity: sha512-GK3g5RPZWTRSeLSpgP8Xhra+pnjBC56q9FZYe1d5RN3TJ35dbkGy3YqBSMbyCrlbi+CM9Z3Jk5yTL7RCsqboyQ==} + dev: true + + /lodash.snakecase@4.1.1: + resolution: {integrity: sha512-QZ1d4xoBHYUeuouhEq3lk3Uq7ldgyFXGBhg04+oRLnIz8o9T65Eh+8YdroUwn846zchkA9yDsDl5CVVaV2nqYw==} + dev: true + + /lodash.startcase@4.4.0: + resolution: {integrity: 
sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==} + dev: true + + /lodash.uniq@4.5.0: + resolution: {integrity: sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==} + dev: true + + /lodash.upperfirst@4.3.1: + resolution: {integrity: sha512-sReKOYJIJf74dhJONhU4e0/shzi1trVbSWDOhKYE5XV2O+H7Sb2Dihwuc7xWxVl+DgFPyTqIN3zMfT9cq5iWDg==} + dev: true + + /lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + dev: true + + /log-update@5.0.1: + resolution: {integrity: sha512-5UtUDQ/6edw4ofyljDNcOVJQ4c7OjDro4h3y8e1GQL5iYElYclVHJ3zeWchylvMaKnDbDilC8irOVyexnA/Slw==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + ansi-escapes: 5.0.0 + cli-cursor: 4.0.0 + slice-ansi: 5.0.0 + strip-ansi: 7.1.0 + wrap-ansi: 8.1.0 + dev: true + + /lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: ">=10"} + dependencies: + yallist: 4.0.0 + dev: true + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + dev: true + + /map-obj@1.0.1: + resolution: {integrity: sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==} + engines: {node: ">=0.10.0"} + dev: true + + /map-obj@4.3.0: + resolution: {integrity: sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==} + engines: {node: ">=8"} + dev: true + + /meow@8.1.2: + resolution: {integrity: sha512-r85E3NdZ+mpYk1C6RjPFEMSE+s1iZMuHtsHAqY0DT3jZczl0diWUZ8g6oU7h0M9cD2EL+PzaYghhCLzR0ZNn5Q==} + engines: {node: ">=10"} + dependencies: + "@types/minimist": 1.2.2 + camelcase-keys: 6.2.2 + decamelize-keys: 1.1.1 + hard-rejection: 2.1.0 + minimist-options: 4.1.0 + normalize-package-data: 3.0.3 + 
read-pkg-up: 7.0.1 + redent: 3.0.0 + trim-newlines: 3.0.1 + type-fest: 0.18.1 + yargs-parser: 20.2.9 + dev: true + + /merge-stream@2.0.0: + resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + dev: true + + /micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: ">=8.6"} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + dev: true + + /mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: ">=6"} + dev: true + + /mimic-fn@4.0.0: + resolution: {integrity: sha512-vqiC06CuhBTUdZH+RYl8sFrL096vA45Ok5ISO6sE/Mr1jRbGH4Csnhi8f3wKVl7x8mO4Au7Ir9D3Oyv1VYMFJw==} + engines: {node: ">=12"} + dev: true + + /min-indent@1.0.1: + resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} + engines: {node: ">=4"} + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /minimist-options@4.1.0: + resolution: {integrity: sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==} + engines: {node: ">= 6"} + dependencies: + arrify: 1.0.1 + is-plain-obj: 1.1.0 + kind-of: 6.0.3 + dev: true + + /minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + dev: true + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: true + + 
/normalize-package-data@2.5.0: + resolution: {integrity: sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==} + dependencies: + hosted-git-info: 2.8.9 + resolve: 1.22.6 + semver: 5.7.2 + validate-npm-package-license: 3.0.4 + dev: true + + /normalize-package-data@3.0.3: + resolution: {integrity: sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==} + engines: {node: ">=10"} + dependencies: + hosted-git-info: 4.1.0 + is-core-module: 2.13.0 + semver: 7.5.4 + validate-npm-package-license: 3.0.4 + dev: true + + /npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: ">=8"} + dependencies: + path-key: 3.1.1 + dev: true + + /npm-run-path@5.1.0: + resolution: {integrity: sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + path-key: 4.0.0 + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: true + + /onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: ">=6"} + dependencies: + mimic-fn: 2.1.0 + dev: true + + /onetime@6.0.0: + resolution: {integrity: sha512-1FlR+gjXK7X+AsAHso35MnyN5KqGwJRi/31ft6x0M194ht7S+rWAvd7PHss9xSKMzE0asv1pyIHaJYq+BbacAQ==} + engines: {node: ">=12"} + dependencies: + mimic-fn: 4.0.0 + dev: true + + /optionator@0.9.3: + resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} + engines: {node: ">= 0.8.0"} + dependencies: + "@aashutoshrathi/word-wrap": 1.2.6 + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 
+ dev: true + + /p-limit@2.3.0: + resolution: {integrity: sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==} + engines: {node: ">=6"} + dependencies: + p-try: 2.2.0 + dev: true + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: ">=10"} + dependencies: + yocto-queue: 0.1.0 + dev: true + + /p-locate@4.1.0: + resolution: {integrity: sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==} + engines: {node: ">=8"} + dependencies: + p-limit: 2.3.0 + dev: true + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: ">=10"} + dependencies: + p-limit: 3.1.0 + dev: true + + /p-try@2.2.0: + resolution: {integrity: sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==} + engines: {node: ">=6"} + dev: true + + /parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: ">=6"} + dependencies: + callsites: 3.1.0 + dev: true + + /parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: ">=8"} + dependencies: + "@babel/code-frame": 7.22.13 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: ">=8"} + dev: true + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: ">=0.10.0"} + dev: true + + /path-key@3.1.1: + resolution: 
{integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: ">=8"} + dev: true + + /path-key@4.0.0: + resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} + engines: {node: ">=12"} + dev: true + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + dev: true + + /path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: ">=8"} + dev: true + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: ">=8.6"} + dev: true + + /pidtree@0.6.0: + resolution: {integrity: sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==} + engines: {node: ">=0.10"} + hasBin: true + dev: true + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: ">= 0.8.0"} + dev: true + + /punycode@2.3.0: + resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} + engines: {node: ">=6"} + dev: true + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + dev: true + + /quick-lru@4.0.1: + resolution: {integrity: sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==} + engines: {node: ">=8"} + dev: true + + /read-pkg-up@7.0.1: + resolution: {integrity: sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==} + engines: {node: ">=8"} + dependencies: + find-up: 4.1.0 + read-pkg: 5.2.0 + type-fest: 
0.8.1 + dev: true + + /read-pkg@5.2.0: + resolution: {integrity: sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==} + engines: {node: ">=8"} + dependencies: + "@types/normalize-package-data": 2.4.1 + normalize-package-data: 2.5.0 + parse-json: 5.2.0 + type-fest: 0.6.0 + dev: true + + /readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: ">= 6"} + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + dev: true + + /redent@3.0.0: + resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==} + engines: {node: ">=8"} + dependencies: + indent-string: 4.0.0 + strip-indent: 3.0.0 + dev: true + + /require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: ">=0.10.0"} + dev: true + + /require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: ">=0.10.0"} + dev: true + + /resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: ">=4"} + dev: true + + /resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: ">=8"} + dev: true + + /resolve-global@1.0.0: + resolution: {integrity: sha512-zFa12V4OLtT5XUX/Q4VLvTfBf+Ok0SPc1FNGM/z9ctUdiU618qwKpWnd0CHs3+RqROfyEg/DhuHbMWYqcgljEw==} + engines: {node: ">=8"} + dependencies: + global-dirs: 0.1.1 + dev: true + + /resolve@1.22.6: + resolution: {integrity: sha512-njhxM7mV12JfufShqGy3Rz8j11RPdLy4xi15UurGJeoHLfJpVXKdh3ueuOqbYUcDZnffr6X739JBo5LzyahEsw==} + hasBin: true + dependencies: 
+ is-core-module: 2.13.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + dev: true + + /restore-cursor@4.0.0: + resolution: {integrity: sha512-I9fPXU9geO9bHOt9pHHOhOkYerIMsmVaWB0rA2AI9ERh/+x/i7MV5HKBNrg+ljO5eoPVgCcnFuRjJ9uH6I/3eg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + onetime: 5.1.2 + signal-exit: 3.0.7 + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: ">=1.0.0", node: ">=0.10.0"} + dev: true + + /rfdc@1.3.0: + resolution: {integrity: sha512-V2hovdzFbOi77/WajaSMXk2OLm+xNIeQdMMuB7icj7bk6zi2F8GGAxigcnDFpJHbNyNcgyJDiP+8nOrY5cZGrA==} + dev: true + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + dev: true + + /safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + dev: true + + /semver@5.7.2: + resolution: {integrity: sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==} + hasBin: true + dev: true + + /semver@7.5.4: + resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==} + engines: {node: ">=10"} + hasBin: true + dependencies: + lru-cache: 6.0.0 + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: ">=8"} + dependencies: + shebang-regex: 3.0.0 + dev: true + + /shebang-regex@3.0.0: + resolution: {integrity: 
sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: ">=8"} + dev: true + + /signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + dev: true + + /slice-ansi@5.0.0: + resolution: {integrity: sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==} + engines: {node: ">=12"} + dependencies: + ansi-styles: 6.2.1 + is-fullwidth-code-point: 4.0.0 + dev: true + + /spdx-correct@3.2.0: + resolution: {integrity: sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==} + dependencies: + spdx-expression-parse: 3.0.1 + spdx-license-ids: 3.0.14 + dev: true + + /spdx-exceptions@2.3.0: + resolution: {integrity: sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A==} + dev: true + + /spdx-expression-parse@3.0.1: + resolution: {integrity: sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==} + dependencies: + spdx-exceptions: 2.3.0 + spdx-license-ids: 3.0.14 + dev: true + + /spdx-license-ids@3.0.14: + resolution: {integrity: sha512-U0eS5wcpu/O2/QZk6PcAMOA8H3ZuvRe4mFHA3Q+LNl1SRDmfQ+mD3RoD6tItqnvqubJ32m/zV2Z/ikSmxccD1Q==} + dev: true + + /split2@3.2.2: + resolution: {integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==} + dependencies: + readable-stream: 3.6.2 + dev: true + + /string-argv@0.3.2: + resolution: {integrity: sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q==} + engines: {node: ">=0.6.19"} + dev: true + + /string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: ">=8"} + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + dev: 
true + + /string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: ">=12"} + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.0 + dev: true + + /string_decoder@1.3.0: + resolution: {integrity: sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: ">=8"} + dependencies: + ansi-regex: 5.0.1 + dev: true + + /strip-ansi@7.1.0: + resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} + engines: {node: ">=12"} + dependencies: + ansi-regex: 6.0.1 + dev: true + + /strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: ">=6"} + dev: true + + /strip-final-newline@3.0.0: + resolution: {integrity: sha512-dOESqjYr96iWYylGObzd39EuNTa5VJxyvVAEm5Jnh7KGo75V43Hk1odPQkNDyXNmUR6k+gEiDVXnjB8HJ3crXw==} + engines: {node: ">=12"} + dev: true + + /strip-indent@3.0.0: + resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} + engines: {node: ">=8"} + dependencies: + min-indent: 1.0.1 + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: ">=8"} + dev: true + + /supports-color@5.5.0: + resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} + engines: {node: ">=4"} + dependencies: + has-flag: 3.0.0 + dev: true + + /supports-color@7.2.0: + resolution: {integrity: 
sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: ">=8"} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: ">= 0.4"} + dev: true + + /text-extensions@1.9.0: + resolution: {integrity: sha512-wiBrwC1EhBelW12Zy26JeOUkQ5mRu+5o8rpsJk5+2t+Y5vE7e842qtZDQ2g1NpX/29HdyFeJ4nSIhI47ENSxlQ==} + engines: {node: ">=0.10"} + dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + dev: true + + /through2@4.0.2: + resolution: {integrity: sha512-iOqSav00cVxEEICeD7TjLB1sueEL+81Wpzp2bY17uZjZN0pWZPuo4suZ/61VujxmqSGFfgOcNuTZ85QJwNZQpw==} + dependencies: + readable-stream: 3.6.2 + dev: true + + /through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + dev: true + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: ">=8.0"} + dependencies: + is-number: 7.0.0 + dev: true + + /trim-newlines@3.0.1: + resolution: {integrity: sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==} + engines: {node: ">=8"} + dev: true + + /ts-node@10.9.1(@types/node@20.4.7)(typescript@5.2.2): + resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} + hasBin: true + peerDependencies: + "@swc/core": ">=1.2.50" + "@swc/wasm": ">=1.2.50" + "@types/node": "*" + typescript: ">=2.7" + peerDependenciesMeta: + "@swc/core": + optional: true + "@swc/wasm": + optional: true + dependencies: + "@cspotcode/source-map-support": 0.8.1 + "@tsconfig/node10": 1.0.9 + "@tsconfig/node12": 1.0.11 
+ "@tsconfig/node14": 1.0.3 + "@tsconfig/node16": 1.0.4 + "@types/node": 20.4.7 + acorn: 8.10.0 + acorn-walk: 8.2.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.2.2 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + dev: true + + /type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: ">= 0.8.0"} + dependencies: + prelude-ls: 1.2.1 + dev: true + + /type-fest@0.18.1: + resolution: {integrity: sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==} + engines: {node: ">=10"} + dev: true + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: ">=10"} + dev: true + + /type-fest@0.6.0: + resolution: {integrity: sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==} + engines: {node: ">=8"} + dev: true + + /type-fest@0.8.1: + resolution: {integrity: sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==} + engines: {node: ">=8"} + dev: true + + /type-fest@1.4.0: + resolution: {integrity: sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA==} + engines: {node: ">=10"} + dev: true + + /typescript@5.2.2: + resolution: {integrity: sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==} + engines: {node: ">=14.17"} + hasBin: true + dev: true + + /universalify@2.0.0: + resolution: {integrity: sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==} + engines: {node: ">= 10.0.0"} + dev: true + + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + dev: true + + 
/util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + dev: true + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + dev: true + + /validate-npm-package-license@3.0.4: + resolution: {integrity: sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==} + dependencies: + spdx-correct: 3.2.0 + spdx-expression-parse: 3.0.1 + dev: true + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: ">= 8"} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: true + + /wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: ">=10"} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: true + + /wrap-ansi@8.1.0: + resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: ">=12"} + dependencies: + ansi-styles: 6.2.1 + string-width: 5.1.2 + strip-ansi: 7.1.0 + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: true + + /y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: ">=10"} + dev: true + + /yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + dev: true + + /yaml-eslint-parser@1.2.2: + resolution: {integrity: sha512-pEwzfsKbTrB8G3xc/sN7aw1v6A6c/pKxLAkjclnAyo5g5qOh6eL9WGu0o3cSDQZKrTNk4KL4lQSwZW+nBkANEg==} + engines: {node: ^14.17.0 || 
>=16.0.0} + dependencies: + eslint-visitor-keys: 3.4.3 + lodash: 4.17.21 + yaml: 2.3.2 + dev: true + + /yaml@2.3.1: + resolution: {integrity: sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==} + engines: {node: ">= 14"} + dev: true + + /yaml@2.3.2: + resolution: {integrity: sha512-N/lyzTPaJasoDmfV7YTrYCI0G/3ivm/9wdG0aHuheKowWQwGTsK0Eoiw6utmzAnI6pkJa0DUVygvp3spqqEKXg==} + engines: {node: ">= 14"} + dev: true + + /yargs-parser@20.2.9: + resolution: {integrity: sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==} + engines: {node: ">=10"} + dev: true + + /yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: ">=12"} + dev: true + + /yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: ">=12"} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: true + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: ">=6"} + dev: true + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: ">=10"} + dev: true From f18bf0caeb7b32800201b3d47410577425777a61 Mon Sep 17 00:00:00 2001 From: Claire Nollet Date: Wed, 20 Sep 2023 13:02:48 +0200 Subject: [PATCH 02/31] style: :rotating_light: format yaml files with eslint plugin --- admin-tools/get-credentials.yaml | 20 +- roles/ca/tasks/exposed_ca.yaml | 2 +- roles/gitlab/tasks/main.yaml | 20 +- roles/keycloak/tasks/main.yml | 10 +- roles/logs/files/cluster-logging.yaml | 8 +- roles/logs/files/operator-subscription.yaml | 12 +- 
roles/metrics/grafana/1-subscription.yaml | 2 +- roles/metrics/grafana/datasource.yaml | 6 +- roles/metrics/grafana/grafana-instance.yaml | 2 +- roles/socle-config/files/crd-conf-dso.yaml | 1070 +++++++++---------- roles/socle-config/tasks/main.yaml | 2 +- roles/sonarqube/tasks/main.yaml | 2 +- 12 files changed, 578 insertions(+), 578 deletions(-) diff --git a/admin-tools/get-credentials.yaml b/admin-tools/get-credentials.yaml index f893562d..92301be6 100644 --- a/admin-tools/get-credentials.yaml +++ b/admin-tools/get-credentials.yaml @@ -119,21 +119,21 @@ - console - console-dso -# - name: Get Keycloak admin secret -# kubernetes.core.k8s_info: -# namespace: "{{ dsc.keycloak.namespace }}" -# kind: Secret -# name: credential-dso-keycloak -# register: keycloak_creds -# tags: -# - keycloak + # - name: Get Keycloak admin secret + # kubernetes.core.k8s_info: + # namespace: "{{ dsc.keycloak.namespace }}" + # kind: Secret + # name: credential-dso-keycloak + # register: keycloak_creds + # tags: + # - keycloak - name: Display Keycloak credentials ansible.builtin.debug: msg: - "URL : https://{{ dsc.keycloak.subDomain }}{{ dsc.global.rootDomain }} " -# - "Admin username : {{ keycloak_creds.resources[0].data.ADMIN_USERNAME | b64decode }} " -# - "Admin password : {{ keycloak_creds.resources[0].data.ADMIN_PASSWORD | b64decode }} " + # - "Admin username : {{ keycloak_creds.resources[0].data.ADMIN_USERNAME | b64decode }} " + # - "Admin password : {{ keycloak_creds.resources[0].data.ADMIN_PASSWORD | b64decode }} " - "Admin username: {{ dso_console_configmap.resources[0].data.KEYCLOAK_ADMIN }} " - "Admin password: {{ dso_console_configmap.resources[0].data.KEYCLOAK_ADMIN_PASSWORD }} " tags: diff --git a/roles/ca/tasks/exposed_ca.yaml b/roles/ca/tasks/exposed_ca.yaml index e727dcf9..ce44ab32 100644 --- a/roles/ca/tasks/exposed_ca.yaml +++ b/roles/ca/tasks/exposed_ca.yaml @@ -53,7 +53,7 @@ cmd: "curl {{ dsc.exposedCA.url }} -s | openssl x509" changed_when: false register: 
exposed_ca_resource - tags: ['skip_ansible_lint'] + tags: ["skip_ansible_lint"] - name: Extract key ansible.builtin.set_fact: diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 8cd04d22..8200dbe8 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -83,16 +83,16 @@ version: "{{ dsc.gitlab.chartVersion }}" values: "{{ gitlab_values }}" - #- name: Wait gitlab instance to be 'Running' - # kubernetes.core.k8s_info: - # api_version: apps.gitlab.com/v1beta1 - # kind: GitLab - # namespace: "{{ dsc.gitlab.namespace }}" - # name: gitlab - # register: gitlab_instance - # until: gitlab_instance.resources[0] is defined and gitlab_instance.resources[0].status is defined and gitlab_instance.resources[0].status.phase == 'Running' - # retries: 45 - # delay: 20 + # - name: Wait gitlab instance to be 'Running' + # kubernetes.core.k8s_info: + # api_version: apps.gitlab.com/v1beta1 + # kind: GitLab + # namespace: "{{ dsc.gitlab.namespace }}" + # name: gitlab + # register: gitlab_instance + # until: gitlab_instance.resources[0] is defined and gitlab_instance.resources[0].status is defined and gitlab_instance.resources[0].status.phase == 'Running' + # retries: 45 + # delay: 20 - name: Wait Gitlab webservice endpoint to be available kubernetes.core.k8s_info: diff --git a/roles/keycloak/tasks/main.yml b/roles/keycloak/tasks/main.yml index 2d00322d..79b714e4 100644 --- a/roles/keycloak/tasks/main.yml +++ b/roles/keycloak/tasks/main.yml @@ -232,9 +232,9 @@ state: present realm: dso credentials: - - temporary: false - type: password - value: "{{ admin_user_password }}" + - temporary: false + type: password + value: "{{ admin_user_password }}" username: admin@example.com first_name: Admin last_name: Admin @@ -273,8 +273,8 @@ attributes: include.in.token.scope: true display.on.consent.screen: true - gui.order: '' - consent.screen.text: '' + gui.order: "" + consent.screen.text: "" protocolMappers: "{{ lookup('ansible.builtin.file', 
'generic-client-scope-protocolMappers.yaml') | from_yaml }}" - ansible.builtin.include_tasks: diff --git a/roles/logs/files/cluster-logging.yaml b/roles/logs/files/cluster-logging.yaml index a0a5cc54..ecf44b32 100644 --- a/roles/logs/files/cluster-logging.yaml +++ b/roles/logs/files/cluster-logging.yaml @@ -20,10 +20,10 @@ spec: storageClassName: "ocs-storagecluster-ceph-rbd" size: 50G resources: - limits: - memory: "4Gi" - requests: - memory: "4Gi" + limits: + memory: "4Gi" + requests: + memory: "4Gi" proxy: resources: limits: diff --git a/roles/logs/files/operator-subscription.yaml b/roles/logs/files/operator-subscription.yaml index 1bade09b..d7091667 100644 --- a/roles/logs/files/operator-subscription.yaml +++ b/roles/logs/files/operator-subscription.yaml @@ -21,12 +21,12 @@ spec: targetNamespaces: - openshift-logging --- -#apiVersion: operators.coreos.com/v1 -#kind: OperatorGroup -#metadata: +# apiVersion: operators.coreos.com/v1 +# kind: OperatorGroup +# metadata: # name: openshift-operators-redhat-group # namespace: openshift-operators-redhat -#--- +# --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: @@ -38,7 +38,7 @@ spec: name: cluster-logging source: redhat-operators sourceNamespace: openshift-marketplace - #startingCSV: cluster-logging.5.5.4 + # startingCSV: cluster-logging.5.5.4 --- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription @@ -51,6 +51,6 @@ spec: name: elasticsearch-operator source: redhat-operators sourceNamespace: openshift-marketplace - #startingCSV: elasticsearch-operator.5.5.4 + # startingCSV: elasticsearch-operator.5.5.4 diff --git a/roles/metrics/grafana/1-subscription.yaml b/roles/metrics/grafana/1-subscription.yaml index 3299d042..039795b7 100644 --- a/roles/metrics/grafana/1-subscription.yaml +++ b/roles/metrics/grafana/1-subscription.yaml @@ -9,4 +9,4 @@ spec: name: grafana-operator source: community-operators sourceNamespace: openshift-marketplace - #startingCSV: grafana-operator.v4.4.1 + # 
startingCSV: grafana-operator.v4.4.1 diff --git a/roles/metrics/grafana/datasource.yaml b/roles/metrics/grafana/datasource.yaml index eb02ab81..adfce340 100644 --- a/roles/metrics/grafana/datasource.yaml +++ b/roles/metrics/grafana/datasource.yaml @@ -6,14 +6,14 @@ metadata: spec: datasources: - access: proxy - #jsonData: + # jsonData: # tlsSkipVerify: true basicAuth: true basicAuthPassword: mysupersecretpasswd basicAuthUser: grafana-user - #isDefault: true + # isDefault: true editable: true name: prometheus-internal type: prometheus - url: 'https://prometheus-k8s.openshift-monitoring.svc:9091' + url: "https://prometheus-k8s.openshift-monitoring.svc:9091" name: prometheus-internal diff --git a/roles/metrics/grafana/grafana-instance.yaml b/roles/metrics/grafana/grafana-instance.yaml index d91f8a79..efc8eda3 100644 --- a/roles/metrics/grafana/grafana-instance.yaml +++ b/roles/metrics/grafana/grafana-instance.yaml @@ -18,5 +18,5 @@ spec: admin_user: admin-dso ingress: enabled: true - #previousServiceName: grafana-service + # previousServiceName: grafana-service diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 108ced2f..3754d123 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -11,551 +11,551 @@ spec: listKind: DsoSocleConfigList plural: dso-socle-configs shortNames: - - dsc + - dsc singular: dso-socle-config scope: Cluster versions: - - name: v1alpha - schema: - openAPIV3Schema: - properties: - spec: - properties: - additionalsCA: - description: Additional CAs to inject into tools; the resources should - be available in all namespaces - items: - properties: - key: - description: CA Resource key, optional, if not set, all keys - will be imported - type: string - kind: - default: ConfigMap - description: CA Resource kind only ConfigMap and Secret are - supported - enum: - - ConfigMap - - Secret - type: string - name: - default: kube-root-ca.crt - 
description: CA Resource name - type: string - required: - - kind - - name - type: object - type: array - exposedCA: - description: Private CA cert needed to validate HTTPS traffic between tools. - type: object - required: - - type - properties: - configmap: - description: The configmap with private CA - type: object + - name: v1alpha + schema: + openAPIV3Schema: + properties: + spec: + properties: + additionalsCA: + description: Additional CAs to inject into tools; the resources should + be available in all namespaces + items: properties: - namespace: - description: The configmap namespace - type: string - name: - description: The configmap name - type: string - key: - description: The configmap key providing the Private CA cert + key: + description: CA Resource key, optional, if not set, all keys + will be imported type: string - required: - - namespace - - name - - key - secret: - description: The secret with private CA - type: object - properties: - namespace: - description: The secret namespace + kind: + default: ConfigMap + description: CA Resource kind only ConfigMap and Secret are + supported + enum: + - ConfigMap + - Secret type: string name: - description: The secret name - type: string - key: - description: The secret key providing the Private CA cert - type: string - required: - - namespace - - name - - key - url: - description: An URL providing the private CA cert (it should be plain text) - type: string - type: - description: | - Method to find the private CA cert: - - none: No private CA cert needed - - configmap: Private CA cert is stored as a configmap - - secret: Private CA cert is stored as a secret - - url: Private CA cert comes from an external URL - - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly - type: string - enum: - - none - - configmap - - secret - - url - - certmanager - default: none - argocd: - description: Configuration for ArgoCD. 
- properties: - admin: - description: Configuration for the ArgoCD admin user. - properties: - # TO DO - enabled: - default: false - description: Specifies whether the ArgoCD admin user is enabled. - type: boolean - password: - description: The password for the ArgoCD admin user. + default: kube-root-ca.crt + description: CA Resource name type: string required: - - enabled - type: object - namespace: - default: dso-argocd - description: The namespace for ArgoCD. - type: string - subDomain: - default: argocd - description: The subdomain for ArgoCD. - type: string - chartVersion: - default: 4.7.13 - description: ArgoCD Bitnami helm chart version (e.g., "4.7.13"). - type: string - values: - description: | - You can merge customs values for argocd, it will be merged with roles/argocd/tasks/main.yaml - See https://github.com/bitnami/charts/tree/main/bitnami/argo-cd - type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion - type: object - certmanager: - description: Configuration for Cert Manager. - properties: - version: - default: v1.11.0 - description: Specifies the version of Cert Manager to use. - type: string - required: - - version - type: object - cloudnativepg: - description: Configuration for CloudNativePG. - properties: - namespace: - default: dso-cloudnativepg - description: The namespace for cloudnativepg. - type: string - chartVersion: - default: 0.18.2 - description: CloudNativePG helm chart version (e.g., "0.18.2"). - type: string - required: - - chartVersion - - namespace - type: object - console: - description: Configuration for the console. - properties: - dbPassword: - description: The password for the console's database. - type: string - namespace: - default: dso-console - description: The namespace for console. - type: string - release: - default: 4.1.0 - description: Console version (e.g., "4.1.0"). - type: string - subDomain: - default: console - description: The subdomain for console. 
- type: string - required: - - dbPassword - type: object - gitlab: - description: Configuration for GitLab. - properties: - namespace: - default: dso-gitlab - description: The namespace for GitLab. - type: string - subDomain: - default: gitlab - description: The subdomain for GitLab. - type: string - chartVersion: - default: 6.11.10 - description: GitLab chart version (e.g., "6.11.10"). - type: string - values: - description: | - You can merge customs values for gitlab, it will be merged with roles/gitlab/templates/gitlab-instance.yaml.j2 - See https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/values.yaml - And https://docs.gitlab.com/charts/charts/globals.html + - kind + - name type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - insecureCI: - description: | - If you use a private CA in exposed_ca, you should set it to true. - Configuring tools in pipelines container is not an easy job. - type: boolean - default: false - required: - - chartVersion - type: object - global: - description: Global configuration not specific to one service - properties: - environment: - default: production - description: | - Defines DSO environment type, i.e. development or production. - type: string - projectsRootDir: - default: - - forge - description: | - Defines root directory for projects in Gitlab and Vault - These values should NEVER be changed once a project is used ! - projects will not be migrated automatically - Represented as array of strings (ex: ['company', 'forge', 'projects']) - Cannot be an empty Array - items: - type: string - minItems: 1 - type: array - rootDomain: - description: The top level of your domain. To expose Argo as "argo.mycompany.com", - the value should be ".mycompany.com" (notice the leading dot). - type: string - default: .example.com - pattern: "^\\..*$" - required: - - projectsRootDir - - rootDomain - - environment - type: object - harbor: - description: Configuration for Harbor. 
- properties: - adminPassword: - description: Administrator password for Harbor. - type: string - namespace: - default: dso-harbor - description: The namespace for Harbor. - type: string - subDomain: - default: harbor - description: The subdomain for Harbor. - type: string - chartVersion: - default: 1.12.2 - description: Harbor helm chart version (e.g., "1.12.2"). - type: string - values: - description: | - You can merge customs values for harbor, it will be merged with roles/harbor/tasks/main.yaml - See https://github.com/goharbor/harbor-helm - type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - required: - - adminPassword - - chartVersion - type: object - ingress: - description: General configuration for ingress. - properties: - annotations: - x-kubernetes-preserve-unknown-fields: true - default: {} - description: Additionals annotations to add to all tools' ingresses - type: object - labels: - x-kubernetes-preserve-unknown-fields: true - default: {} - description: Additionals labels to add to all tools' ingresses - type: object - tls: - description: TLS configuration for ingresses. - properties: - acme: - description: acme/let'sencrypt configuration, only http challenge - properties: - email: - description: User email used for ACME - type: string - environment: - description: | - Let's encrypt environment to use for issuing certificates: - - production : Use this value for production ready certificates. - Beware of rate limits. See: https://letsencrypt.org/docs/rate-limits/ - - staging : Use this value for testing purposes. It has significantly higher rate limits. - Beware of root certificates. 
See: https://letsencrypt.org/docs/staging-environment/ - enum: - - production - - staging - type: string - default: production - required: - - email - - environment - type: object - ca: - description: CA configuration, need a valid CA key/cert in - cert-manager namespace - properties: - secretName: - description: The TLS secret name available in cert-manager - namespace - type: string - required: - - secretName - type: object - tlsSecret: - description: Define the tls secret name to use in ingress - properties: - method: - description: | - How to retrieve the secret names: - - in-namespace: you are in charge to replicate the secret in tools' namespaces - enum: - - in-namespace - type: string - default: in-namespace - name: - description: tls secret name - type: string - required: + type: array + exposedCA: + description: Private CA cert needed to validate HTTPS traffic between tools. + type: object + required: + - type + properties: + configmap: + description: The configmap with private CA + type: object + properties: + namespace: + description: The configmap namespace + type: string + name: + description: The configmap name + type: string + key: + description: The configmap key providing the Private CA cert + type: string + required: + - namespace - name - - method - type: object - type: - default: none - description: "- none: no TLS (seems like a bad idea, unstable - deployment). 
\n- acme: TLS with HTTP ACME challenge -> https://cert-manager.io/docs/configuration/acme/http01/\n- - ca: TLS with custom CA -> https://cert-manager.io/docs/configuration/ca/\n- - tlsSecret: TLS with a custom TLS secret name specified in - ingress.spec.tls.secretName, should be wildcard or include - all hosts -> https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" - enum: + - key + secret: + description: The secret with private CA + type: object + properties: + namespace: + description: The secret namespace + type: string + name: + description: The secret name + type: string + key: + description: The secret key providing the Private CA cert + type: string + required: + - namespace + - name + - key + url: + description: An URL providing the private CA cert (it should be plain text) + type: string + type: + description: | + Method to find the private CA cert: + - none: No private CA cert needed + - configmap: Private CA cert is stored as a configmap + - secret: Private CA cert is stored as a secret + - url: Private CA cert comes from an external URL + - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly + type: string + enum: - none - - acme - - ca - - tlsSecret + - configmap + - secret + - url + - certmanager + default: none + argocd: + description: Configuration for ArgoCD. + properties: + admin: + description: Configuration for the ArgoCD admin user. + properties: + # TO DO + enabled: + default: false + description: Specifies whether the ArgoCD admin user is enabled. + type: boolean + password: + description: The password for the ArgoCD admin user. + type: string + required: + - enabled + type: object + namespace: + default: dso-argocd + description: The namespace for ArgoCD. + type: string + subDomain: + default: argocd + description: The subdomain for ArgoCD. + type: string + chartVersion: + default: 4.7.13 + description: ArgoCD Bitnami helm chart version (e.g., "4.7.13"). 
+ type: string + values: + description: | + You can merge customs values for argocd, it will be merged with roles/argocd/tasks/main.yaml + See https://github.com/bitnami/charts/tree/main/bitnami/argo-cd + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + required: + - chartVersion + type: object + certmanager: + description: Configuration for Cert Manager. + properties: + version: + default: v1.11.0 + description: Specifies the version of Cert Manager to use. + type: string + required: + - version + type: object + cloudnativepg: + description: Configuration for CloudNativePG. + properties: + namespace: + default: dso-cloudnativepg + description: The namespace for cloudnativepg. + type: string + chartVersion: + default: 0.18.2 + description: CloudNativePG helm chart version (e.g., "0.18.2"). + type: string + required: + - chartVersion + - namespace + type: object + console: + description: Configuration for the console. + properties: + dbPassword: + description: The password for the console's database. + type: string + namespace: + default: dso-console + description: The namespace for console. + type: string + release: + default: 4.1.0 + description: Console version (e.g., "4.1.0"). + type: string + subDomain: + default: console + description: The subdomain for console. + type: string + required: + - dbPassword + type: object + gitlab: + description: Configuration for GitLab. + properties: + namespace: + default: dso-gitlab + description: The namespace for GitLab. + type: string + subDomain: + default: gitlab + description: The subdomain for GitLab. + type: string + chartVersion: + default: 6.11.10 + description: GitLab chart version (e.g., "6.11.10"). 
+ type: string + values: + description: | + You can merge customs values for gitlab, it will be merged with roles/gitlab/templates/gitlab-instance.yaml.j2 + See https://gitlab.com/gitlab-org/charts/gitlab/-/blob/master/values.yaml + And https://docs.gitlab.com/charts/charts/globals.html + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + insecureCI: + description: | + If you use a private CA in exposed_ca, you should set it to true. + Configuring tools in pipelines container is not an easy job. + type: boolean + default: false + required: + - chartVersion + type: object + global: + description: Global configuration not specific to one service + properties: + environment: + default: production + description: | + Defines DSO environment type, i.e. development or production. + type: string + projectsRootDir: + default: + - forge + description: | + Defines root directory for projects in Gitlab and Vault + These values should NEVER be changed once a project is used ! + projects will not be migrated automatically + Represented as array of strings (ex: ['company', 'forge', 'projects']) + Cannot be an empty Array + items: type: string - required: - - type - type: object - type: object - keycloak: - description: Configuration for Keycloak. - properties: - namespace: - default: dso-keycloak - description: The namespace for Keycloak. - type: string - subDomain: - default: keycloak - description: The subdomain for Keycloak. - type: string - chartVersion: - default: 16.0.3 - description: Keycloak chart version (e.g., "16.0.3"). - type: string - postgreSQLimageName: - default: "" - description: | - PostgreSQL image name that will be installed by CNPG operator. - By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. 
- See : https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster - You can override this by setting the postgreSQLimageName here. - In example, you could set the value to "ghcr.io/cloudnative-pg/postgresql:14.9". - Remember you should NEVER use tags like "latest" or just "14" in a production environment. - More about container image requirements here : - https://cloudnative-pg.io/documentation/1.20/container_images/ - You can browse available image tags here : - https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql - type: string - values: - description: | - You can merge customs values for keycloak, they will be merged with roles/keycloak/templates/values.j2 - See https://github.com/bitnami/charts/tree/main/bitnami/keycloak - And https://github.com/bitnami/charts/blob/main/bitnami/keycloak/values.yaml - type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion - type: object - kubed: - description: Configuration for Kubed (config-syncer). - properties: - chartVersion: - default: v0.13.2 - description: Kubed helm chart version (e.g., "v0.13.2"). - type: string - required: - - chartVersion - type: object - nexus: - description: Configuration for Nexus. - properties: - namespace: - default: dso-nexus - description: The namespace for Nexus. - type: string - subDomain: - default: nexus - description: The subdomain for Nexus. - type: string - storageSize: - description: | - The storage size for Nexus. - Must comply with Kubernetes size defnitions (i.e 100Gi). - type: string - default: 25Gi - imageTag: - default: 3.56.0 - description: Nexus version based on image tag (e.g., "3.56.0"). - type: string - required: - - storageSize - - imageTag - type: object - proxy: - description: Proxy configuration for tools. - properties: - enabled: - default: false - description: Enable or disable proxy on tools. 
- type: boolean - host: - description: Distant proxy ip/hostname - type: string - http_proxy: - description: URL for http traffic, (http://:/) - type: string - https_proxy: - description: URL for https traffic, (http://:/) - type: string - no_proxy: - default: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,localhost,svc.cluster.local,localdomain - description: "Networks destination excluded by the proxy. Not - so easy to configure. \nExample: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,localhost,svc.cluster.local,localdomain\n" - type: string - port: - default: "3128" - description: Distant proxy port listenning - type: string - required: - - enabled - type: object - sonarqube: - description: Configuration for SonarQube. - properties: - namespace: - default: dso-sonarqube - description: The namespace for SonarQube. - type: string - subDomain: - default: sonarqube - description: The subdomain for SonarQube. - type: string - imageTag: - default: 9.9-community - description: SonarQube version based on image tag (e.g., "9.9-community"). - type: string - required: - - imageTag - type: object - sops: - properties: - namespace: - default: dso-sops - description: Namespace for SOPS. - type: string - chartVersion: - default: 0.15.1 - description: SOPS helm chart version (e.g., "0.15.1"). - type: string - values: - description: | - You can merge customs values for sops, it will be merged with roles/sops/tasks/main.yaml - See https://github.com/isindir/sops-secrets-operator/tree/master/chart/helm3/sops-secrets-operator - type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion - type: object - vault: - description: Configuration for Vault. - properties: - namespace: - default: dso-vault - description: The namespace for Vault. - type: string - subDomain: - default: vault - description: The subdomain for Vault. 
- type: string - chartVersion: - default: 0.25.0 - description: Hashicorp Vault helm chart version (e.g., "0.25.0"). - type: string - values: - description: | - You can merge customs values for vault, it will be merged with roles/vault/tasks/main.yaml - See https://github.com/hashicorp/vault-helm - type: object - default: {} - x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion - type: object - required: - - additionalsCA - - exposedCA - - ingress - - proxy - - certmanager - - cloudnativepg - - console - - sonarqube - - vault - - keycloak - - nexus - - harbor - - gitlab - - argocd - - sops - - global - type: object - required: - - spec - - metadata - type: object - served: true - storage: true \ No newline at end of file + minItems: 1 + type: array + rootDomain: + description: The top level of your domain. To expose Argo as "argo.mycompany.com", + the value should be ".mycompany.com" (notice the leading dot). + type: string + default: .example.com + pattern: "^\\..*$" + required: + - projectsRootDir + - rootDomain + - environment + type: object + harbor: + description: Configuration for Harbor. + properties: + adminPassword: + description: Administrator password for Harbor. + type: string + namespace: + default: dso-harbor + description: The namespace for Harbor. + type: string + subDomain: + default: harbor + description: The subdomain for Harbor. + type: string + chartVersion: + default: 1.12.2 + description: Harbor helm chart version (e.g., "1.12.2"). + type: string + values: + description: | + You can merge customs values for harbor, it will be merged with roles/harbor/tasks/main.yaml + See https://github.com/goharbor/harbor-helm + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + required: + - adminPassword + - chartVersion + type: object + ingress: + description: General configuration for ingress. 
+ properties: + annotations: + x-kubernetes-preserve-unknown-fields: true + default: {} + description: Additionals annotations to add to all tools' ingresses + type: object + labels: + x-kubernetes-preserve-unknown-fields: true + default: {} + description: Additionals labels to add to all tools' ingresses + type: object + tls: + description: TLS configuration for ingresses. + properties: + acme: + description: acme/let'sencrypt configuration, only http challenge + properties: + email: + description: User email used for ACME + type: string + environment: + description: | + Let's encrypt environment to use for issuing certificates: + - production : Use this value for production ready certificates. + Beware of rate limits. See: https://letsencrypt.org/docs/rate-limits/ + - staging : Use this value for testing purposes. It has significantly higher rate limits. + Beware of root certificates. See: https://letsencrypt.org/docs/staging-environment/ + enum: + - production + - staging + type: string + default: production + required: + - email + - environment + type: object + ca: + description: CA configuration, need a valid CA key/cert in + cert-manager namespace + properties: + secretName: + description: The TLS secret name available in cert-manager + namespace + type: string + required: + - secretName + type: object + tlsSecret: + description: Define the tls secret name to use in ingress + properties: + method: + description: | + How to retrieve the secret names: + - in-namespace: you are in charge to replicate the secret in tools' namespaces + enum: + - in-namespace + type: string + default: in-namespace + name: + description: tls secret name + type: string + required: + - name + - method + type: object + type: + default: none + description: "- none: no TLS (seems like a bad idea, unstable + deployment). 
\n- acme: TLS with HTTP ACME challenge -> https://cert-manager.io/docs/configuration/acme/http01/\n- + ca: TLS with custom CA -> https://cert-manager.io/docs/configuration/ca/\n- + tlsSecret: TLS with a custom TLS secret name specified in + ingress.spec.tls.secretName, should be wildcard or include + all hosts -> https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets\n" + enum: + - none + - acme + - ca + - tlsSecret + type: string + required: + - type + type: object + type: object + keycloak: + description: Configuration for Keycloak. + properties: + namespace: + default: dso-keycloak + description: The namespace for Keycloak. + type: string + subDomain: + default: keycloak + description: The subdomain for Keycloak. + type: string + chartVersion: + default: 16.0.3 + description: Keycloak chart version (e.g., "16.0.3"). + type: string + postgreSQLimageName: + default: "" + description: | + PostgreSQL image name that will be installed by CNPG operator. + By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. + See : https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster + You can override this by setting the postgreSQLimageName here. + In example, you could set the value to "ghcr.io/cloudnative-pg/postgresql:14.9". + Remember you should NEVER use tags like "latest" or just "14" in a production environment. 
+ More about container image requirements here : + https://cloudnative-pg.io/documentation/1.20/container_images/ + You can browse available image tags here : + https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql + type: string + values: + description: | + You can merge customs values for keycloak, they will be merged with roles/keycloak/templates/values.j2 + See https://github.com/bitnami/charts/tree/main/bitnami/keycloak + And https://github.com/bitnami/charts/blob/main/bitnami/keycloak/values.yaml + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + required: + - chartVersion + type: object + kubed: + description: Configuration for Kubed (config-syncer). + properties: + chartVersion: + default: v0.13.2 + description: Kubed helm chart version (e.g., "v0.13.2"). + type: string + required: + - chartVersion + type: object + nexus: + description: Configuration for Nexus. + properties: + namespace: + default: dso-nexus + description: The namespace for Nexus. + type: string + subDomain: + default: nexus + description: The subdomain for Nexus. + type: string + storageSize: + description: | + The storage size for Nexus. + Must comply with Kubernetes size defnitions (i.e 100Gi). + type: string + default: 25Gi + imageTag: + default: 3.56.0 + description: Nexus version based on image tag (e.g., "3.56.0"). + type: string + required: + - storageSize + - imageTag + type: object + proxy: + description: Proxy configuration for tools. + properties: + enabled: + default: false + description: Enable or disable proxy on tools. 
+ type: boolean + host: + description: Distant proxy ip/hostname + type: string + http_proxy: + description: URL for http traffic, (http://:/) + type: string + https_proxy: + description: URL for https traffic, (http://:/) + type: string + no_proxy: + default: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,localhost,svc.cluster.local,localdomain + description: "Networks destination excluded by the proxy. Not + so easy to configure. \nExample: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,localhost,svc.cluster.local,localdomain\n" + type: string + port: + default: "3128" + description: Distant proxy port listenning + type: string + required: + - enabled + type: object + sonarqube: + description: Configuration for SonarQube. + properties: + namespace: + default: dso-sonarqube + description: The namespace for SonarQube. + type: string + subDomain: + default: sonarqube + description: The subdomain for SonarQube. + type: string + imageTag: + default: 9.9-community + description: SonarQube version based on image tag (e.g., "9.9-community"). + type: string + required: + - imageTag + type: object + sops: + properties: + namespace: + default: dso-sops + description: Namespace for SOPS. + type: string + chartVersion: + default: 0.15.1 + description: SOPS helm chart version (e.g., "0.15.1"). + type: string + values: + description: | + You can merge customs values for sops, it will be merged with roles/sops/tasks/main.yaml + See https://github.com/isindir/sops-secrets-operator/tree/master/chart/helm3/sops-secrets-operator + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + required: + - chartVersion + type: object + vault: + description: Configuration for Vault. + properties: + namespace: + default: dso-vault + description: The namespace for Vault. + type: string + subDomain: + default: vault + description: The subdomain for Vault. 
+ type: string + chartVersion: + default: 0.25.0 + description: Hashicorp Vault helm chart version (e.g., "0.25.0"). + type: string + values: + description: | + You can merge customs values for vault, it will be merged with roles/vault/tasks/main.yaml + See https://github.com/hashicorp/vault-helm + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true + required: + - chartVersion + type: object + required: + - additionalsCA + - exposedCA + - ingress + - proxy + - certmanager + - cloudnativepg + - console + - sonarqube + - vault + - keycloak + - nexus + - harbor + - gitlab + - argocd + - sops + - global + type: object + required: + - spec + - metadata + type: object + served: true + storage: true \ No newline at end of file diff --git a/roles/socle-config/tasks/main.yaml b/roles/socle-config/tasks/main.yaml index a5ab5301..2b22fc1a 100644 --- a/roles/socle-config/tasks/main.yaml +++ b/roles/socle-config/tasks/main.yaml @@ -7,7 +7,7 @@ kind: dsc name: conf-dso api_version: cloud-pi-native.fr/v1alpha -# register: socle_config_default + # register: socle_config_default register: socle_config - name: Get socle config from dsc_cr extra var when defined diff --git a/roles/sonarqube/tasks/main.yaml b/roles/sonarqube/tasks/main.yaml index 9d1b86af..e30c3e5d 100644 --- a/roles/sonarqube/tasks/main.yaml +++ b/roles/sonarqube/tasks/main.yaml @@ -93,7 +93,7 @@ - name: Generate random password ansible.builtin.set_fact: - admin: '{{ admin_account.stdout | from_json }}' + admin: "{{ admin_account.stdout | from_json }}" token_pass: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}" - name: Get sha384sum of token From 8a947e48b6706571927619deb9dbd90cdb5c8098 Mon Sep 17 00:00:00 2001 From: this-is-tobi Date: Tue, 26 Sep 2023 01:30:07 +0200 Subject: [PATCH 03/31] fix: :bug: pass proxy values to console --- roles/console-dso/templates/app.yaml.j2 | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/roles/console-dso/templates/app.yaml.j2 
b/roles/console-dso/templates/app.yaml.j2 index 63d58775..bc7912bc 100644 --- a/roles/console-dso/templates/app.yaml.j2 +++ b/roles/console-dso/templates/app.yaml.j2 @@ -39,6 +39,14 @@ spec: value: "{{ keycloak_domain }}" - name: keycloak.realm value: dso +{% if dsc.proxy.enabled %} + - name: server.container.env.HTTP_PROXY + value: {{ dsc.proxy.http_proxy }} + - name: server.container.env.HTTPS_PROXY + value: {{ dsc.proxy.https_proxy }} + - name: server.container.env.NO_PROXY + value: {{ dsc.proxy.no_proxy }} +{% endif %} {% if dsc.exposedCA.type != 'none' %} - name: server.extraCa.name value: bundle From a5515f20e2f29dcce2f7ad50004d2e6867a64168 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Thu, 31 Aug 2023 10:46:03 +0200 Subject: [PATCH 04/31] feat: :sparkles: installation via helm chart officiel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit https://github.com/SonarSource/helm-chart-sonarqube refactor de l'installation via helm chart, utilisation de l'opérateur CNPG pour la BDD, mise à jour CRD. 
docs: :memo: mise à jour du README sur partie SonarQube refactor: :fire: mise à jour des values PostgreSQL feat: :art: creation token selon branches de SonarQube adaptation de la creation du token liée à évolution BDD + suppression déclaration de PV inutiles --- README.md | 145 ++++++++++-- .../files/cr-conf-dso-default.yaml | 16 +- roles/socle-config/files/crd-conf-dso.yaml | 189 +++++++++------- roles/sonarqube/tasks/main.yaml | 208 +++++++++++++----- .../templates/pg-cluster-sonar.yaml.j2 | 35 +++ .../templates/postgres-deployment.yaml.j2 | 50 ----- .../templates/postgres-pv-claim.yaml.j2 | 11 - .../templates/postgres-service.yaml.j2 | 13 -- .../templates/sonar-deployment.yaml.j2 | 71 ------ .../sonarqube/templates/sonar-ingress.yaml.j2 | 35 --- .../templates/sonar-pv-claim.yaml.j2 | 11 - .../sonarqube/templates/sonar-service.yaml.j2 | 15 -- roles/sonarqube/templates/values.yaml.j2 | 162 ++++++++++++++ 13 files changed, 596 insertions(+), 365 deletions(-) create mode 100644 roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 delete mode 100644 roles/sonarqube/templates/postgres-deployment.yaml.j2 delete mode 100644 roles/sonarqube/templates/postgres-pv-claim.yaml.j2 delete mode 100644 roles/sonarqube/templates/postgres-service.yaml.j2 delete mode 100644 roles/sonarqube/templates/sonar-deployment.yaml.j2 delete mode 100644 roles/sonarqube/templates/sonar-ingress.yaml.j2 delete mode 100644 roles/sonarqube/templates/sonar-pv-claim.yaml.j2 delete mode 100644 roles/sonarqube/templates/sonar-service.yaml.j2 create mode 100644 roles/sonarqube/templates/values.yaml.j2 diff --git a/README.md b/README.md index 3716989c..2e6bbf46 100644 --- a/README.md +++ b/README.md @@ -35,6 +35,8 @@ - [Kubed (config-syncer)](#kubed-config-syncer) - [Sonatype Nexus Repository](#sonatype-nexus-repository) - [SonarQube Community Edition](#sonarqube-community-edition) + - [Gel de l'image SonarQube](#gel-de-limage-sonarqube) + - [Gel de l'image PostgreSQL pour 
SonarQube](#gel-de-limage-postgresql-pour-sonarqube) - [SOPS](#sops) - [Gel de l'image](#gel-de-limage-2) - [Vault](#vault) @@ -174,7 +176,7 @@ spec: certmanager: version: v1.11.0 cloudnativepg: - namespace: mynamespace-cloudnativepg + namespace: cnpg-system chartVersion: 0.18.2 console: dbPassword: AnotherPassBitesTheDust @@ -285,9 +287,15 @@ spec: no_proxy: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,api.example.com,api-int.example.com,canary-openshift-ingress-canary.apps.example.com,console-openshift-console.apps.example.com,localhost,oauth-openshift.apps.example.com,svc.cluster.local,localdomain port: "3128" sonarqube: - namespace: mynamespace-sonarqube - subDomain: sonarqube - imageTag: 9.9-community + chartVersion: 3.3.0 + namespace: mynamespace-sonar + postgreSQLimageName: ghcr.io/cloudnative-pg/postgresql:15.4 + subDomain: sonar + values: + image: + registry: docker.io + repository: bitnami/sonarqube + tag: 9.9.1-debian-11-r101 sops: namespace: mynamespace-sops chartVersion: "0.15.1" @@ -333,6 +341,7 @@ Voici les liens vers les documentations de chart helm pour les outils concernés - [GitLab](https://docs.gitlab.com/charts) - [Harbor](https://github.com/goharbor/harbor-helm) - [Keycloak](https://github.com/bitnami/charts/tree/main/bitnami/keycloak) +- [SonarQube](https://github.com/bitnami/charts/tree/main/bitnami/sonarqube) - [SOPS](https://github.com/isindir/sops-secrets-operator/tree/master/chart/helm3/sops-secrets-operator) - [HashiCorp Vault](https://github.com/hashicorp/vault-helm) @@ -1268,36 +1277,144 @@ Et relancer l'installation de nexus, laquelle procédera à la mise à jour de v ```bash ansible-playbook install.yaml -t nexus ``` - ### SonarQube Community Edition -Le composant sonarqube est installé directement via le manifest de deployment "sonar-deployment.yaml.j2" intégré au role associé. 
+Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par défaut sans modification, le rôle sonarqube déploiera la dernière version du [chart helm Bitnami SonarQube](https://bitnami.com/stack/sonarqube/helm) disponible dans le cache des dépôts helm de l'utilisateur. -Si vous utilisez la `dsc` par défaut nommée `conf-dso` c'est l'image "9.9-community" qui sera déployée. +Ceci est lié au fait que le paramètre de configuration `chartVersion` de SonarQube, présent dans la `dsc` par défaut `conf-dso`, est laissé vide (`chartVersion: ""`). -Les tags d'images utilisables pour l'édition community sont disponibles ici : +Pour connaître la dernière version du chart helm et de l'application actuellement disponibles dans votre cache local, utilisez la commande suivante : -Pour déployer une autre version, il suffira d'éditer la `dsc`, de préférence avec le fichier YAML que vous avez initialement utilisé pendant l'installation, puis modifier la section suivante en y indiquant la version d'image désirée au niveau du paramètre **imageTag**. Exemple : +```bash +helm search repo bitnami/sonarqube +``` + +Exemple de sortie avec un cache de dépôts qui n'est pas à jour : + +``` +NAME CHART VERSION APP VERSION DESCRIPTION +bitnami/sonarqube 3.2.8 10.1.0 SonarQube(TM) is an open source quality managem... +``` + +Pour mettre à jour votre cache de dépôts helm, et obtenir ainsi la dernière version du chart et de l'application : + +```bash +helm repo update +``` + +Relancer immédiatement la commande de recherche : + +```bash +helm search repo bitnami/sonarqube +``` + +Si votre cache n'était pas déjà à jour, la sortie doit alors vous indiquer des versions plus récentes. 
+ +Pour connaître la liste des versions de charts helm de SonarQube que vous pouvez maintenant installer, utilisez la commande suivante : + +```bash +helm search repo -l bitnami/sonarqube +``` + +Si vous souhaitez fixer la version du chart helm, et donc celle de SonarQube, il vous suffira de relever le **numéro de version du chart** désiré, puis l'indiquer dans votre ressource `dsc` de configuration. + +Par exemple, si vous utilisez la `dsc` par défaut nommée `conf-dso`, vous pourrez éditer le fichier YAML que vous aviez utilisé pour la paramétrer lors de l'installation, puis adapter la section suivante en y spécifiant le numéro souhaité au niveau du paramètre **chartVersion**. Exemple : ```yaml sonarqube: - namespace: mynamespace-sonarqube - subDomain: sonarqube - imageTag: 9.9.1-community + chartVersion: 3.3.0 + namespace: mynamespace-sonar + subDomain: sonar ``` -Puis appliquer le changement de configuration, exemple : +Il vous suffit alors de mettre à jour votre configuration, exemple : + +```bash +kubectl apply -f ma-conf-dso.yaml +``` + +Puis de relancer l'installation de SonarQube, laquelle mettra à jour la version du chart et l'image associée, sans coupure de service : + +```bash +ansible-playbook install.yaml -t sonarqube +``` +#### Gel de l'image SonarQube + +En complément de l'usage du paramètre `chartVersion`, il est également possible de fixer la version d'image de SonarQube de façon plus fine, en utilisant un tag dit "[immutable](https://docs.bitnami.com/kubernetes/apps/sonarqube/configuration/understand-rolling-immutable-tags/)" (**recommandé en production**). + +Les différents tags utilisables pour l'image de SonarQube sont disponibles ici : https://hub.docker.com/r/bitnami/sonarqube/tags + +Les tags dits "immutables" sont ceux qui possèdent un suffixe de type rXXX, lequel correspond au numéro de révision. Ils pointent toujours vers la même image. Par exemple le tag "9.9.1-debian-11-r101" est un tag immutable. 
+ +Pour spécifier un tel tag, il nous suffira d'éditer la ressource `dsc` de configuration (par défaut ce sera la `dsc` nommée `conf-dso`) et de surcharger les "values" correspondantes du chart helm, en ajoutant celles dont nous avons besoin. Exemple : + +```yaml + sonarqube: + chartVersion: 3.3.0 + namespace: mynamespace-sonar + subDomain: sonar + values: + image: + registry: docker.io + repository: bitnami/sonarqube + tag: 9.9.1-debian-11-r101 +``` + +Appliquer le changement en utilisant votre fichier de définition, exemple : ```bash kubectl apply -f ma-conf-dso.yaml ``` -Et relancer l'installation de sonarqube, laquelle procédera à la mise à jour de version **avec coupure de service** : +Puis relancer l'installation avec le tag `sonarqube` pour procéder au remplacement par l'image spécifiée, sans coupure de service : ```bash ansible-playbook install.yaml -t sonarqube ``` +Pour mémoire, les values utilisables sont disponibles ici : https://github.com/bitnami/charts/blob/main/bitnami/sonarqube/values.yaml +#### Gel de l'image PostgreSQL pour SonarQube + +Tel qu'il est déployé, SonarQube s'appuie sur un cluster de base de donnée PostgreSQL géré par l'opérateur CloudNativePG. + +Comme indiqué dans sa [documentation officielle](https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster), par défaut CloudNativePG installera la dernière version mineure disponible de la dernière version majeure de PostgreSQL au moment de la publication de l'opérateur. + +De plus, comme l'indique la [FAQ officielle](https://cloudnative-pg.io/documentation/1.20/faq/), CloudNativePG utilise des conteneurs d'application immutables. Cela signifie que le conteneur ne sera pas modifié durant tout son cycle de vie (aucun patch, aucune mise à jour ni changement de configuration). + +Il est toutefois possible et **recommandé en production** de fixer la version d'image de BDD pour SonarQube. 
+ +Pour cela, nous utiliserons l'un des tags d'image immutables proposés par CloudNativePG. + +Les tags en question sont disponibles ici : https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql + +Pour spécifier un tel tag, il nous suffira d'éditer la ressource `dsc` de configuration (par défaut ce sera la `dsc` nommée `conf-dso`) et d'indiquer le tag souhaité au niveau du paramètre `postgreSQLimageName`. Exemple : + +```yaml + sonarqube: + chartVersion: 3.3.0 + namespace: mynamespace-sonar + postgreSQLimageName: ghcr.io/cloudnative-pg/postgresql:15.4 + subDomain: sonar + values: + image: + registry: docker.io + repository: bitnami/sonarqube + tag: 9.9.1-debian-11-r101 +``` + +**Attention !** : Comme indiqué dans la [documentation officielle de CloudNativePG](https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster) il ne faudra **jamais** utiliser en production de tag tel que `latest` ou juste `15` (sans numéro de version mineure). + +Appliquer le changement en utilisant votre fichier de définition, exemple : + +```bash +kubectl apply -f ma-conf-dso.yaml +``` + +Puis relancer l'installation avec le tag `sonarqube` pour procéder au remplacement par l'image spécifiée, sans coupure de service : + +```bash +ansible-playbook install.yaml -t sonarqube +``` ### SOPS Tel qu'il est conçu, et s'il est utilisé avec la `dsc` de configuration par défaut sans modification, le rôle sops déploiera la dernière version du [chart helm SOPS](https://github.com/isindir/sops-secrets-operator) disponible dans le cache des dépôts helm de l'utilisateur. 
diff --git a/roles/socle-config/files/cr-conf-dso-default.yaml b/roles/socle-config/files/cr-conf-dso-default.yaml index 273493bc..cc1ae99b 100644 --- a/roles/socle-config/files/cr-conf-dso-default.yaml +++ b/roles/socle-config/files/cr-conf-dso-default.yaml @@ -11,18 +11,18 @@ spec: argocd: chartVersion: "" certmanager: - version: v1.11.0 + version: "" cloudnativepg: chartVersion: "" + namespace: "" console: dbPassword: myAwesomePassword gitlab: - chartVersion: "6.11.10" + chartVersion: "" global: - projectsRootDir: - - forge - rootDomain: - .example.com + environment: "" + projectsRootDir: [] + rootDomain: "" harbor: adminPassword: anotherGreatPassword chartVersion: "" @@ -32,12 +32,12 @@ spec: kubed: chartVersion: "" nexus: - storageSize: 25Gi + storageSize: "" imageTag: "" proxy: enabled: false sonarqube: - imageTag: "" + chartVersion: "" sops: chartVersion: "" vault: diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 3754d123..3b328abb 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -47,65 +47,6 @@ spec: - name type: object type: array - exposedCA: - description: Private CA cert needed to validate HTTPS traffic between tools. 
- type: object - required: - - type - properties: - configmap: - description: The configmap with private CA - type: object - properties: - namespace: - description: The configmap namespace - type: string - name: - description: The configmap name - type: string - key: - description: The configmap key providing the Private CA cert - type: string - required: - - namespace - - name - - key - secret: - description: The secret with private CA - type: object - properties: - namespace: - description: The secret namespace - type: string - name: - description: The secret name - type: string - key: - description: The secret key providing the Private CA cert - type: string - required: - - namespace - - name - - key - url: - description: An URL providing the private CA cert (it should be plain text) - type: string - type: - description: | - Method to find the private CA cert: - - none: No private CA cert needed - - configmap: Private CA cert is stored as a configmap - - secret: Private CA cert is stored as a secret - - url: Private CA cert comes from an external URL - - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly - type: string - enum: - - none - - configmap - - secret - - url - - certmanager - default: none argocd: description: Configuration for ArgoCD. properties: @@ -149,7 +90,7 @@ spec: description: Configuration for Cert Manager. properties: version: - default: v1.11.0 + default: v1.11.1 description: Specifies the version of Cert Manager to use. type: string required: @@ -159,7 +100,7 @@ spec: description: Configuration for CloudNativePG. properties: namespace: - default: dso-cloudnativepg + default: cnpg-system description: The namespace for cloudnativepg. type: string chartVersion: @@ -181,8 +122,8 @@ spec: description: The namespace for console. type: string release: - default: 4.1.0 - description: Console version (e.g., "4.1.0"). + default: 5.6.0 + description: Console version (e.g., "5.6.0"). 
type: string subDomain: default: console @@ -191,6 +132,65 @@ spec: required: - dbPassword type: object + exposedCA: + description: Private CA cert needed to validate HTTPS traffic between tools. + type: object + required: + - type + properties: + configmap: + description: The configmap with private CA + type: object + properties: + namespace: + description: The configmap namespace + type: string + name: + description: The configmap name + type: string + key: + description: The configmap key providing the Private CA cert + type: string + required: + - namespace + - name + - key + secret: + description: The secret with private CA + type: object + properties: + namespace: + description: The secret namespace + type: string + name: + description: The secret name + type: string + key: + description: The secret key providing the Private CA cert + type: string + required: + - namespace + - name + - key + url: + description: An URL providing the private CA cert (it should be plain text) + type: string + type: + description: | + Method to find the private CA cert: + - none: No private CA cert needed + - configmap: Private CA cert is stored as a configmap + - secret: Private CA cert is stored as a secret + - url: Private CA cert comes from an external URL + - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly + type: string + enum: + - none + - configmap + - secret + - url + - certmanager + default: none gitlab: description: Configuration for GitLab. properties: @@ -203,8 +203,8 @@ spec: description: The subdomain for GitLab. type: string chartVersion: - default: 6.11.10 - description: GitLab chart version (e.g., "6.11.10"). + default: 7.0.8 + description: GitLab chart version (e.g., "7.0.8"). type: string values: description: | @@ -433,7 +433,7 @@ spec: storageSize: description: | The storage size for Nexus. - Must comply with Kubernetes size defnitions (i.e 100Gi). + Must comply with Kubernetes size definitions (i.e 100Gi). 
type: string default: 25Gi imageTag: @@ -483,12 +483,40 @@ spec: default: sonarqube description: The subdomain for SonarQube. type: string - imageTag: - default: 9.9-community - description: SonarQube version based on image tag (e.g., "9.9-community"). + chartVersion: + default: 3.2.10 + description: SonarQube Bitnami helm chart version (e.g., "3.2.10"). type: string + postgreSQLimageName: + default: "" + description: | + PostgreSQL image name that will be installed by CNPG operator. + By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. + See : https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster + You can override this by setting the postgreSQLimageName here. + In example, you could set the value to "ghcr.io/cloudnative-pg/postgresql:14.9". + Remember you should NEVER use tags like "latest" or just "14" in a production environment. + More about container image requirements here : + https://cloudnative-pg.io/documentation/1.20/container_images/ + You can browse available image tags here : + https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql + type: string + postgreSQLvolumeSize: + description: | + The storage size for SonarQube PostgreSQL PVC. + Must comply with Kubernetes size definitions (i.e 25Gi). 
+ type: string + default: 25Gi + values: + description: | + You can merge customs values for sonarqube, it will be merged with roles/sonarqube/tasks/main.yaml + See https://github.com/bitnami/charts/tree/main/bitnami/sonarqube + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true required: - - imageTag + - chartVersion + - postgreSQLvolumeSize type: object sops: properties: @@ -537,25 +565,26 @@ spec: type: object required: - additionalsCA - - exposedCA - - ingress - - proxy + - argocd - certmanager - cloudnativepg - console - - sonarqube - - vault + - exposedCA + - gitlab + - global + - harbor + - ingress - keycloak + - kubed - nexus - - harbor - - gitlab - - argocd + - proxy + - sonarqube - sops - - global + - vault type: object required: - spec - metadata type: object served: true - storage: true \ No newline at end of file + storage: true diff --git a/roles/sonarqube/tasks/main.yaml b/roles/sonarqube/tasks/main.yaml index e30c3e5d..db812886 100644 --- a/roles/sonarqube/tasks/main.yaml +++ b/roles/sonarqube/tasks/main.yaml @@ -3,73 +3,136 @@ kind: Namespace name: "{{ dsc.sonarqube.namespace }}" -- name: Get dso-postgres-secret +- name: Create PostgreSQL cluster and sonar database + kubernetes.core.k8s: + template: pg-cluster-sonar.yaml.j2 + +- name: Wait pg-cluster-sonar-rw endpoint + kubernetes.core.k8s_info: + kind: Endpoints + namespace: "{{ dsc.sonarqube.namespace }}" + name: pg-cluster-sonar-rw + register: endpoint + until: endpoint.resources[0].subsets[0].addresses[0] is defined + retries: 30 + delay: 5 + +- name: Wait job.batch/pg-cluster-sonar-1-initdb to be terminated + kubernetes.core.k8s_info: + kind: Job + api_version: batch/v1 + namespace: "{{ dsc.sonarqube.namespace }}" + name: pg-cluster-sonar-1-initdb + register: job1 + until: job1.resources | length == 0 + retries: 30 + delay: 5 + +- name: Add SonarQube helm repo + kubernetes.core.helm_repository: + name: sonarqube + repo_url: 
https://SonarSource.github.io/helm-chart-sonarqube + +- name: Get admin password secret kubernetes.core.k8s_info: kind: Secret - name: dso-postgres-secret namespace: "{{ dsc.sonarqube.namespace }}" - register: dso_postgres_secret + name: sonarqube + register: admin_password_secret -- name: Create dso-postgres-secret values - when: dso_postgres_secret.resources | length == 0 - block: - - name: Set postgres login - ansible.builtin.set_fact: - postgres_admin_user: admin-dso - postgres_admin_password: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}" +- name: Set admin password secret + when: admin_password_secret.resources | length == 0 + no_log: true + kubernetes.core.k8s: + state: present + definition: + apiVersion: v1 + kind: Secret + data: + password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits')|b64encode }}" + currentPassword: "{{ 'admin'|b64encode }}" + metadata: + name: sonarqube + namespace: "{{ dsc.sonarqube.namespace }}" + type: Opaque - - name: Create dso-postgres-secret - kubernetes.core.k8s: - definition: - kind: Secret - metadata: - name: dso-postgres-secret - namespace: "{{ dsc.sonarqube.namespace }}" - data: - POSTGRES_ADMIN_USER: "{{ postgres_admin_user | b64encode }}" - POSTGRES_ADMIN_PASSWORD: "{{ postgres_admin_password | b64encode }}" +- name: Get SonarQube monitoring password secret + no_log: true + kubernetes.core.k8s_info: + namespace: "{{ dsc.sonarqube.namespace }}" + kind: Secret + name: "sonar-monitoring-password" + register: sonar_monitoring_secret -- name: Install sonar +- name: Set SonarQube monitoring password secret + when: sonar_monitoring_secret.resources | length == 0 + no_log: true kubernetes.core.k8s: - template: "{{ item }}" - with_items: - - postgres-pv-claim.yaml.j2 - - postgres-deployment.yaml.j2 - - postgres-service.yaml.j2 - - sonar-pv-claim.yaml.j2 - - sonar-deployment.yaml.j2 - - sonar-service.yaml.j2 - - sonar-ingress.yaml.j2 - -- name: Find Sonarqube admin password + state: 
present + definition: + apiVersion: v1 + kind: Secret + data: + monitoring-password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits')|b64encode }}" + metadata: + name: sonar-monitoring-password + namespace: "{{ dsc.sonarqube.namespace }}" + type: Opaque + +- name: Set SonarQube helm values + ansible.builtin.set_fact: + sonar_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" + +- name: Merge with sonarqube user values + ansible.builtin.set_fact: + sonar_values: "{{ sonar_values | combine(dsc.sonarqube['values'], recursive=True) }}" + +- name: Deploy helm + kubernetes.core.helm: + # force: true + name: sonarqube + chart_ref: sonarqube/sonarqube + chart_version: "{{ dsc.sonarqube.chartVersion }}" + release_namespace: "{{ dsc.sonarqube.namespace }}" + create_namespace: true + values: "{{ sonar_values }}" + +- name: Wait sonarqube endpoint to initialize + kubernetes.core.k8s_info: + kind: Endpoints + namespace: "{{ dsc.sonarqube.namespace }}" + name: sonarqube-sonarqube + register: endpoint + until: endpoint.resources[0].subsets[0].addresses[0].ip is defined + retries: 45 + delay: 5 + +- name: Get DSO Console inventory kubernetes.core.k8s_info: namespace: "{{ dsc.console.namespace }}" kind: ConfigMap name: dso-config register: ansible_inventory -- name: Reset admin password +- name: Get SonarQube version + ansible.builtin.uri: + url: "https://{{ sonar_domain }}/api/server/version" + method: GET + return_content: true + register: sonar_version + +- name: Reset admin token when: ansible_inventory.resources[0].data.SONAR_API_TOKEN is undefined block: - name: Missing Token disclaimer ansible.builtin.debug: msg: "Impossible de retrouver le TOKEN du compte admin, initialisation en cours …" - - name: Wait sonar endpoint to initialize + - name: Get postgres endpoint kubernetes.core.k8s_info: kind: Endpoints namespace: "{{ dsc.sonarqube.namespace }}" - name: sonar - register: endpoint - until: endpoint.resources[0].subsets is defined and 
endpoint.resources[0].subsets | selectattr('addresses') - retries: 30 - delay: 5 - - - name: Wait postgres endpoint to initialize - kubernetes.core.k8s_info: - kind: Endpoints - namespace: "{{ dsc.sonarqube.namespace }}" - name: postgres + name: pg-cluster-sonar-rw register: pg_ep until: pg_ep.resources[0].subsets[0].addresses[0] is defined retries: 15 @@ -79,22 +142,37 @@ ansible.builtin.set_fact: pg_pod: "{{ pg_ep.resources[0].subsets[0].addresses[0].targetRef.name }}" + - name: Get pg-cluster-sonar-app secret + kubernetes.core.k8s_info: + namespace: "{{ dsc.sonarqube.namespace }}" + kind: Secret + name: pg-cluster-sonar-app + register: pg_admin_secret + + - name: Set pg admin facts + ansible.builtin.set_fact: + pg_admin: "{{ pg_admin_secret.resources[0].data.username|b64decode }}" + pg_admin_pass: "{{ pg_admin_secret.resources[0].data.password|b64decode }}" + no_log: true + - name: Get admin account kubernetes.core.k8s_exec: pod: "{{ pg_pod }}" namespace: "{{ dsc.sonarqube.namespace }}" - command: psql -d sonardb -U dso_admin -c "{{ query }}" -t + command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t vars: query: select row_to_json(row) from (SELECT * FROM users WHERE login = 'admin') row; register: admin_account until: "admin_account is not failed" retries: 10 delay: 5 + no_log: true - - name: Generate random password + - name: Generate random token ansible.builtin.set_fact: admin: "{{ admin_account.stdout | from_json }}" token_pass: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}" + no_log: true - name: Get sha384sum of token ansible.builtin.shell: "set -o pipefail && echo -n {{ token_pass }} | sha384sum | awk '{ print $1 }'" @@ -102,22 +180,36 @@ executable: /bin/bash register: token_sha changed_when: true + no_log: true - - name: Adding admin token - kubernetes.core.k8s_exec: - pod: "{{ pg_pod }}" - namespace: "{{ dsc.sonarqube.namespace }}" - command: psql -d sonardb -U dso_admin -c 
"{{ query }}" -t - vars: + - name: Set query fact (for 10.x branch and higher) + when: sonar_version.content is version('10.0.0', operator='ge', version_type='loose') + ansible.builtin.set_fact: + query: > + INSERT INTO user_tokens + ("uuid", "user_uuid", "name", "token_hash", last_connection_date, created_at, "type", expiration_date, "project_uuid") + VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, 'USER_TOKEN', null, '') + ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; + + - name: Set query fact (for 9.x branch and lower) + when: sonar_version.content is version('10.0.0', operator='lt', version_type='loose') + ansible.builtin.set_fact: query: > INSERT INTO user_tokens (uuid, user_uuid, "name", token_hash, last_connection_date, created_at, project_key, "type", expiration_date) VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, '', 'USER_TOKEN', null) ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; + + - name: Adding admin token + kubernetes.core.k8s_exec: + pod: "{{ pg_pod }}" + namespace: "{{ dsc.sonarqube.namespace }}" + command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t register: admin_token_check until: "admin_token_check is not failed" retries: 5 delay: 5 + no_log: true - name: Update inventory kubernetes.core.k8s: @@ -128,16 +220,18 @@ definition: data: SONAR_API_TOKEN: "{{ token_pass }}" + no_log: true - - name: Reset password disclaimer - ansible.builtin.debug: - msg: /!\ PLEASE ENSURE YOU RESET ADMIN PASSWORD /!\ +# - name: Reset password disclaimer +# ansible.builtin.debug: +# msg: /!\ PLEASE ENSURE YOU RESET ADMIN PASSWORD /!\ - name: Set fact sonar token when: ansible_inventory.resources[0].data.SONAR_API_TOKEN is defined ansible.builtin.set_fact: token_pass: "{{ ansible_inventory.resources[0].data.SONAR_API_TOKEN }}" + no_log: true - name: 
Post-install configuration ansible.builtin.include_tasks: - file: setup.yaml \ No newline at end of file + file: setup.yaml diff --git a/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 new file mode 100644 index 00000000..b79669fd --- /dev/null +++ b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 @@ -0,0 +1,35 @@ +apiVersion: postgresql.cnpg.io/v1 +kind: Cluster +metadata: + name: pg-cluster-sonar + namespace: {{ dsc.sonarqube.namespace }} +spec: + instances: 1 + imageName: {{ dsc.sonarqube.postgreSQLimageName }} + + # Parameters and pg_hba configuration will be append + # to the default ones to make the cluster work + postgresql: + parameters: + max_worker_processes: "60" + pg_hba: + # To access through TCP/IP you will need to get username + # and password from the secret pg-cluster-sonar-app + - host sonardb dso_admin all md5 + + bootstrap: + initdb: + database: sonardb + owner: dso_admin + + # Example of rolling update strategy: + # - unsupervised: automated update of the primary once all + # replicas have been upgraded (default) + # - supervised: requires manual supervision to perform + # the switchover of the primary + primaryUpdateStrategy: unsupervised + + # Require 1Gi of space per instance using default storage class + storage: + size: {{ dsc.sonarqube.postgreSQLvolumeSize }} + diff --git a/roles/sonarqube/templates/postgres-deployment.yaml.j2 b/roles/sonarqube/templates/postgres-deployment.yaml.j2 deleted file mode 100644 index de45ab0f..00000000 --- a/roles/sonarqube/templates/postgres-deployment.yaml.j2 +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - service: postgres-dso - name: sonar-postgres-dso - namespace: {{ dsc.sonarqube.namespace }} -spec: - replicas: 1 - selector: - matchLabels: - service: postgres-dso - strategy: - type: Recreate - template: - metadata: - labels: - # network/dnc-net: "true" - service: postgres-dso - spec: - containers: - - 
env: - - name: POSTGRES_PASSWORD - value: "OR64-#Yr!)" - - name: POSTGRES_USER - value: dso_admin - - name: PGDATA - value: /var/lib/postgresql/data/pgdata - - name: POSTGRES_DB - value: sonardb -{% if dsc.proxy.enabled %} - - name: http_proxy - value: "{{ dsc.proxy.http_proxy }}" - - name: https_proxy - value: "{{ dsc.proxy.https_proxy }}" - - name: no_proxy - value: "{{ dsc.proxy.no_proxy }}" -{% endif %} - image: postgres@sha256:390b0cdc715e189d0245d9f149912738e86535f02e011b6209500fccdda37440 - name: postgres-dso - ports: - - containerPort: 5432 - volumeMounts: - - mountPath: /var/lib/postgresql/data - name: postgres-sonar - restartPolicy: Always - volumes: - - name: postgres-sonar - persistentVolumeClaim: - claimName: postgres-sonar-data-claim diff --git a/roles/sonarqube/templates/postgres-pv-claim.yaml.j2 b/roles/sonarqube/templates/postgres-pv-claim.yaml.j2 deleted file mode 100644 index 39c3f556..00000000 --- a/roles/sonarqube/templates/postgres-pv-claim.yaml.j2 +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim # Create PVC -metadata: - namespace: {{ dsc.sonarqube.namespace }} - name: postgres-sonar-data-claim # Sets name of PV -spec: - accessModes: - - ReadWriteOnce # Sets read and write access - resources: - requests: - storage: 100Gi # Sets volume size \ No newline at end of file diff --git a/roles/sonarqube/templates/postgres-service.yaml.j2 b/roles/sonarqube/templates/postgres-service.yaml.j2 deleted file mode 100644 index 97cc623a..00000000 --- a/roles/sonarqube/templates/postgres-service.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - namespace: {{ dsc.sonarqube.namespace }} - name: postgres # Sets service name - labels: - app: postgres # Labels and Selectors -spec: - type: ClusterIP # Sets service type - ports: - - port: 5432 # Sets port to run the postgres application - selector: - service: postgres-dso diff --git a/roles/sonarqube/templates/sonar-deployment.yaml.j2 
b/roles/sonarqube/templates/sonar-deployment.yaml.j2 deleted file mode 100644 index 95d01be1..00000000 --- a/roles/sonarqube/templates/sonar-deployment.yaml.j2 +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: {{ dsc.sonarqube.namespace }} - name: sonar-dso # Sets Deployment name -spec: - replicas: 1 - strategy: - type: Recreate - selector: - matchLabels: - app: sonar - template: - metadata: - labels: - app: sonar - spec: -# initContainers: -# - name: init -# image: busybox -# command: -# - sysctl -# - -w -# - vm.max_map_count=262144 -# imagePullPolicy: IfNotPresent -# securityContext: -# privileged: true - containers: - - name: sonar - #image: sonarqube@sha256:e2c686ea37803a6b50b09670918dc68f9845660397d8925168be0fca24915b27 # Sets Image - image: sonarqube:{{ dsc.sonarqube.imageTag }} - imagePullPolicy: "IfNotPresent" - livenessProbe: - httpGet: - path: /api/system/status - port: 9000 - ports: - - containerPort: 9000 # Exposes container port - env: -# - name: "SONAR_WEB_JAVAADDITIONALOPTS" -# value: "-javaagent:./extensions/plugins/sonarqube-community-branch-plugin-1.12.0.jar=web" -# - name: "SONAR_CE_JAVAADDITIONALOPTS" -# value: "-javaagent:./extensions/plugins/sonarqube-community-branch-plugin-1.12.0.jar=ce" - - name: "SONAR_JDBC_USERNAME" - value: "dso_admin" - - name: "SONAR_JDBC_PASSWORD" - value: "OR64-#Yr!)" - - name: "SONAR_JDBC_URL" - value: "jdbc:postgresql://postgres:5432/sonardb" -{% if dsc.proxy.enabled %} - - name: http_proxy - value: "{{ dsc.proxy.http_proxy }}" - - name: https_proxy - value: "{{ dsc.proxy.https_proxy }}" - - name: no_proxy - value: "{{ dsc.proxy.no_proxy }}" -{% endif %} - volumeMounts: - - mountPath: /opt/sonarqube/data - subPath: data - name: sonar-data - - mountPath: /opt/sonarqube/logs - subPath: logs - name: sonar-data - - mountPath: /opt/sonarqube/extensions - subPath: extensions - name: sonar-data - volumes: - - name: sonar-data - persistentVolumeClaim: - claimName: 
sonar-data-claim diff --git a/roles/sonarqube/templates/sonar-ingress.yaml.j2 b/roles/sonarqube/templates/sonar-ingress.yaml.j2 deleted file mode 100644 index fb07d8cb..00000000 --- a/roles/sonarqube/templates/sonar-ingress.yaml.j2 +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: sonar-ingress - namespace: {{ dsc.sonarqube.namespace }} - annotations: -{% for key, val in dsc.ingress.annotations.items() %} - {{ key }}: {{ val }} -{% endfor %} - labels: -{% for key, val in dsc.ingress.labels.items() %} - {{ key }}: {{ val }} -{% endfor %} -spec: -{% if not dsc.ingress.tls.type == 'none' %} - tls: - - hosts: - - {{ sonar_domain }} -{% if dsc.ingress.tls.type == 'tlsSecret' %} - secretName: {{ dsc.ingress.tls.tlsSecret.name }} -{% else %} - secretName: sonar-tls-secret -{% endif %} -{% endif %} - rules: - - host: {{ sonar_domain }} - http: - paths: - - path: / - pathType: Prefix - backend: - service: - name: sonar - port: - number: 9000 diff --git a/roles/sonarqube/templates/sonar-pv-claim.yaml.j2 b/roles/sonarqube/templates/sonar-pv-claim.yaml.j2 deleted file mode 100644 index bb0d3ef7..00000000 --- a/roles/sonarqube/templates/sonar-pv-claim.yaml.j2 +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: PersistentVolumeClaim # Create PVC -metadata: - namespace: {{ dsc.sonarqube.namespace }} - name: sonar-data-claim # Sets name of PV -spec: - accessModes: - - ReadWriteOnce # Sets read and write access - resources: - requests: - storage: 100Gi # Sets volume size \ No newline at end of file diff --git a/roles/sonarqube/templates/sonar-service.yaml.j2 b/roles/sonarqube/templates/sonar-service.yaml.j2 deleted file mode 100644 index 0beaa668..00000000 --- a/roles/sonarqube/templates/sonar-service.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - namespace: {{ dsc.sonarqube.namespace }} - name: sonar # Sets service name - labels: - app: sonar # Labels and Selectors -spec: - type: ClusterIP # 
Sets service type - ports: - - name: http - port: 9000 - protocol: TCP - selector: - app: sonar diff --git a/roles/sonarqube/templates/values.yaml.j2 b/roles/sonarqube/templates/values.yaml.j2 new file mode 100644 index 00000000..7acc1fc3 --- /dev/null +++ b/roles/sonarqube/templates/values.yaml.j2 @@ -0,0 +1,162 @@ +deploymentType: "StatefulSet" + +replicaCount: 1 + +OpenShift: + enabled: true + createSCC: true + +image: + pullPolicy: IfNotPresent + +securityContext: + fsGroup: 1000 + +containerSecurityContext: + runAsUser: 1000 + +sonarWebContext: / + +ingress: + enabled: true + # Used to create an Ingress record. + hosts: + - name: "{{ sonar_domain }}" + pathType: Prefix + annotations: + route.openshift.io/termination: "edge" +{% for key, val in dsc.ingress.annotations.items() %} + {{ key }}: "{{ val }}" +{% endfor %} + # This property allows for reports up to a certain size to be uploaded to SonarQube + nginx.ingress.kubernetes.io/proxy-body-size: "64m" + ingressClassName: "" + labels: + app: "sonar" +{% if not dsc.ingress.tls.type == 'none' %} + tls: + - hosts: + - {{ sonar_domain }} +{% if dsc.ingress.tls.type == 'tlsSecret' %} + secretName: {{ dsc.ingress.tls.tlsSecret.name }} +{% else %} + secretName: sonar-tls-secret +{% endif %} +{% endif %} + +caCerts: + enabled: false + +initSysctl: + enabled: true + vmMaxMapCount: 524288 + fsFileMax: 131072 + nofile: 131072 + nproc: 8192 + # image: busybox:1.32 + securityContext: + privileged: true + # resources: {} + +initFs: + enabled: true + # image: busybox:1.36 + securityContext: + privileged: true + +prometheusExporter: + enabled: false + +prometheusMonitoring: + podMonitor: + enabled: false + +plugins: + install: [] + # For use behind a corporate proxy when downloading plugins +{% if dsc.proxy.enabled %} + httpProxy: "{{ dsc.proxy.http_proxy }}" + httpsProxy: "{{ dsc.proxy.https_proxy }}" + noProxy: "{{ dsc.proxy.no_proxy }}" +{% endif %} + # image: curlimages/curl:8.2.0 + # resources: {} + # .netrc secret file 
with a key "netrc" to use basic auth while downloading plugins + # netrcCreds: "" + # Set to true to not validate the server's certificate to download plugin + noCheckCertificate: false + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + +sonar.web.javaOpts: "" +sonar.ce.javaOpts: "" + +## a monitoring passcode needs to be defined in order to get reasonable probe results +# not setting the monitoring passcode will result in a deployment that will never be ready +monitoringPasscodeSecretName: "sonar-monitoring-password" +monitoringPasscodeSecretKey: "monitoring-password" + +env: +{% if dsc.proxy.enabled %} + - name: http_proxy + value: "{{ dsc.proxy.http_proxy }}" + - name: https_proxy + value: "{{ dsc.proxy.https_proxy }}" + - name: no_proxy + value: "{{ dsc.proxy.no_proxy }}" +{% endif %} + +## We usually don't make specific ressource recommandations, as they are heavily dependend on +## The usage of SonarQube and the surrounding infrastructure. +## Adjust these values to your needs, but make sure that the memory limit is never under 4 GB +resources: + limits: + cpu: 800m + memory: 4Gi + requests: + cpu: 400m + memory: 2Gi + +persistence: + enabled: false + +## Override JDBC values +## for external Databases +jdbcOverwrite: + # If enable the JDBC Overwrite, make sure to set `postgresql.enabled=false` + enable: true + jdbcUrl: "jdbc:postgresql://pg-cluster-sonar-rw/sonardb?socketTimeout=1500" + jdbcUsername: "dso_admin" + jdbcSecretName: "pg-cluster-sonar-app" + jdbcSecretPasswordKey: "password" + +postgresql: + enabled: false + +sonarqubeFolder: /opt/sonarqube + +# For OpenShift set create=true to ensure service account is created. 
+serviceAccount: + create: true + # name: + # automountToken: false # default + ## Annotations for the Service Account + annotations: {} + +account: +# adminPassword: admin +# currentAdminPassword: admin + adminPasswordSecretName: "sonarqube" + securityContext: {} + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi +curlContainerImage: curlimages/curl:8.2.1 +adminJobAnnotations: {} + +terminationGracePeriodSeconds: 60 \ No newline at end of file From b4453532ec21113f1c92340cf6ac2bc178b69abe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Thu, 31 Aug 2023 10:46:03 +0200 Subject: [PATCH 05/31] refactor: :recycle: installation Sonarqube via helm chart Bitnami fix: :bug: fix import ca from secret fix: :alien: gitlab token needs expiration date refactor: :zap: avoid bad response from proxy when vault is down fix: :bug: keycloak tlsSecret support refactor: :art: manage tls.type == none fix: :art: finish variabilize argo fix: :bug: couldn't remove proxy vars gitlab ci fix: :art: can provide harbor pvc size fix: :zap: remove fsGroup and runAsUser values from argocd fix: :zap: set sonar pg cluster replicas to 2 fix: :bug: fix proxy vars for harbor fix: :bug: maven_config_file should not be configured in CI --- admin-tools/get-credentials.yaml | 11 + filter_plugins/debug.py | 13 ++ install.yaml | 15 +- roles/argocd/templates/values.yaml.j2 | 14 +- roles/ca/tasks/exposed_ca.yaml | 6 +- roles/console-dso/templates/app.yaml.j2 | 4 + roles/gitlab/defaults/main.yaml | 4 - roles/gitlab/mvn_conf_file | 64 ------ roles/gitlab/tasks/main.yaml | 25 +- .../gitlab/templates/gitlab-instance.yaml.j2 | 5 + roles/gitlab/templates/mvn_conf_file.j2 | 64 ++++++ roles/harbor/templates/values.yaml.j2 | 13 +- roles/keycloak/tasks/client.yaml | 3 +- roles/keycloak/tasks/main.yml | 15 -- .../templates/pg-cluster-keycloak.yaml.j2 | 2 +- roles/keycloak/templates/values.yaml.j2 | 19 +- roles/keycloak/vars/main.yaml | 10 +- 
roles/socle-config/files/config.yaml | 36 +++ .../files/cr-conf-dso-default.yaml | 46 ++-- roles/socle-config/files/crd-conf-dso.yaml | 217 ++++++------------ roles/socle-config/files/releases.yaml | 42 ++++ roles/socle-config/tasks/main.yaml | 11 +- .../filter_plugins/settings_filter.py | 1 - .../templates/pg-cluster-sonar.yaml.j2 | 6 +- roles/sonarqube/templates/values.yaml.j2 | 3 + roles/vault/tasks/check.yml | 35 +-- 26 files changed, 372 insertions(+), 312 deletions(-) create mode 100644 filter_plugins/debug.py delete mode 100644 roles/gitlab/defaults/main.yaml delete mode 100644 roles/gitlab/mvn_conf_file create mode 100644 roles/gitlab/templates/mvn_conf_file.j2 create mode 100644 roles/socle-config/files/config.yaml create mode 100644 roles/socle-config/files/releases.yaml diff --git a/admin-tools/get-credentials.yaml b/admin-tools/get-credentials.yaml index 92301be6..0af4c93a 100644 --- a/admin-tools/get-credentials.yaml +++ b/admin-tools/get-credentials.yaml @@ -86,6 +86,17 @@ tags: - always + - ansible.builtin.set_fact: + dsc_default_config: "{{ lookup('ansible.builtin.file', '../roles/socle-config/files/config.yaml') | from_yaml }}" + dsc_default_releases: "{{ lookup('ansible.builtin.file', '../roles/socle-config/files/releases.yaml') | from_yaml }}" + tags: + - always + + - ansible.builtin.set_fact: + dsc: "{{ dsc | combine(dsc_default_config.spec, recursive=True) | combine(dsc_default_releases.spec, recursive=True)}}" + tags: + - always + - name: Get DSO config ConfigMap from DSO console namespace kubernetes.core.k8s_info: kind: ConfigMap diff --git a/filter_plugins/debug.py b/filter_plugins/debug.py new file mode 100644 index 00000000..5cb00b28 --- /dev/null +++ b/filter_plugins/debug.py @@ -0,0 +1,13 @@ +def get_debug_messages(dsc): + messages = [] + if dsc['proxy']['enabled']: + messages.append("--- Proxy ---") + messages.append("Nexus Proxy paramaters cannot be set via API, please configure it with local admin account") + 
messages.append("(Parameter Icon) => HTTP => Proxy Settings") + return messages + +class FilterModule(object): + def filters(self): + return { + 'get_debug_messages': get_debug_messages, + } \ No newline at end of file diff --git a/install.yaml b/install.yaml index 448181df..1320069d 100644 --- a/install.yaml +++ b/install.yaml @@ -18,7 +18,7 @@ - name: cert-manager tags: - cert-manager - - always + - cm - name: confSyncer tags: @@ -28,11 +28,12 @@ - name: cloudnativepg tags: - cloudnativepg - - always + - cnpg - name: keycloak tags: - keycloak + - sso - name: nexus tags: @@ -41,6 +42,7 @@ - name: sonarqube tags: - sonarqube + - sonar - name: gitlab tags: @@ -49,9 +51,11 @@ - name: gitlab-catalog tags: - catalog + - gitlab-catalog - name: gitlab-runner tags: + - runner - gitlab-runner - name: vault @@ -66,6 +70,7 @@ tags: - gitops - argocd + - argo - name: harbor tags: @@ -76,3 +81,9 @@ tags: - console - console-dso + + post_tasks: + - debug: + msg: "{{ dsc | get_debug_messages }}" + tags: + - always \ No newline at end of file diff --git a/roles/argocd/templates/values.yaml.j2 b/roles/argocd/templates/values.yaml.j2 index 81feeb72..2ae8dd97 100644 --- a/roles/argocd/templates/values.yaml.j2 +++ b/roles/argocd/templates/values.yaml.j2 @@ -1,19 +1,30 @@ +securityContext: &securityContext + containerSecurityContext: + runAsUser: null + podSecurityContext: + fsGroup: null # TODO variabilize openshift boolean openshift: enabled: true image: PullPolicy: IfNotPresent config: +{% if dsc.argocd.admin.enabled %} secret: argocdServerAdminPassword: "{{ dsc.argocd.admin.password }}" +{% endif %} {% if dsc.exposedCA != 'none' %} tlsCerts: {{ gitlab_domain }}: | {{ exposed_ca_pem | indent(width=6, first=False) }} {% endif %} +controller: + <<: *securityContext dex: + <<: *securityContext enabled: true server: + <<: *securityContext insecure: true config: clusterResources: "true" @@ -29,7 +40,7 @@ server: {{ exposed_ca_pem | indent(width=8, first=False) }} {% endif %} 
users.anonymous.enabled: "false" - admin.enabled: "false" # TODO variabilize + admin.enabled: "{{ dsc.argocd.admin.enabled }}" kustomize.buildOptions: "--enable-helm" resource.exclusions: | - apiGroups: @@ -49,6 +60,7 @@ server: value: "{{ dsc.proxy.no_proxy }},argo-argo-cd-repo-server" {% endif %} repoServer: + <<: *securityContext extraEnvVars: {% if dsc.proxy.enabled %} - name: HTTP_PROXY diff --git a/roles/ca/tasks/exposed_ca.yaml b/roles/ca/tasks/exposed_ca.yaml index ce44ab32..427bcd7f 100644 --- a/roles/ca/tasks/exposed_ca.yaml +++ b/roles/ca/tasks/exposed_ca.yaml @@ -22,14 +22,14 @@ block: - name: Get secret kubernetes.core.k8s_info: - name: "{{ dsc.exposedCA.configmap.name }}" - namespace: "{{ dsc.exposedCA.configmap.namespace }}" + name: "{{ dsc.exposedCA.secret.name }}" + namespace: "{{ dsc.exposedCA.secret.namespace }}" kind: Secret register: exposed_ca_resource - name: Extract key ansible.builtin.set_fact: - exposed_ca_pem: "{{ exposed_ca_resource.resources[0].data[dsc.exposedCA.configmap.key] | b64decode }}" + exposed_ca_pem: "{{ exposed_ca_resource.resources[0].data[dsc.exposedCA.secret.key] | b64decode }}" - name: Exposed_ca (certmanager) when: dsc.exposedCA.type == 'certmanager' diff --git a/roles/console-dso/templates/app.yaml.j2 b/roles/console-dso/templates/app.yaml.j2 index bc7912bc..1fe89e99 100644 --- a/roles/console-dso/templates/app.yaml.j2 +++ b/roles/console-dso/templates/app.yaml.j2 @@ -64,6 +64,10 @@ spec: {% if dsc.ingress.tls.type == 'tlsSecret' %} - name: ingress.tls.secretName value: {{ dsc.ingress.tls.tlsSecret.name }} +{% endif %} +{% if dsc.ingress.tls.type == 'none' %} + - name: ingress.tls.enabled + value: "false" {% endif %} syncPolicy: automated: {} diff --git a/roles/gitlab/defaults/main.yaml b/roles/gitlab/defaults/main.yaml deleted file mode 100644 index a0eec7b7..00000000 --- a/roles/gitlab/defaults/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ -vault_auth_path: "jwt" -vault_auth_role: "default-ci" -mvn_config_file: "{{ 
lookup('file', '{{ playbook_dir }}/roles/gitlab/mvn_conf_file') }}" -npm_file: "{{ lookup('file', '{{ playbook_dir }}/roles/gitlab/npm_file') }}" diff --git a/roles/gitlab/mvn_conf_file b/roles/gitlab/mvn_conf_file deleted file mode 100644 index 02b7c700..00000000 --- a/roles/gitlab/mvn_conf_file +++ /dev/null @@ -1,64 +0,0 @@ - - - - - mirror-mi - $${env.NEXUS_USERNAME} - $${env.NEXUS_PASSWORD} - - - nexus - $${env.NEXUS_USERNAME} - $${env.NEXUS_PASSWORD} - - - - - mirror-mi - mirror-mi - $${env.NEXUS_HOST_URL}/$${env.PROJECT_PATH}-repository-group/ - * - - - nexus - nexus - $${env.NEXUS_HOST_URL}/maven-public/ - * - - - mirror-mi - mirror-mi - $${env.NEXUS_HOST_URL}/public/ - * - - - - - mi - - - nexus - $${env.NEXUS_HOST_URL}/$${env.PROJECT_PATH}-repository-group/ - - true - - - true - - - - - - - - default - true - $${env.PROXY_HOST} - $${env.PROXY_PORT} - - - - mi - - - diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 8200dbe8..84290a26 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -1,3 +1,9 @@ +- ansible.builtin.set_fact: + vault_auth_path: "jwt" + vault_auth_role: "default-ci" + npm_file: "{{ lookup('file', '{{ playbook_dir }}/roles/gitlab/npm_file') }}" + mvn_config_file: "{{ lookup('ansible.builtin.template', 'mvn_conf_file.j2') }}" + - name: Install gitlab subscription kubernetes.core.k8s: template: "{{ item }}" @@ -149,7 +155,8 @@ command: > bash -c "echo 'PersonalAccessToken.create!(user_id: 1 , name: \"ANSIBLE-DSO\" - , scopes: [:api, :read_repository, :write_repository]).token' | gitlab-rails console" + , scopes: [:api, :read_repository, :write_repository] + , expires_at: 365.days.from_now).token' | gitlab-rails console" register: token - name: Set new gitlab token @@ -244,21 +251,21 @@ state: "{{ dsc.proxy.enabled | ternary('present', 'absent') }}" variables: - name: "HTTP_PROXY" - value: "{{ dsc.proxy.http_proxy }}" + value: "{{ dsc.proxy.http_proxy | default ('') }}" - name: 
"HTTPS_PROXY" - value: "{{ dsc.proxy.https_proxy }}" + value: "{{ dsc.proxy.https_proxy | default ('') }}" - name: "NO_PROXY" - value: "{{ dsc.proxy.no_proxy }}" + value: "{{ dsc.proxy.no_proxy | default ('') }}" - name: "PROXY_HOST" - value: "{{ dsc.proxy.host }}" + value: "{{ dsc.proxy.host | default ('') }}" - name: "PROXY_PORT" - value: "{{ dsc.proxy.port }}" + value: "{{ dsc.proxy.port | default ('') }}" - name: "http_proxy" - value: "{{ dsc.proxy.http_proxy }}" + value: "{{ dsc.proxy.http_proxy | default ('') }}" - name: "https_proxy" - value: "{{ dsc.proxy.https_proxy }}" + value: "{{ dsc.proxy.https_proxy | default ('') }}" - name: "no_proxy" - value: "{{ dsc.proxy.no_proxy }}" + value: "{{ dsc.proxy.no_proxy | default ('') }}" - name: "Set or update CA_BUNDLE variable" community.general.gitlab_group_variable: diff --git a/roles/gitlab/templates/gitlab-instance.yaml.j2 b/roles/gitlab/templates/gitlab-instance.yaml.j2 index 7cbf9152..a5e29bd7 100644 --- a/roles/gitlab/templates/gitlab-instance.yaml.j2 +++ b/roles/gitlab/templates/gitlab-instance.yaml.j2 @@ -80,10 +80,15 @@ global: {% endfor %} class: none configureCertmanager: false +{% if dsc.ingress.tls.type == 'none' %} + tls: + enabled: false +{% else %} tls: enabled: true {% if dsc.ingress.tls.type == 'tlsSecret' %} secretName: {{ dsc.ingress.tls.tlsSecret.name }} +{% endif %} {% endif %} extraEnv: {% if dsc.proxy.enabled %} diff --git a/roles/gitlab/templates/mvn_conf_file.j2 b/roles/gitlab/templates/mvn_conf_file.j2 new file mode 100644 index 00000000..5cd7b865 --- /dev/null +++ b/roles/gitlab/templates/mvn_conf_file.j2 @@ -0,0 +1,64 @@ + + + + mirror-dso + $${env.NEXUS_USERNAME} + $${env.NEXUS_PASSWORD} + + + nexus + $${env.NEXUS_USERNAME} + $${env.NEXUS_PASSWORD} + + + + + mirror-dso + mirror-dso + https://{{ nexus_domain }}/repository/$${env.PROJECT_PATH}-repository-group/ + * + + + nexus + nexus + https://{{ nexus_domain }}/repository/maven-public/ + * + + + mirror-dso + mirror-dso + https://{{ 
nexus_domain }}/repository/public/ + * + + + + + generic-user + + + nexus + https://{{ nexus_domain }}/repository/$${env.PROJECT_PATH}-repository-group/ + + true + + + true + + + + + +{% if dsc.proxy.enabled %} + + + default + true + {{ dsc.proxy.host }} + {{ dsc.proxy.port }} + + +{% endif %} + + generic-user + + diff --git a/roles/harbor/templates/values.yaml.j2 b/roles/harbor/templates/values.yaml.j2 index e9c76413..d8764230 100644 --- a/roles/harbor/templates/values.yaml.j2 +++ b/roles/harbor/templates/values.yaml.j2 @@ -2,6 +2,9 @@ expose: imagePullPolicy: IfNotPresent type: ingress tls: +{% if dsc.ingress.tls.type == 'none' %} + enabled: false +{% else %} enabled: true {% if dsc.ingress.tls.type == 'tlsSecret' %} certSource: secret @@ -13,6 +16,7 @@ expose: secret: secretName: harbor-ingress notarySecretName: harbor-ingress-notary +{% endif %} {% endif %} ingress: hosts: @@ -40,12 +44,15 @@ externalURL: https://{{ harbor_domain }} persistence: enabled: true resourcePolicy: keep + persistentVolumeClaim: + registry: + size: "{{ dsc.harbor.pvcRegistrySize }}" harborAdminPassword: "{{ dsc.harbor.adminPassword }}" {% if dsc.proxy.enabled %} proxy: - http_proxy: {{ dsc.proxy.http_proxy }} - https_proxy: {{ dsc.proxy.https_proxy }} - no_proxy: {{ dsc.proxy.no_proxy }},.local,.internal + httpProxy: {{ dsc.proxy.http_proxy }} + httpsProxy: {{ dsc.proxy.https_proxy }} + noProxy: {{ dsc.proxy.no_proxy }},.local,.internal components: - nginx - portal diff --git a/roles/keycloak/tasks/client.yaml b/roles/keycloak/tasks/client.yaml index a3a2a542..4a8394ac 100644 --- a/roles/keycloak/tasks/client.yaml +++ b/roles/keycloak/tasks/client.yaml @@ -29,7 +29,6 @@ clientId: "{{ item.clientId }}" register: kc_clients_secrets -- debug: var=kc_clients_secrets - name: Set Keycloak clients secrets in Keycloak namespace when: item.clientAuthenticatorType is defined and item.clientAuthenticatorType == 'client-secret' kubernetes.core.k8s: @@ -57,4 +56,4 @@ metadata: name: 
"keycloak-client-secret-{{ item.clientId }}" namespace: "{{ dsc.keycloak.namespace }}" - type: Opaque \ No newline at end of file + type: Opaque diff --git a/roles/keycloak/tasks/main.yml b/roles/keycloak/tasks/main.yml index 79b714e4..eeac3734 100644 --- a/roles/keycloak/tasks/main.yml +++ b/roles/keycloak/tasks/main.yml @@ -42,7 +42,6 @@ delay: 5 - name: Get Keycloak admin password secret - no_log: true kubernetes.core.k8s_info: namespace: "{{ dsc.keycloak.namespace }}" kind: Secret @@ -51,7 +50,6 @@ - name: Set Keycloak admin password secret when: kc_adm_pass_secret.resources | length == 0 - no_log: true kubernetes.core.k8s: state: present definition: @@ -98,7 +96,6 @@ delay: 5 - name: Get Keycloak admin password - no_log: true kubernetes.core.k8s_info: namespace: "{{ dsc.keycloak.namespace }}" kind: Secret @@ -106,13 +103,11 @@ register: kc_adm_pass - name: Set Keycloak admin credentials facts - no_log: true ansible.builtin.set_fact: keycloak_admin_password: "{{ kc_adm_pass.resources[0].data['admin-password']|b64decode }}" keycloak_admin: "admin" - name: Update console inventory - no_log: true kubernetes.core.k8s: kind: ConfigMap name: dso-config @@ -124,7 +119,6 @@ KEYCLOAK_ADMIN: "{{ keycloak_admin }}" - name: Get Keycloak API token - no_log: true ansible.builtin.uri: url: "https://{{ keycloak_domain }}/realms/master/protocol/openid-connect/token" method: POST @@ -135,12 +129,10 @@ register: kc_token - name: Set kc_access_token fact - no_log: true ansible.builtin.set_fact: kc_access_token: "{{ kc_token.json.access_token }}" - name: Get keycloak realms from API - no_log: true ansible.builtin.uri: url: "https://{{ keycloak_domain }}/admin/realms" method: GET @@ -154,7 +146,6 @@ - name: Create dso realm when: kc_realms.json | selectattr('realm', 'equalto', 'dso') | length == 0 - no_log: true community.general.keycloak_realm: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" auth_client_id: admin-cli @@ -169,7 +160,6 @@ enabled: true - name: Get keycloak dso 
realm users from API - no_log: true ansible.builtin.uri: url: "https://{{ keycloak_domain }}/admin/realms/dso/users" method: GET @@ -182,7 +172,6 @@ register: kc_dso_users - name: Find dso admin secret - no_log: true kubernetes.core.k8s_info: namespace: "{{ dsc.keycloak.namespace }}" kind: Secret @@ -191,7 +180,6 @@ - name: Create dso admin secret and user when: (dso_admin_secret.resources | length == 0) or (kc_dso_users.json | selectattr('username', 'equalto', dso_admin_secret.resources[0].data.ADMIN_USER|b64decode) | length == 0) - no_log: true block: - name: Generate admin user password ansible.builtin.set_fact: @@ -244,7 +232,6 @@ # force: true ## Ne fontionne pas quand user supprimé via la GUI et tâche relancée. - name: Get dso keycloak client scopes from API - no_log: true ansible.builtin.uri: url: "https://{{ keycloak_domain }}/admin/realms/dso/client-scopes" method: GET @@ -258,7 +245,6 @@ - name: Create generic keycloak client scope when: kc_client_scopes.json | selectattr('name', 'equalto', 'generic') | length == 0 - no_log: true community.general.keycloak_clientscope: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" auth_client_id: admin-cli @@ -282,7 +268,6 @@ with_items: "{{ keycloak_clients }}" - name: Create base admins groups - no_log: true community.general.keycloak_group: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" auth_client_id: admin-cli diff --git a/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 b/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 index b9bcc09d..fa9bac93 100644 --- a/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 +++ b/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 @@ -5,7 +5,7 @@ metadata: namespace: {{ dsc.keycloak.namespace }} spec: instances: 2 - imageName: {{ dsc.keycloak.postgreSQLimageName }} + imageName: ghcr.io/cloudnative-pg/postgresql:15.3 # Parameters and pg_hba configuration will be append # to the default ones to make the cluster work diff --git 
a/roles/keycloak/templates/values.yaml.j2 b/roles/keycloak/templates/values.yaml.j2 index 36e653fe..0c80e130 100644 --- a/roles/keycloak/templates/values.yaml.j2 +++ b/roles/keycloak/templates/values.yaml.j2 @@ -1,4 +1,5 @@ image: + registry: docker.io pullPolicy: "IfNotPresent" auth: @@ -15,16 +16,12 @@ proxy: "edge" httpRelativePath: "/" -initContainers: "[]" - configuration: "" existingConfigmap: "" extraStartupArgs: "" -initdbScripts: "" - initdbScriptsConfigMap: "" command: [] @@ -60,7 +57,7 @@ podSecurityContext: containerSecurityContext: enabled: false - resources: +resources: limits: memory: "2048Mi" requests: @@ -123,19 +120,21 @@ ingress: servicePort: "http" annotations: route.openshift.io/termination: "edge" -{% for key, val in dsc.ingress.annotations.items() %} - {{ key }}: "{{ val }}" -{% endfor %} labels: app: "keycloak" {% for key, val in dsc.ingress.labels.items() %} {{ key }}: "{{ val }}" {% endfor %} tls: true +{% if dsc.ingress.tls.type == "tlsSecret" %} selfSigned: false extraHosts: [] extraPaths: [] - extraTls: [] + extraTls: + - hosts: + - "{{ keycloak_domain }}" + secretName: "{{ dsc.ingress.tls.tlsSecret.name }}" +{% endif %} secrets: [] extraRules: [] @@ -211,4 +210,4 @@ cache: logging: output: "default" - level: "INFO" \ No newline at end of file + level: "INFO" diff --git a/roles/keycloak/vars/main.yaml b/roles/keycloak/vars/main.yaml index 4d4aaca7..706da668 100644 --- a/roles/keycloak/vars/main.yaml +++ b/roles/keycloak/vars/main.yaml @@ -9,14 +9,15 @@ keycloak_clients: - "https://{{ gitlab_domain }}" defaultClientScopes: - "generic" + publicClient: false - clientId: console-frontend standardFlowEnabled: true - publicClient: true redirectUris: "{{ lookup('ansible.builtin.template', 'console-frontend-redirectUris.yaml') | from_yaml }}" webOrigins: "{{ lookup('ansible.builtin.template', 'console-frontend-webOrigins.yaml') | from_yaml }}" defaultClientScopes: - generic + publicClient: true - clientId: console-backend 
clientAuthenticatorType: client-secret @@ -28,6 +29,7 @@ keycloak_clients: - https://{{ console_domain }} defaultClientScopes: - generic + publicClient: false - clientId: argo-client clientAuthenticatorType: client-secret @@ -39,6 +41,7 @@ keycloak_clients: - https://{{ argocd_domain }} defaultClientScopes: - generic + publicClient: false - clientId: portail-client clientAuthenticatorType: client-secret @@ -49,6 +52,7 @@ keycloak_clients: - "*" defaultClientScopes: - generic + publicClient: false - clientId: sonar-client clientAuthenticatorType: client-secret @@ -59,6 +63,7 @@ keycloak_clients: - https://{{ sonar_domain }} defaultClientScopes: - generic + publicClient: false - clientId: harbor-client clientAuthenticatorType: client-secret @@ -68,4 +73,5 @@ keycloak_clients: webOrigins: - https://{{ harbor_domain }} defaultClientScopes: - - generic \ No newline at end of file + - generic + publicClient: false diff --git a/roles/socle-config/files/config.yaml b/roles/socle-config/files/config.yaml new file mode 100644 index 00000000..d726df83 --- /dev/null +++ b/roles/socle-config/files/config.yaml @@ -0,0 +1,36 @@ +--- +# https://kubernetes.io/docs/concepts/configuration/configmap/ +kind: DsoSocleConfig +apiVersion: cloud-pi-native.fr/v1alpha +metadata: + name: conf-dso +spec: + argocd: + namespace: dso-argocd + subDomain: argocd + cloudnativepg: + namespace: dso-cloudnativepg + console: + namespace: dso-console + subDomain: console + gitlab: + namespace: dso-gitlab + subDomain: gitlab + insecureCI: false + harbor: + namespace: dso-harbor + subDomain: harbor + nexus: + namespace: dso-nexus + subDomain: nexus + keycloak: + namespace: dso-keycloak + subDomain: keycloak + sonarqube: + namespace: dso-sonarqube + subDomain: sonar + vault: + namespace: dso-vault + subDomain: vault + sops: + namespace: dso-sops diff --git a/roles/socle-config/files/cr-conf-dso-default.yaml b/roles/socle-config/files/cr-conf-dso-default.yaml index cc1ae99b..689a639b 100644 --- 
a/roles/socle-config/files/cr-conf-dso-default.yaml +++ b/roles/socle-config/files/cr-conf-dso-default.yaml @@ -1,44 +1,38 @@ --- -# https://kubernetes.io/docs/concepts/configuration/configmap/ kind: DsoSocleConfig apiVersion: cloud-pi-native.fr/v1alpha metadata: name: conf-dso spec: + certmanager: {} additionalsCA: [] exposedCA: type: none argocd: - chartVersion: "" - certmanager: - version: "" - cloudnativepg: - chartVersion: "" - namespace: "" + admin: + enabled: false + cloudnativepg: {} console: dbPassword: myAwesomePassword - gitlab: - chartVersion: "" + gitlab: {} global: - environment: "" - projectsRootDir: [] - rootDomain: "" + environment: production + projectsRootDir: + - forge + rootDomain: + .example.com harbor: adminPassword: anotherGreatPassword - chartVersion: "" - ingress: {} - keycloak: - chartVersion: "" - kubed: - chartVersion: "" + pvcRegistrySize: 20Gi + ingress: + tls: + type: none + kubed: {} + keycloak: {} nexus: - storageSize: "" - imageTag: "" + storageSize: 25Gi proxy: enabled: false - sonarqube: - chartVersion: "" - sops: - chartVersion: "" - vault: - chartVersion: "" + sonarqube: {} + sops: {} + vault: {} diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 3b328abb..ac578ddd 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -47,6 +47,65 @@ spec: - name type: object type: array + exposedCA: + description: Private CA cert needed to validate HTTPS traffic between tools. 
+ type: object + required: + - type + properties: + configmap: + description: The configmap with private CA + type: object + properties: + namespace: + description: The configmap namespace + type: string + name: + description: The configmap name + type: string + key: + description: The configmap key providing the Private CA cert + type: string + required: + - namespace + - name + - key + secret: + description: The secret with private CA + type: object + properties: + namespace: + description: The secret namespace + type: string + name: + description: The secret name + type: string + key: + description: The secret key providing the Private CA cert + type: string + required: + - namespace + - name + - key + url: + description: An URL providing the private CA cert (it should be plain text) + type: string + type: + description: | + Method to find the private CA cert: + - none: No private CA cert needed + - configmap: Private CA cert is stored as a configmap + - secret: Private CA cert is stored as a secret + - url: Private CA cert comes from an external URL + - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly + type: string + enum: + - none + - configmap + - secret + - url + - certmanager + default: none argocd: description: Configuration for ArgoCD. properties: @@ -65,15 +124,12 @@ spec: - enabled type: object namespace: - default: dso-argocd description: The namespace for ArgoCD. type: string subDomain: - default: argocd description: The subdomain for ArgoCD. type: string chartVersion: - default: 4.7.13 description: ArgoCD Bitnami helm chart version (e.g., "4.7.13"). type: string values: @@ -84,32 +140,24 @@ spec: default: {} x-kubernetes-preserve-unknown-fields: true required: - - chartVersion + - admin type: object certmanager: description: Configuration for Cert Manager. properties: version: - default: v1.11.1 description: Specifies the version of Cert Manager to use. 
type: string - required: - - version type: object cloudnativepg: description: Configuration for CloudNativePG. properties: namespace: - default: cnpg-system description: The namespace for cloudnativepg. type: string chartVersion: - default: 0.18.2 description: CloudNativePG helm chart version (e.g., "0.18.2"). type: string - required: - - chartVersion - - namespace type: object console: description: Configuration for the console. @@ -118,93 +166,28 @@ spec: description: The password for the console's database. type: string namespace: - default: dso-console description: The namespace for console. type: string release: - default: 5.6.0 - description: Console version (e.g., "5.6.0"). + description: Console version (e.g., "4.1.0"). type: string subDomain: - default: console description: The subdomain for console. type: string required: - dbPassword type: object - exposedCA: - description: Private CA cert needed to validate HTTPS traffic between tools. - type: object - required: - - type - properties: - configmap: - description: The configmap with private CA - type: object - properties: - namespace: - description: The configmap namespace - type: string - name: - description: The configmap name - type: string - key: - description: The configmap key providing the Private CA cert - type: string - required: - - namespace - - name - - key - secret: - description: The secret with private CA - type: object - properties: - namespace: - description: The secret namespace - type: string - name: - description: The secret name - type: string - key: - description: The secret key providing the Private CA cert - type: string - required: - - namespace - - name - - key - url: - description: An URL providing the private CA cert (it should be plain text) - type: string - type: - description: | - Method to find the private CA cert: - - none: No private CA cert needed - - configmap: Private CA cert is stored as a configmap - - secret: Private CA cert is stored as a secret - - url: Private CA 
cert comes from an external URL - - certmanager: Private CA cert is managed by certmanager, please use ingress.tls.ca accordingly - type: string - enum: - - none - - configmap - - secret - - url - - certmanager - default: none gitlab: description: Configuration for GitLab. properties: namespace: - default: dso-gitlab description: The namespace for GitLab. type: string subDomain: - default: gitlab description: The subdomain for GitLab. type: string chartVersion: - default: 7.0.8 - description: GitLab chart version (e.g., "7.0.8"). + description: GitLab chart version (e.g., "6.11.10"). type: string values: description: | @@ -219,9 +202,6 @@ spec: If you use a private CA in exposed_ca, you should set it to true. Configuring tools in pipelines container is not an easy job. type: boolean - default: false - required: - - chartVersion type: object global: description: Global configuration not specific to one service @@ -262,17 +242,18 @@ spec: description: Administrator password for Harbor. type: string namespace: - default: dso-harbor description: The namespace for Harbor. type: string subDomain: - default: harbor description: The subdomain for Harbor. type: string chartVersion: - default: 1.12.2 description: Harbor helm chart version (e.g., "1.12.2"). type: string + pvcRegistrySize: + description: "Size for registry, default: 50Gi" + default: 50Gi + type: string values: description: | You can merge customs values for harbor, it will be merged with roles/harbor/tasks/main.yaml @@ -282,7 +263,7 @@ spec: x-kubernetes-preserve-unknown-fields: true required: - adminPassword - - chartVersion + - pvcRegistrySize type: object ingress: description: General configuration for ingress. @@ -373,31 +354,14 @@ spec: description: Configuration for Keycloak. properties: namespace: - default: dso-keycloak description: The namespace for Keycloak. type: string subDomain: - default: keycloak description: The subdomain for Keycloak. 
type: string chartVersion: - default: 16.0.3 description: Keycloak chart version (e.g., "16.0.3"). type: string - postgreSQLimageName: - default: "" - description: | - PostgreSQL image name that will be installed by CNPG operator. - By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. - See : https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster - You can override this by setting the postgreSQLimageName here. - In example, you could set the value to "ghcr.io/cloudnative-pg/postgresql:14.9". - Remember you should NEVER use tags like "latest" or just "14" in a production environment. - More about container image requirements here : - https://cloudnative-pg.io/documentation/1.20/container_images/ - You can browse available image tags here : - https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql - type: string values: description: | You can merge customs values for keycloak, they will be merged with roles/keycloak/templates/values.j2 @@ -406,28 +370,21 @@ spec: type: object default: {} x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion type: object kubed: description: Configuration for Kubed (config-syncer). properties: chartVersion: - default: v0.13.2 description: Kubed helm chart version (e.g., "v0.13.2"). type: string - required: - - chartVersion type: object nexus: description: Configuration for Nexus. properties: namespace: - default: dso-nexus description: The namespace for Nexus. type: string subDomain: - default: nexus description: The subdomain for Nexus. type: string storageSize: @@ -437,12 +394,10 @@ spec: type: string default: 25Gi imageTag: - default: 3.56.0 description: Nexus version based on image tag (e.g., "3.56.0"). type: string required: - storageSize - - imageTag type: object proxy: description: Proxy configuration for tools. @@ -466,7 +421,6 @@ spec: so easy to configure. 
\nExample: .cluster.local,.svc,10.0.0.0/8,127.0.0.1,192.168.0.0/16,localhost,svc.cluster.local,localdomain\n" type: string port: - default: "3128" description: Distant proxy port listenning type: string required: @@ -476,37 +430,14 @@ spec: description: Configuration for SonarQube. properties: namespace: - default: dso-sonarqube description: The namespace for SonarQube. type: string subDomain: - default: sonarqube description: The subdomain for SonarQube. type: string chartVersion: - default: 3.2.10 description: SonarQube Bitnami helm chart version (e.g., "3.2.10"). type: string - postgreSQLimageName: - default: "" - description: | - PostgreSQL image name that will be installed by CNPG operator. - By default, the operator will install the latest available minor version of the latest major version of PostgreSQL when the operator was released. - See : https://cloudnative-pg.io/documentation/1.20/quickstart/#part-3-deploy-a-postgresql-cluster - You can override this by setting the postgreSQLimageName here. - In example, you could set the value to "ghcr.io/cloudnative-pg/postgresql:14.9". - Remember you should NEVER use tags like "latest" or just "14" in a production environment. - More about container image requirements here : - https://cloudnative-pg.io/documentation/1.20/container_images/ - You can browse available image tags here : - https://github.com/cloudnative-pg/postgres-containers/pkgs/container/postgresql - type: string - postgreSQLvolumeSize: - description: | - The storage size for SonarQube PostgreSQL PVC. - Must comply with Kubernetes size definitions (i.e 25Gi). - type: string - default: 25Gi values: description: | You can merge customs values for sonarqube, it will be merged with roles/sonarqube/tasks/main.yaml @@ -514,43 +445,33 @@ spec: type: object default: {} x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion - - postgreSQLvolumeSize type: object sops: properties: namespace: - default: dso-sops description: Namespace for SOPS. 
type: string chartVersion: - default: 0.15.1 description: SOPS helm chart version (e.g., "0.15.1"). type: string values: + default: {} description: | You can merge customs values for sops, it will be merged with roles/sops/tasks/main.yaml See https://github.com/isindir/sops-secrets-operator/tree/master/chart/helm3/sops-secrets-operator type: object - default: {} x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion type: object vault: description: Configuration for Vault. properties: namespace: - default: dso-vault description: The namespace for Vault. type: string subDomain: - default: vault description: The subdomain for Vault. type: string chartVersion: - default: 0.25.0 description: Hashicorp Vault helm chart version (e.g., "0.25.0"). type: string values: @@ -560,8 +481,6 @@ spec: type: object default: {} x-kubernetes-preserve-unknown-fields: true - required: - - chartVersion type: object required: - additionalsCA diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml new file mode 100644 index 00000000..4f633b2c --- /dev/null +++ b/roles/socle-config/files/releases.yaml @@ -0,0 +1,42 @@ +kind: DsoSocleConfig +apiVersion: cloud-pi-native.fr/v1alpha +metadata: + name: conf-dso +spec: + argocd: + # https://artifacthub.io/packages/helm/bitnami/argo-cd + chartVersion: 4.7.19 + certmanager: + # https://github.com/cert-manager/cert-manager/releases + version: v1.11.0 + cloudnativepg: + # https://artifacthub.io/packages/helm/cloudnative-pg/cloudnative-pg + chartVersion: 0.18.2 + console: + # https://github.com/cloud-pi-native/console/releases + release: "5.9" + gitlab: + # https://artifacthub.io/packages/helm/gitlab/gitlab + chartVersion: "7.3.4" + harbor: + # https://artifacthub.io/packages/helm/harbor/harbor + chartVersion: 1.12.2 + keycloak: + # https://artifacthub.io/packages/helm/bitnami/keycloak + chartVersion: 16.1.5 + kubed: + # https://artifacthub.io/packages/helm/appscode/kubed + chartVersion: 0.13.2 + 
nexus: + # https://hub.docker.com/r/sonatype/nexus3/ + imageTag: 3.56.0 + sonarqube: + # https://artifacthub.io/packages/helm/sonarqube/sonarqube + chartVersion: 10.2.1+800 + tag: 9.9.2-community + sops: + # https://artifacthub.io/packages/helm/sops-secrets-operator/sops-secrets-operator + chartVersion: 0.17.0 + vault: + # https://artifacthub.io/packages/helm/hashicorp/vault + chartVersion: 0.25.0 diff --git a/roles/socle-config/tasks/main.yaml b/roles/socle-config/tasks/main.yaml index 2b22fc1a..4836a3d0 100644 --- a/roles/socle-config/tasks/main.yaml +++ b/roles/socle-config/tasks/main.yaml @@ -93,7 +93,16 @@ - name: Set DSC fact ansible.builtin.set_fact: - dsc: "{{ socle_config.resources[0].spec }}" + dsc: "{{ socle_config.resources[0] }}" + +- ansible.builtin.set_fact: + dsc_default_config: "{{ lookup('ansible.builtin.file', 'config.yaml') | from_yaml }}" + dsc_default_releases: "{{ lookup('ansible.builtin.file', 'releases.yaml') | from_yaml }}" + +- ansible.builtin.set_fact: + dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True)}}" +- ansible.builtin.set_fact: + dsc: "{{ dsc.spec }}" - name: Set root_domain fact ansible.builtin.set_fact: diff --git a/roles/sonarqube/filter_plugins/settings_filter.py b/roles/sonarqube/filter_plugins/settings_filter.py index 97a466df..4be3220e 100644 --- a/roles/sonarqube/filter_plugins/settings_filter.py +++ b/roles/sonarqube/filter_plugins/settings_filter.py @@ -20,7 +20,6 @@ def to_query_string(queries): '&value='+ urllib.parse.quote_plus(query['value']) ) - print(queries_strings) return queries_strings def plugins_includes(sonar_plugins_list, key_to_search): diff --git a/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 index b79669fd..5dbc4f0c 100644 --- a/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 +++ b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 @@ -4,8 +4,8 @@ metadata: name: pg-cluster-sonar 
namespace: {{ dsc.sonarqube.namespace }} spec: - instances: 1 - imageName: {{ dsc.sonarqube.postgreSQLimageName }} + instances: 2 + imageName: ghcr.io/cloudnative-pg/postgresql:15.4 # Parameters and pg_hba configuration will be append # to the default ones to make the cluster work @@ -31,5 +31,5 @@ spec: # Require 1Gi of space per instance using default storage class storage: - size: {{ dsc.sonarqube.postgreSQLvolumeSize }} + size: 5Gi diff --git a/roles/sonarqube/templates/values.yaml.j2 b/roles/sonarqube/templates/values.yaml.j2 index 7acc1fc3..96367d79 100644 --- a/roles/sonarqube/templates/values.yaml.j2 +++ b/roles/sonarqube/templates/values.yaml.j2 @@ -8,6 +8,8 @@ OpenShift: image: pullPolicy: IfNotPresent + tag: {{ dsc.sonarqube.tag }} + repository: docker.io/sonarqube securityContext: fsGroup: 1000 @@ -23,6 +25,7 @@ ingress: hosts: - name: "{{ sonar_domain }}" pathType: Prefix + path: "/" annotations: route.openshift.io/termination: "edge" {% for key, val in dsc.ingress.annotations.items() %} diff --git a/roles/vault/tasks/check.yml b/roles/vault/tasks/check.yml index 0a7025ea..05e958c3 100644 --- a/roles/vault/tasks/check.yml +++ b/roles/vault/tasks/check.yml @@ -1,29 +1,32 @@ - name: Get vault health ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/health" - status_code: [200, 503, 501] + url: "https://{{ vault_domain }}/v1/sys/health?sealedcode=200&uninitcode=200" + status_code: [200, 503] register: vault_health retries: 12 delay: 5 until: vault_health.json is defined -- name: Set vault_status to OK - ansible.builtin.set_fact: - vault_status: OK - when: vault_health.status == 200 - -- name: Set vault_status to "not init" - ansible.builtin.set_fact: - vault_status: not init - when: vault_health.status == 501 - - name: Set vault_status to unavailable ansible.builtin.set_fact: vault_status: unavailable when: vault_health.status == 503 -- name: Set vault_status to sealed - 
ansible.builtin.set_fact: - vault_status: sealed - when: vault_health.status == 503 and vault_health.json is defined and vault_health.json.sealed +- name: If Vault json is defined + when: vault_health.json is defined + block: + - name: Set vault_status to sealed + ansible.builtin.set_fact: + vault_status: sealed + when: vault_health.json.sealed + + - name: Set vault_status to "not init" + ansible.builtin.set_fact: + vault_status: not init + when: not vault_health.json.initialized + + - name: Set vault_status to OK + ansible.builtin.set_fact: + vault_status: OK + when: vault_health.json.initialized and not vault_health.json.sealed From 7b72c48ce6dca623dcbd6111c60d0ce19c5b8cd1 Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Tue, 3 Oct 2023 00:55:14 +0200 Subject: [PATCH 06/31] fix: :art: support helm values for console --- roles/console-dso/tasks/main.yaml | 8 +++ roles/console-dso/templates/app.yaml.j2 | 56 +------------------ roles/console-dso/templates/values.yaml.j2 | 37 ++++++++++++ .../files/cr-conf-dso-default.yaml | 1 + roles/socle-config/files/crd-conf-dso.yaml | 5 ++ roles/socle-config/files/releases.yaml | 2 +- 6 files changed, 55 insertions(+), 54 deletions(-) create mode 100644 roles/console-dso/templates/values.yaml.j2 diff --git a/roles/console-dso/tasks/main.yaml b/roles/console-dso/tasks/main.yaml index a81762e0..cbedbb91 100644 --- a/roles/console-dso/tasks/main.yaml +++ b/roles/console-dso/tasks/main.yaml @@ -42,6 +42,14 @@ {{ exposed_ca_pem }} when: dsc.exposedCA.type != 'none' +- name: Set extra env vars + ansible.builtin.set_fact: + console_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" + +- name: Merge with argo user values + ansible.builtin.set_fact: + console_values: "{{ console_values | combine(dsc.console['values'], recursive=True) | to_yaml }}" + - name: Apply app kubernetes.core.k8s: template: app.yaml.j2 diff --git a/roles/console-dso/templates/app.yaml.j2 
b/roles/console-dso/templates/app.yaml.j2 index 1fe89e99..eb1600c2 100644 --- a/roles/console-dso/templates/app.yaml.j2 +++ b/roles/console-dso/templates/app.yaml.j2 @@ -16,59 +16,9 @@ spec: targetRevision: v{{ dsc.console.release }} releaseName: console-pi helm: - parameters: - - name: ingress.hosts[0] - value: {{ console_domain }} - - name: postgres.container.db - value: dso-console-db - - name: postgres.container.user - value: dso - - name: postgres.container.pass - value: {{ dsc.console.dbPassword }} - - name: keycloak.clientIdBackend - value: {{ console_backend_secret.resources[0].data.CLIENT_ID | b64decode }} - - name: keycloak.clientSecretBackend - value: {{ console_backend_secret.resources[0].data.CLIENT_SECRET | b64decode }} - - name: keycloak.clientIdFrontend - value: {{ console_frontend_secret.resources[0].data.CLIENT_ID | b64decode }} - - name: keycloak.redirectUri - value: https://{{ console_domain }} - - name: keycloak.sessionSecret - value: {{ session_secret }} - - name: keycloak.domain - value: "{{ keycloak_domain }}" - - name: keycloak.realm - value: dso -{% if dsc.proxy.enabled %} - - name: server.container.env.HTTP_PROXY - value: {{ dsc.proxy.http_proxy }} - - name: server.container.env.HTTPS_PROXY - value: {{ dsc.proxy.https_proxy }} - - name: server.container.env.NO_PROXY - value: {{ dsc.proxy.no_proxy }} -{% endif %} -{% if dsc.exposedCA.type != 'none' %} - - name: server.extraCa.name - value: bundle - - name: server.extraCa.key - value: ca.pem -{% endif %} -{% for key, val in dsc.ingress.annotations.items() %} - - name: ingress.annotations.{{ key | regex_escape }} - value: {{ val }} -{% endfor %} -{% for key, val in dsc.ingress.labels.items() %} - - name: ingress.labels.{{ key | regex_escape }} - value: {{ val }} -{% endfor %} -{% if dsc.ingress.tls.type == 'tlsSecret' %} - - name: ingress.tls.secretName - value: {{ dsc.ingress.tls.tlsSecret.name }} -{% endif %} -{% if dsc.ingress.tls.type == 'none' %} - - name: ingress.tls.enabled - value: 
"false" -{% endif %} + parameters: [] + values: | +{{ console_values | indent(8, true) }} syncPolicy: automated: {} syncOptions: diff --git a/roles/console-dso/templates/values.yaml.j2 b/roles/console-dso/templates/values.yaml.j2 new file mode 100644 index 00000000..22739916 --- /dev/null +++ b/roles/console-dso/templates/values.yaml.j2 @@ -0,0 +1,37 @@ +ingress: + hosts: + - {{ console_domain }} + annotations: {{ dsc.ingress.annotations }} + labels: {{ dsc.ingress.labels }} + tls: + enabled: {{ dsc.ingress.tls.type != 'none' }} +{% if dsc.ingress.tls.type == 'tlsSecret' %} + secretName: {{ dsc.ingress.tls.tlsSecret.name }} +{% endif %} + +postgres: + container: + db: dso-console-db + user: dso + pass: {{ dsc.console.dbPassword }} +keycloak: + clientIdBackend: {{ console_backend_secret.resources[0].data.CLIENT_ID | b64decode }} + clientSecretBackend: {{ console_backend_secret.resources[0].data.CLIENT_SECRET | b64decode }} + clientIdFrontend: {{ console_frontend_secret.resources[0].data.CLIENT_ID | b64decode }} + redirectUri: https://{{ console_domain }} + sessionSecret: {{ session_secret }} + domain: "{{ keycloak_domain }}" + realm: dso +server: + container: + env: +{% if dsc.proxy.enabled %} + HTTP_PROXY: {{ dsc.proxy.http_proxy }} + HTTPS_PROXY: {{ dsc.proxy.https_proxy }} + NO_PROXY: {{ dsc.proxy.no_proxy }} +{% endif %} +{% if dsc.exposedCA.type != 'none' %} + extraCa: + name: bundle + key: ca.pem +{% endif %} diff --git a/roles/socle-config/files/cr-conf-dso-default.yaml b/roles/socle-config/files/cr-conf-dso-default.yaml index 689a639b..a4a92c0a 100644 --- a/roles/socle-config/files/cr-conf-dso-default.yaml +++ b/roles/socle-config/files/cr-conf-dso-default.yaml @@ -14,6 +14,7 @@ spec: cloudnativepg: {} console: dbPassword: myAwesomePassword + values: {} gitlab: {} global: environment: production diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index ac578ddd..f8b755fe 100644 --- 
a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -174,6 +174,11 @@ spec: subDomain: description: The subdomain for console. type: string + values: + description: Extra helm values for console + type: object + default: {} + x-kubernetes-preserve-unknown-fields: true required: - dbPassword type: object diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml index 4f633b2c..4db15bf5 100644 --- a/roles/socle-config/files/releases.yaml +++ b/roles/socle-config/files/releases.yaml @@ -14,7 +14,7 @@ spec: chartVersion: 0.18.2 console: # https://github.com/cloud-pi-native/console/releases - release: "5.9" + release: "5.9.1" gitlab: # https://artifacthub.io/packages/helm/gitlab/gitlab chartVersion: "7.3.4" From 5565507c7bec21a9cb2a0d13fe9dc31d02d767bd Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Tue, 3 Oct 2023 00:55:58 +0200 Subject: [PATCH 07/31] fix: :poop: test keycloak ingress parameters --- roles/keycloak/templates/values.yaml.j2 | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/roles/keycloak/templates/values.yaml.j2 b/roles/keycloak/templates/values.yaml.j2 index 0c80e130..db0b3caa 100644 --- a/roles/keycloak/templates/values.yaml.j2 +++ b/roles/keycloak/templates/values.yaml.j2 @@ -119,6 +119,9 @@ ingress: path: / servicePort: "http" annotations: +{% for key, val in dsc.ingress.annotations.items() %} + {{ key }}: "{{ val }}" +{% endfor %} route.openshift.io/termination: "edge" labels: app: "keycloak" @@ -126,13 +129,13 @@ ingress: {{ key }}: "{{ val }}" {% endfor %} tls: true -{% if dsc.ingress.tls.type == "tlsSecret" %} selfSigned: false extraHosts: [] extraPaths: [] +{% if dsc.ingress.tls.type == "tlsSecret" %} extraTls: - hosts: - - "{{ keycloak_domain }}" + - "{{ keycloak_domain }}" secretName: "{{ dsc.ingress.tls.tlsSecret.name }}" {% endif %} secrets: [] From 10db02db63d3bacaa76083c902df75e01d727846 Mon Sep 
17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Tue, 3 Oct 2023 13:25:10 +0200 Subject: [PATCH 08/31] fix: :art: variabilize postgres clusters pvc size --- roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 | 2 +- roles/socle-config/files/config.yaml | 2 ++ roles/socle-config/files/crd-conf-dso.yaml | 6 ++++++ roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 | 2 +- 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 b/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 index fa9bac93..ffface43 100644 --- a/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 +++ b/roles/keycloak/templates/pg-cluster-keycloak.yaml.j2 @@ -31,4 +31,4 @@ spec: # Require 1Gi of space per instance using default storage class storage: - size: 1Gi + size: {{ dsc.keycloak.postgresPvcSize }} diff --git a/roles/socle-config/files/config.yaml b/roles/socle-config/files/config.yaml index d726df83..d5d7cdbf 100644 --- a/roles/socle-config/files/config.yaml +++ b/roles/socle-config/files/config.yaml @@ -26,9 +26,11 @@ spec: keycloak: namespace: dso-keycloak subDomain: keycloak + postgresPvcSize: 1Gi sonarqube: namespace: dso-sonarqube subDomain: sonar + postgresPvcSize: 5Gi vault: namespace: dso-vault subDomain: vault diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index f8b755fe..5edf1e1d 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -367,6 +367,9 @@ spec: chartVersion: description: Keycloak chart version (e.g., "16.0.3"). type: string + postgresPvcSize: + description: Size for postgres' pvc + type: string values: description: | You can merge customs values for keycloak, they will be merged with roles/keycloak/templates/values.j2 @@ -443,6 +446,9 @@ spec: chartVersion: description: SonarQube Bitnami helm chart version (e.g., "3.2.10"). 
type: string + postgresPvcSize: + description: Size for postgres' pvc + type: string values: description: | You can merge customs values for sonarqube, it will be merged with roles/sonarqube/tasks/main.yaml diff --git a/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 index 5dbc4f0c..66ada909 100644 --- a/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 +++ b/roles/sonarqube/templates/pg-cluster-sonar.yaml.j2 @@ -31,5 +31,5 @@ spec: # Require 1Gi of space per instance using default storage class storage: - size: 5Gi + size: {{ dsc.sonarqube.postgresPvcSize }} From 721e28cef300996d22912d08a48a9db56f17791d Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:04:08 +0200 Subject: [PATCH 09/31] fix: :bug: bad harbor version --- roles/harbor/templates/values.yaml.j2 | 9 +++++++++ roles/socle-config/files/releases.yaml | 2 +- tools-dependencies.png | Bin 0 -> 51248 bytes 3 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 tools-dependencies.png diff --git a/roles/harbor/templates/values.yaml.j2 b/roles/harbor/templates/values.yaml.j2 index d8764230..c55f03ee 100644 --- a/roles/harbor/templates/values.yaml.j2 +++ b/roles/harbor/templates/values.yaml.j2 @@ -47,6 +47,15 @@ persistence: persistentVolumeClaim: registry: size: "{{ dsc.harbor.pvcRegistrySize }}" + jobservice: + jobLog: + size: 5Gi + database: + size: 10Gi + redis: + size: 5Gi + trivy: + size: 10Gi harborAdminPassword: "{{ dsc.harbor.adminPassword }}" {% if dsc.proxy.enabled %} proxy: diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml index 4db15bf5..c1c53866 100644 --- a/roles/socle-config/files/releases.yaml +++ b/roles/socle-config/files/releases.yaml @@ -20,7 +20,7 @@ spec: chartVersion: "7.3.4" harbor: # https://artifacthub.io/packages/helm/harbor/harbor - chartVersion: 1.12.2 + chartVersion: 1.13.0 keycloak: # 
https://artifacthub.io/packages/helm/bitnami/keycloak chartVersion: 16.1.5 diff --git a/tools-dependencies.png b/tools-dependencies.png new file mode 100644 index 0000000000000000000000000000000000000000..8feca613f21d5e2b4e60bd5f882bc3de8b27a4cf GIT binary patch literal 51248 zcmeFZcT|&2_bx0LC5S)*(tD96h=@Rx-cgE$UZhHq9+3{xA#@a}p(zN`yMXkrAVoz$ zI-yDry>lk|{LcHlYx$l3&iB`OR@UQMSA{-&2q3<^ z2L2-nAR&pYt*v!28XF&HxEL%=OJ{J=)pe+u;m~JdOmbmuV&!|z?u2GOPaJ<=tJ8W) z5rez-qq_}yx-{1P8PWkMub?R3zkQ^*A;GT@>X%$8c<@=7|ME%mZKH99z+rfQ`Eb4T zZA&7X&bRu@ci;yxh)jvUH3{Bxl`ICK&gw(^44h{L~*(--<$NeZJ9x}jO|H#7J`l~Nuu{dJHqKQcn46mED!80Ft=0K*FvTsr?( z)3H8a1z%om(f<1PubGhw-hUqjRL&L@=->if={T@vn}=kl=;4``o#eKlJElm51=>a)mO7v|ITJ2|?ahdo}vj@?QSSzoh&jyp(5z zriCi8=968CZ)3tfH3S(Ks$^5{#v#;cobp-!?HSzUB|67BU#eCM(^m`jTET|q~x7u^5cP^x!bzn*X98j=IexBdFR zn!j=nG;d^e{`CQj@KeLi8gp6k$|xULE8}DX>;JW|nF;Vm#^!L7@u`7ASdBz8*4xQf zXPTE6w42?+Af-wEFK<=~@omRQG9>@7$z9PGN6+fA{S>*M{G+?OrGd;c&(igW(!U&p zSsI@*{0vg6mHoNQ!Cd8`lKhiNnG!7yb(}?VBWg6^{|ah;uL*T{Pu1u3T}I6(%lIDH z9+WwhdUf}X<67?eP}W8DuWrEb7E;5J%ZGG$! 
z9{s}Cc)U6HwYPM3)V{W-YAzdX|Lj^(wQ1CC%f2O}v%>`ar|;s%3aciT;`QxoRwP$8 zKJ!%6wB`*hSxTO4#~v*koh`Xcdb_%=ly+)m+fnWuuGOxOab-qeb|*X?TxVY@YJFIB zFaIRk*yXD(j|;>92XiHd3vPW8j2^YlFBF?)@;UxmzcZ-62DFAv{9S_K z(&=VGqecfbRU_+?OnW$g!(y+*k!=+iO!cpv^oCfAiXAlTNJP_4RpUv|YH25<-H&%& zq)g5O57$nQ)?HDS@>yacOYVb0eI7d=ksTSy@|IXmpgjY#ssb%PHt~hE6 zv)nAOVSogxxhtjo+IaUiJrun7|FGl>0}L-`K6=f?$;QUo z`DB|h@S2ql23Lblj$b9MBu-`#0>M5P`_P)&kAD!;Bl;hE@BCN`rnBE@MB9D%ITp&9 zwn(XC?8lgC@j3bqZ~2#-YYj(hP9l?D<+b~m@bMudpH+)QlZ&on$j+K}O8o-PM$UAv zZ>tsG)z-jo9W$fuEL63%)r=XZGJ3~h6YxR4EuI&`sZWhB+ zf#hnAMAxg0E$GBHn$Fw5l&7RA;El>-o~S=u%C}E`nvFf{czFBpob&Ngez8YXPyJS} zsQu<=PFKqmWbht+*T@=g-^t-}@kIGDiY>~KT;+zJq(s2p5PPf&#%0`DyK3BJbf?Q| z*d)Xm`jX{kr^M09%$dsd-Ld6O$?5+Ep zjq=sZzG@JIWJZci3^O&4Jc(63ws>sZ?^;KQ6LXeN~`4xtsD?FNAr?ar?4gC^55py6c2H$#hU- zJeJ)hhL?a9c7>T69>jZWh~f8Lx>hiCfpjE*xuqk;!`SQ_TeKJ^ zTKwhO$K~t%Rx!rDg%fTw#3DbR#F{V3>MqF}1$6eylCH{#{z}M<6i%kQVeCs)S;9g$ zuk|a*G7Wq3bSXDy7c&H0aIswh!VrjJa2@;FNhsrH)_VVh$#X?O1jMYPl`^p*UdRm+ z#Gvqyfs;G@<%^@m%;>}8stFGpenZNKLlbQ@9anbSi%<`suF;aG)wOf+`W~igt5uUP z47cAl)RDs2VR_W9H`=lbAC)6we`x7a?^x9GRgHa1^*KFgNbduFW0-LpxWD*{X?zMogW8{WbB|>&wP3^EAw@qd#Nug^!^dIlWc`vJGOca(= zH^z}!4i9f!O;zPVYs>k)I6188=5HJ;tX=ykKG?97mw%jaW>Z{0SXkOYdmVb-*7oe6 z&u3SRO6&$qZMgbL&w85Y8kWwZ4ta`=_Sv3Q6r-IN5~faOMYlTB#PjZk8-G*$MIdv$O!3^AQf zU`4@d?;-ucci}dPI)h=aSQrWH9U{$j@lD9?p+!q3LFrX`n-~^!CdwZ6ll?D%4B0d6iQP?>D0edTOQ$Qe84Y9RE>NRx!q1>R1KorFkk)l zw-+nl>=CB2hbBrDOzW_=7DgeiDG29y#2O%|skQtEI79_~s#9GW$_i z!#LUd*NA}5CLOcLIS~str4945F{i#Aquzrq#&4vO$5Y}Y`>s*=5EEv%vA~6m8`G{3 zT7{|4$wn~CPqo;@u5s7NRdFF$$e^AL-}+ZSV_EMOj?}WBPiEZ7+LG$JuR2tFot^9j zAfK*W?&(Y}8^mJ?&*b#_tfzu!3sRO zDFHbQB@cu4&zFxYHYV`!$^$$J>yJIC{T@ zMLwnmomiwnsJ!G4dVl{jjQN$iW4;#UXz;Xs{h=ble;kSqbJmxCv9QH z?B@&|BbDooPq|z88nM-Nt$3e}x-Em5k${oP&2T%uMS;KWIrfEB+PO|fPg@_cUm)}9 zO969(xbC~4rZl~7o1Hj3%zUySd4|QR(W=7~T7pGwM9bopUw|P`rFYqBhB(H#4Blb3 z!v0{w62Sd4$&cezSp>8^l-)-&;t&Diuy(K$u!TGh6$#)&yb2_M8dB3gY~I4}gNdb= z;bx+O>3Si)_Li$$nqZ$;xwcvlo!8-Q{=1ZF<$V%I#*M8r7y6im_jJ>3D4c@ZSqv#q 
z?zWiQdX>Mj(HeXn&eMKdTi(c)AeDyVB4 z7xeHNGB{J2z6SH28gtVd{wB&zB*EZW=%{f3@!=KAvJVGflh{ks?eV@^p`RwWdcW*2 z2)9(DU?ChdJd-j$-WSQyYIo>wagqB>V2UK*DBSyqEEn?9&vlN7#%_Cc%aV#i!SY zqG=m;w$!_yStP z8@utz;+AEDEVG>gWJ}TSFNli8z%J3AKN5~-u(HCF!3U!J$qQ+oy+owPscg4&M^krhkHqF6TEFt*i;{~5CSHvUm8ZwhDt-a} zKU%2dB_O-l7C#*4CdHZh{1xf-QGIJlwz9!DnlIiM76y{ZaUEVW)M|Dj>Z_r#Ou^pQ zMGh1RC_S4wKaLWd{Z5r>!I$7^Y=Y%0L@Z}d#U`r;UMb|RmkN8wY|QnXPiDt1vIvwQfBp8WNj4GA zHX76NO?rH_h|m-K<<*<7g1oP>^-h)gp<1w4gvD*$5Ul=G_?^kY&+D?Q%v5cmw;#0* z*+d0B0W*PUeR-kif`i1MTpj(Gd;lWWwZ_eID*N!KWx6}*s~A}51BHWo-+6nV6w2mD zsqr!kx4G#xebl@pY}|T(EIDQ)SZa7$zCQ{Vp=LurwF*!V7&q*flrSS>Dd~JpMfllG z3u1mRirJ(b1UZH}X#Z}cE!+l{gi4Lwso~YuV%vpL5xS~ELMO(O(e0=LXR9BQGWAd{ z)rf=+IRi@jVHuch6z7lL@G5;|c4>hYi1mR@-&rnd?u)NOOrMxj09Q{E=o_{yK-%)N z*SS)o$t*{H@p)+BLXRyJvQ8rx60@;k?%i!p*~1;(F3DuPkNCM10G$)qV+#z_zkfit zOe}0$@EBB6>Wypj`v^mHv(v}aQI*j4&g}T;f`XZYdwsONXx7_2GMA8q@M5kI9lHWm zon@iP;I3r!B)R7eFX$7ndgkWc6KB4Q(VeC(M7|y0O%_`jwPV;mT7hh&No$yY2@K?< zb+Ntr;$b!CyBxdQy)jNo``*WE>rE;>Sr1ucygM)!%Q$8UND1Y)L^~Hjq7>EeC z(Hht3#kVjQ@lc87fhClKu4J=iVXhZTP~=X+|5%te5+m*QYvx{9h}7V5S(xBJxN#*& z4qA6UsN9fF`CK~&gCQ!eIkG4alHbJF2B}ab2|ZeB>}dm8@@Lp|2-;cio$-16iy{1S zVj&B}E>DLpPT#Su6RK7aWk0+~F8=lkR$azXN36)rkG1!P8*OJT6 zSrtcv$S49;BLSR;goa6`CLvcbP{4|x;Fi4Vm(jw z_uZtuWg>(Jqv)CZ$XU+sTeaD}9~=1m)u=90mra0lQ`&zA4_)Qbo+M^}Wpyy0lmBRb zTrtjQ9kO9yQAhz9bo@oAcwYB@IaQD6v5R#yaF6Sey3fDLdwWu&vkEYmJ*9W2R+@*4 zzuT)7mQV(tHi>x_Tcr9 z>SOeg(tE0tfIAu0<%=>;I}Nu54CmPosjJ}FxQNm(p8k9IGXcK+bp(3v)Y_A9*nP8@B-wf;(%ts&wzzm-^VA`;^YYFVkw^6`{{4lR zmXK=_X`7{7%XZl~kmc-CCvi^XK`DtzeHU$X<%>_z(X<8UWiC~{39b&EUf(cbfDI0< zh6%clbB(C3cXfk&NxIQ4`kWcGk6!{`gk$&oai<3b(m%Sxd{hJ_2olx};{@Lu^E#4c z+NRN849}5DJkml2BfA7X&(G{spvj4ww>Xxa>Jp%;%^^F?9%W2K0*IdruVh}-@wXVh z5ht48tN4wwD2S!_wI|v-Iu1EJH@Va&09)MPvO+Od>ux?%XqOA+ykO#+aEp#Y?vfb( z@NhXH2eYH-fjPz9PqC3#=~NqdU!uBWF$&aKE{@q0+1%&1b(h&(cd0du=Uie8l${&b4pa!a;pC?$qyD z*IR!x&ao$LR19Q9cnxGMgDjVX7=L{JwpQTfRs=jI=%N79ieSQ^0zU6%Qo8?!!7XlM 
z^G&o!$K%hN2KkYaCom+jK&(%vU=V_To9WQjAs78oc!37@edE+5+2GR+{`9`42*FsLk_+FstH4jckv}nG|hzK`%@Ik8zg9> zDor+r#6=_9vK$41y@*y%vr9{)-8rVERW^=PmX_u>@eUrK|b@*-7XmZPG5NSCoy&zxKS~10h->>?6q7J%z4>PG$ zpnBvHB`&^xZnvm8fOGcqFM?gd*8xNH2P{w|dw!L-6=%VngGdl-$Sv>S#q5Y^UfPbU z?EO;!$Ysz%HrCj7NzYiI4cHF4iSLi@;&$f(!i}#G6YljM2KP^>!!75DL_5!LjP;W1}*$>qsGyHokajJ^f&7v&>YQG#A#m(v7}<1-b#+S)wRNrNME z8ITv@!Mj2nZRCN*E!GfZcJac-{@SX4bd2jpKZ}pVD;&I~D{y5urF|cu{D}vzOMj!^ zQgMVOO;%UT5%{09ZN%=h`q0ZajVgmdW#~an=^9b=ZsL3K8xV<;?e8arvGk#Q&zz_v ztT%tqIy{Y9cxHfiKWV=0D7tvnE!u*@65+2#PbfByMMMLW@j3hs=%}Hv6GX*Ii282(a#wg03sSv2L!obK%dU zGF~~#&sUomNee0$mRtfT_#z8zbbs#J?mXII;+wzp>t?pV*PWPjHWhlGIUDya?Ppz5 zfz!h3n2*SN`jQ>MM|HMHT)>S!n#oFWHxEqAzk14kcl zHpzLqA;eYgs}nZGeQ+`(^X&Ju4vMI}cg zbKOxcW}n+Va?;-RR+euQDy=PO`V(9cE6ZBM$qG$#jtq@fcN-Myc=fBBEqHQ|Bm0)e zFf1fI+L&J5wxJ@c<^)hhzu;F-v^taT;TN0YcnM}1d=nETn7gT{BT+mE-cs}sQ~Jij zI*Z{%7-`gKIITJ(+7;8v{J!KN+lR+am(F^#U-|AfvjtIGplDf;vSd4KQOsu|`3PA9 z(K<`o=(}(;*0Cu$6lG9S8`effb5NfD3o!n)0_30aP@L7M7#yGfQ%!LjEG-tgBbQrbt= zk3-qUS+KkbZZO$OC^GmNEC_W?*0xar9#5^&N-9~`m=wy6%wBnzzxfaGlEI80NhB-G z4!v~`FcnA`3VBR6_F~koY87t`%@w;aAEiv7_1IaHi8I(Z6~It+bO2!X=B2Ygm?0x5 z%A8Z6vhTQD<+~}*AP}NTjuwoPa-43%p?Lr&uKt3#Y}A4sY3>ec+Hb3w+P{L9u{RR~ zDeMWKn!$Kvr|A|l(~*vct^)jzN|SlxR+wO08OwVvJ87#h&J$m5%av<`%;s;@sg9lNyF6z5k2BpA%7TA@_7m7^FoCzy8`tMFzHIy^JE zz&3X2zCXEB4N3Fsn*QL-=sHd}9{}KIi!4_CL(M!e!B0VU?odlBw5S9Tzsn1iBEm0? 
znw$~};A+igqc>XW`Q&al0WfiGejR3OW`D2UpO*H4aD@$1BDx!GZ9qhFbi5B7Lh^3D3b)%1 zSI=EyTMGL8KUx4h!afEu>ftX!9s^V2vMcm~SJPy-rD3cuh5^io3Cd*!>EZL3dGkrN zZvJK0(kLU-o+%;OMl;^P8D;tCNww911cdC=#MA7gj_~BRD%Kl_AzQ&Xq60=&_LKFe6ZMB!aikGnI;?wC-{!)wFAj2?^jXvXpNgL^$f zVJZ4W9;RCz2R6Oet^s~#AmAJlDm-Gv=DIiTx~!G$k*HCqgp#^9GXy|Ao1xIBghN?a{dfzb=+|Wqp?2Q zMpM*I=k!J3MAOHa$`b0O;CQv@G8>B)?1Cf(GP}k}yYyttzNzONndSCPO20#RqQ_G1 z2}Nn@7onZZHUSnBTFFcs!z>a{1#p+N20NRS^-@CaBqFR zc%AK;kaiF}6#Z$yV2*;Ulu!+jKTNue0-;=cqU&|#qmHe_Cv0*_h89WYW8Qn?3&m?Q z?nOK)Cc!j-cVbwm7EP05q_^?d>JfR2I%eq7artR};v7;)w%1Lrn$9&sAL+64Lwj6~ zTy?hij$CN=buYq>Ye8qK{WTq1FXgBgXl1I2X>Wa+)a~1`GO!#@d@rKS^y=(J>*X0U z!G1gw`bY1H4jv}tq*#M;=o62Pn8J{391jzh>E;W$x6 z$3{Sf4b-2V9yO#lC+HKyXlmqr4~2Hi^BN2^_~A;SO@N4*oeYmKiW24qj@Bm2T#2K1Yy1P52Z4;Aa11W_DkCtcVAq};LsJ+e*mXNpaI zwVn;&eVqUpSwIK4urYvwuELzE@9?7pAt|N-J#l_0D&R69XRg9n$P-V1Uf26djZw)F zoQlLOq&TT{QaHGksob7>Enze^S!2ThH_I>lu*nKTA1@cx_5uLbeGSlRdpG>gMRuacbW;>||Zw=Z~?W)EHtNestR=1`dve7#7(U_-u@BTc!Cp{v4au>x!XItV& z#XYF+Al?~%MF8LA{KLy80dkM6`i9M-SKF@#@)SLmy(POyd&5|&c&73p0Eb^3LWYC^ zr&%k0z*@m_t4C>>Ijxp%}RwO`AWpGjdtq2-AN=2ki4Q%udN>M7l8gN zY1I$Tv=CjZ&Rq8yF4Il%`;lx@^y}zK)%6%F(xL8Vk5_j&kPh`b@l?$)P!!_g&kq)< z6hVj+#Gvi@RXEJg4VoqVf{*eHtJC;GNVTVdwsB{2W+qukkSOrP0z{iTOmc z;6>{ffNeVL+4>!jQ86$Mn1w%r9AE9R$FFw*?n=yA1%Q|_)c9#z-cxtuYq61}>%m-t zr!;F;Np9orh|S3siUjbm@l^V>0s9!dR)!7RSvV=|E^e3Q*9eqj*eGf)zj*)Ue4v}` zZEpsf7AHtCy_lXO5qpr!tIi8l2K})@_gTk{`-4%Pjm7mTi&l?Q(YJ z)F@8K{~Ea5T!-3&Od^%;Y1T^*^EW;~a8+^@hPlFw z*fP|>RW>0~*1sch^v<&O_kopstbU_J%Cm6Z4u>n4%Zyi03~q?2+hk-DFtw(mw_lDa&_24T`VtlgHR+UT zci}k}mKfLuRkLMb<>2A>^)eK>w3ORioVG{Q@`ecHT%8GRz0|&F8gm7n169k3=g3HYW9n6H%db7Ri=j@0z#vsCAj|J_KQ}3uQd00tfC3x zUQmLP!_zoTEv%w3j0-^$2yK~1Pj2Z)wFg$SN~f^65Kkajf*Os$hIWclb;6Z+vdyLW z^L5yPW;0Iqwu>XZrO6;tePP1VKDxwsW47;Q)~G|{c;}?F7c0w!-h}hIQ?CH$L0&(8*`3=8pcv8 z40Ug0Y}rP(rbC!KinBTbnFF**CPbiWv&sc&9f1$p9&$#xvn+m91bPZPrai?!)f6ws zck4=>O1R%4Ov&yzdvM}#d%%|OZcl@?!-p4daxl!sU(+*TmK$W!wpNsnQK|k2cpDAo zrQ^y)P-Dq84Ia#+`#Mcb&PVPM;@``^b-0*Wi_@qqq;CvlTy7&WrJS;p-7u2#%?lGm 
zQK0Ukl<9Y##z7C2V^pHHH^e%hZ;%lhDz;g1>8D5=H*2BPhZF9w)H!}3Hi7tsnW?aF z3B3J`PsBSFM328O%;A5NTq?#8=)YN+yLxX*EvU@F2%b z)uf!EjYfT2>7s^}|X+{3T6HB$~i2s!>rhbf;D z_@>W#dI@QbGYvUe+6^~?IKEzH512mc5OyWWw-Itv;oKWLw=?Fnz+nlf@bS9VszL!v zzkqOx8zwY_tJm>4ku($=l+lIL=#YLA%W7LyZCj`LR%nK;d!eBYe@}`R{<%V;Vt@?( zp3-li!@+vBP|M+3gK+Lo?Nz>p`?(NNe9Ds=4I~Hjd|@3ut$@?j9Y#Rf>qr^n*99Bb zNT(3udEdq_2kN2iCw?WeQ$$n_!@F7&3~#=$Fxaq?`ctyqbc}sqX3Jn&dKmy;6Gwp=O=7Oc*v=z1 z8J3`Z-bm??EwRDZl_wCDj%7lV3)2C+u;ri`rQwU7EY+|qkv6uL+Tcl~o%@Ch%Z;0U zQdk$3VA7{bX@X)Q-gmdJV(6xuWj8FNhMS4}Ncr<)T6!KwYS)g|U)R7DkPcuz)xUzm znJXBCK9R~@%I`LLXdCNYP0##MD$Vj;LY?h}N9{EJL(d{_Nk4|m!3e8w*501J&@%bc zi+n7;=Z*{!p7Bqvke(ZK)=?EO&On>!&(Wi|p}yh6WKr3UW^kkmf~OL2i2IphOkNv) zzl)9Rw8`>c3a}Q@kc^KIZ+V#M`QDC{{Hz1J9L#0uU~bYkfgh$BzVSG-^*U~cc}<6J zw5Te&y@l0L^TQI!wS(!(t6Ng^ON}F}0{5>JUEy5hQ>@;^(T7YN1}{~`=uB%J&#Nm9 zpe5vvY3dSXWGXNp`0PJpbf{V`OGn;*rC>Y>qvHZaFpWVR3AufP%mX$`5^aZ>Hvu7V zQ@C`z#VASB0&iLZ>gGW!RBNU~}LC^1df}YTX)n(1G+n#u{uSAf0Fd`^0Yc zVC(X@Vfd**9wOiX_HcOM8JY~O6BSc&k5qg+hKd^6+trbfsb=d1R~?2p4Ft(CYl|5| z7rPbQ>#!Y8ko~z&Bp5lR$rne3dmpFt?Bp2nFFMtdP0)BopK}STF%_#VHC3>h69s)Xe2J8@?yU?f^9>OpAx|mnI@F~CPvC+mGE#U# zDiY7L96*Kyk}H8d(0eM9PkdXVBtjhJUKof*S;=B<`(F&XHnx27@zJszm_=Mz_+ThR z?pI9+@)mU)hq3zdlaU+?qspoiiMK`?2_P&8Q&{GO>w#xXq0Fpg1=U!NV$*w%ia_pD z4=+hnkv9SXh8jONa&llqU!^$Lz6iG*3+;_GdPX=w$OOty2dWbp$W8cEB176eP(Y=V zJ}O|xXMF%Pv7aka;&V0N?-v3j!Vpxm?k~(B%g(&>0aVZ@z zjX{0s(MUGVG)EGd^m(#Pzuys6uSxz}^XZ2V)ltd5prNk|l!S-{2L*bdE~2|-^?*$5 zk&OeHf}Pb9-1<~sp5C!9miLKAsIN<84gMDYZQ|x(mW2pV=5iqaN9%bKEeXf<2;+tm zw;GnGLjd7DEN@F9xty5}cy;h;19Fm^ks-m>Jow0e0(!NF*8lo@(n+4AOx_wG#PtW=JLFV5sqQ zPD}6URoCUhs*fOBxlS8C4#;r((5vx|K=2f~JXCCCzBK-23kW})a9B<)&=ZV>T`O8l zK6Cv_QN`1kx4Rh5?}Zbq?E#5o?RgSJQxDMDF+_%_bvzXLa)-N^pVIx<^X18E?RtY# zX2i{~QP1`I@t2B>Ns7*Kil1?6blYS?TXFENi=T>Qq_3|jm%MQnN#1+xC|(8B z=g}+<0ygx*%DVz2r51=AJ5&L)T7$+j!8rIFp-0W4#h>-mq9g5!D4uo|Bw4;h|KMFM z;hF4Wbl=PQX@TzX1~t%HR;=1HoOnWU>)-Oxy8hfNz-fhDvHB`}Aemyzz~)Z?G=i*uKXS+=&;2&!OIs2vdhdTUpq}hm 
zti89kzBrYSgccTbtt(i48YyBu{0<>knW8jsly)6=9x^Xfu;tzpAWmH#G;mUN z1ya#{{sES8P-NRz{?MXZS*J4cDisAg^*9iG&5Jzzt}2o``i}~8kaYluE%L)c^_LbRezC+8YJzUw@(AEzM`J*Vxw5!r z*Pr{6_Q7mNC=m&)dieSrv*XmQIggQn z9?WWdB~AE1%qWhlt1Uf&b{9e2^h%Yx(-DRoMyY{vZw;uix6r9z{6mkQeZI;NDwz7A z*2)v7NaH&qNAIhxC?As@Lh#wW{fQ+`rh)p7`pb*6)4j&=SIXCa%>zJ!Nz@8#lrEB| z`-q_wO{4dGlBL*sZcf?= zQ4=(}P84u`ffRv1aWc-)NNf{gfuY&WIAX(|)?@KaeJcD3H5*O`%H~KdL$Olg_eva( zk7P9|Uk?5$+_%HQkUcIMI!xZbUujuHHpB729chu1>hcU%bWgr7!xH`ip8Fg?V)>K( zKo&e|sAD`Mnq{V{t|}C_CxtjW-trdq`q`oE*A^U(AZKr*19Qs^RiP;#vb=S~+J^9ByW83Z*7|FhCi&eCRfiA(5v zBe=D3$bX7@6ghFrrm!-8?}PMvTCmtgc2nLIsEyc}L~EVo(j9#p#8u~>-BSVz)*p%P9M20}X{a{jft8n%-`27~JRrp&)u^l^zN z}yx|*ZKQmM`eb_1x)_0QBEmT_jZ zzY%weticgpBKA+UdWP-Wof|_~d z9NXzU4CQ9>-hN5vxl-y~M-#Q-2=Sj`YrBFI+ns3J*HLYlF7hQ>?~67rbl*1Z&5V{< zG8?+Z3Awry>HC$(`BAZ08w~>J3gtxb)qWD7$H+2-aDe?$VFZKOvDb93gv|rfnLBn;Q8| zByNPY9x*4Sch&e#KvzAC;^NV0#gl7k;Vpoq|!5X2d51GAH4AVR zCws7K?f7tZC=N#Q86w&mn$@#!BVz{ucDG;;4v)lhtYsOx1DX2>Azimt+{`2v*5YXf z(#^dwRAb9L`Ff~*Thf1)2PJp!6B;Jnq&e_!uJ*NF_noSOgC}rH#Hmuht*M4U6y0|K zPpx%2Q}3Z}fWw1{pAWeng|{yKloKNJD*#(>`KxbEtPjM&Wd0}*wbro1e-|pqLP;b; zB<(HB+VDkq)OIUJKZRC+6$(pSlZQ!%nrwJI~$g$!crip#@-U5A!VZ z>&hZV>kjn?z1~|IbtcPq94dMzSr&k3<&(pmH#qYI6FGsPcfcr61%}5I)otEOpPWlD zT03+2XUKuRO+8qBRA#fPM%#2VqZ`XD#vzQ3PIWUOO8{NUo!f(uaIm$SL_5ZLnE5OX zHpl}t1a7&y1Buqsk1!`guq8-TBpG82zA}o9CZESA*AA7K07z=58%d~C<1ytl#hXVM z6=%*nd?ficbU#!ehpZXcQ7V>`S|})eIvkwsGLto9rJiCg%Rv2})a)LD#A17B=ouS{ zVjRo>s8d&!4UozYrh+K-K6Q#p58poBpBV>9)EY=gnS9J(iZS4T&UK#|vMPkCV9zbS zP_|pgC+j|YBDqtaWj55vv=!AUjhdOjW|U@AS#EFJS~*~V?4hU~m+L0CYN8)xssM zjo2{#6qI6XTpNp(b-B)W{Z;^>28rwKa0*YOv{FZ^xR4y(`=7da?;|;s>-CV?le1qZ z%XUW{>t4R-aoBikvLbBst{!N@t7Fyfd?vVngYw3~Ngzz;Z9_{OCxm|1#SP9bIz??& ze#4ZsbUgeamq7~rWr>v)#t*#3%`PW7K{n$SGI*Y~jQL~9w7h{yZ1lMjkZD!}PiS}8 z4J8PmZu*K}cdL2_5=|xc7!ltpfKNtuwdmXYBC(<~j8yMky3grP0(=eyNL4`u$O}&e ziYDf)7rrVojWemfrGF$!r(dVKK@E~oy5sD~I&EEZ3Lo$5<|N#e`nd47Bs)e0xh_Wo z5e#IvEdvKg4wgWUQ*4;nZ03u84@HHEgVdITDI^$O3{AXLtJanT+{ygU_DOja7avw{ 
z*sNuyGJ+FK#ac7*!xHY>RRt&UnGtSZ95078p!WUu9+x9zJ)=a|5I)aey0(KHY-M9& zrlSmSL!|`hD4{sPmphtylp)^mZ;jzn&V*LST0_ElNziS1shKK%nL}4Nb&6CjCL)3b zSEGTCJb6E+#Hyvs2oeTr3VnM0IbPh~_W6=aHjC*%rq5x3j;Q*A3FkKyiL#FVqY;rO zv_k4UY<;iC8si7t_M(!99E9f{c*EP^hm}0aO>!v*09yYwa~oz9$eaQ4(!1tA;iLX7_~T{&3k8AVK`WFUM>Mx`-W7 z6QaV4Gy+GXt)jt^_FcDMPyZkRpFo4uNzEFM;a1F`E?oPsx)7AMt&iXRIc-P{jMEPW z2nXPS)iLEcO@JN1L6dC#JlVpa2p;HoZH@Xf#nP0|(c+Lu&6){1)Zh03PrL z43-UG1i;bGVa};jN!8 zXD@|GoddQUeCI2=#E#-sUrCcy+#hJXP^K(7GfRGoX@^#}s|;F#7= zOP~_>q;R%|>>p>>12%mC0IdJ9o)gIAG?`$2yTCM0qZ88p9Pmd)fCgXEsQjStyNH__ zlK;48f-LaM+hdw}zh@FmFdqpRAT^xJ7jzBpLEJki@PmCJ;QQ9NC+$>nga4pB6}X83 zXDR--^}xc+2Z_`wxFq2_BmW<@KP!v|G{`2Hn0F1I7&jdZ|8Yw|M3K;vidD8s?rV{L%%4+MiJM-m?q!+*Pkl{}c39isnO55xv-aP7bd8!(Ny zp~s&a2AFSx1{ExBXlg@fanm9CANK^H#Q$5||F;(R|69=icNerDJM=~p&yRGev(uC1 zNu`Ap%8x{(Fk0HrRTpG{G00E)B<)G#W>0wfH|G?05cMO^-?%{CWW`=t|(PSly zePos@s(CGP%p~}f{OJg#f%5p=pMVq^EFa3#%q9B+kPjR>{|$bYQiSjtN-$`uNOlED zS?>a5yyiKXIV(N|%O9Ma;+KWR4<+!nV%?!f(ieZ@&@|^|^Pq54q;GpSr!a?ep2P3o zIWOf~hyT+@YjLr^LGc-ExDH0~WHZgl0LmFqtOj>NZ^He;{vlk$o(q;+X90wLsK6$d z`41)^;m4vG2yIhn!pxD=#oqZL^80spb~G#HA&=-9b1OwE8Pjk${(y&D{iqF&Zy2a0 z&2?t&2PdHu?kiG!kAB7ylk5!u{OhT5Nhm4#fBGo@F-ceGQF-x*&0wWQ3?5$iDKE04 zDTYS|V#;QW-3*Pqo4N!D0`R-k!@LpJKLiI_kMusy^w#2a6RVXt%P)R>|73=ty0$LoeA+)T=^U3!&Hg9IEM8Sez3NLXxXzR#Q z+X2`;m~1s~O*{A>HUw}WI3@-R&~SZMZ`(3daA!@{Iu-g{4jAM6!}je=189~7pk2_) zoDVrzAHe@1VP4u>Xhq`atz>6DiYHRPc?y9WSz$20VW>`>m#NvKoWpF~@APvT@;;#c zkYa!eQwPQX11xs+s{3h3ZC{`v1@yfCTvq$-{}yfHTH( z4S^FbD*WFrJpb+P>rkjh;GaHfWucQa*@TYP04%$PyC(n;uK_eOH;KSW#C1Y4O+QNt(hj6o20u+;kN|upM4|_E+@$xSON6V*2=XmWk?stpEYW} z$G^O(UcL^Pj3rROh*W{9O(*mEySjlD{Y$fCM3p7QVwU%Am4s3H<&gmcYgDds4_y1b zj;eXAa8hW$U;LB>;4M6L%v+HGm++fpwltvR@t<_+yYXJ4s6i~}P!%<0S^LX7ukBWs!K0}G5Yl49UlHd$&XF`(S zd4i9wliowbLIV%%0e`#IynvVyJ6b7Uc&M_viE3|f_q$(M?~YWLV0~}_Um$w&PH*Fv zO3oBVnZ7z@rX*v3!rymZxB!8oWTDV}ny0s!o)o>*< zutWzPaR)_lsx}5A%MT;g*F;8b%MUJ0{k}9sY)fSiTv$@;xmMGO6U2>EYSvD68c&M7 z$r+pw`^n7@=R!+;nW?hhTF2f0f;&*opS8U_)aP@~MDSB-98Ljv0;s&T&2D~IAPdyH 
zUo{<{1}y%OlX(RPJ)Z7XHP+y6zF0_7laE*P(Y8pBs*$vu+XaY8HSX4wNTETUH9%$b z9F}*9Q;qJQfNLklaq7CDm?Vqj&xIeC^6Y`U2Z_TaYr*9NV?cSfjw?G|fj;*`53OGi zvK9!2Dt7ck1?{FcNtIKcrYC`V;42&fZs!>HBVb%y?(w&7^2IxCAJ-2v!8YBV2P2*w z2Q-HLuXhQI1>CqRIM6^rG=A?|;Vf0ey#R7lbA$T`Z{_a%XEpawsh<#2rbg!^pGl8p zJ`n(Ne*_H1sHd!o_|8Cpb0qHW6^DuaM`yT$;Ke(IWTrjR1Pts*i~fwV5G|HD0+Zk~ zPe6@%mNL&@`MwYMyvOw${=Ux!9xA#&yp@&y;_!1JAPaC;+}Ms@hn9d_EwV+Ye9yD@ z9`~K?iw|Vr<1&E6N$R{>=o5C|L!2hyNM2pZ>%{b{C^+45#+^V-+4}wrU@&FCY5R^% zOK)R?&S@_L&208UI$-23N}7m+Z%*68d|RJ<07%E4Sitf^^-R<idJ-yWxr z=-U_th)J)c@~O)IV(+b^s_eeDQAI=$kWC7L(jna;DIpC4(gM;Qn~+Tlf^PKoj=YvuP=Py-xkt;Q(Se zya(75_$GG`7{2c55O(V)^GN9dsP03fNqvu&m*BQPz+n%EqKKK@mKh!v%@(cLf^@#! zU3;KeE+8}qY2^Ccm(Nk>d1rN7VskpaTUQ=(teXr2-zRmtIObOnz2)zGz!GxBg<3lV zAD?;%!c)H9fp`^#=4S{m5P_@%0n0gMfVP`IDDp#Hi_x{+!V_7Py3Nb5Z@G1Jr0+Y{ z=duO`AU7>Vyg{Cf#*T62oOA!#;z;D?I%5W+&1()m2R|HKS12=>eVAp zEz>!ZONI2(vfMs=|MyrFAfA1Fn__%^7x_$nonGXp>)6rwcd!$)MJck^cGv^SI=sc@ z-|bF*_P{!u+9Fhu(cYY^21w2`Hi*g%^S_{`S3B!zj$-0VO1vY|Q5@b%FKVpY9fn0u zT%r8i;efbM94#nxxLkm2y+gUxMQVxzh2Y$08zlWG-})S&$AAsMEU!WOqaB(XX|r3h zU#)$x2jO1MhjFGMu&$9`zr`efiu9n$hQPdj1eO40&MjVD3<86YpWMa!$nwu>dtaQY z*l+Q|nCWaTJQQ$>UBB%k(okCZ)_AN(>0(;kc)U;fB0q8CduCdSbHil-Hv2Bpz(B;} z!7OR}DIm#Ega~{Dr#-8Rn})fhrH4V-ceL1uY#G4alZ~dhk98~wY2JLcW+^NQpsX*f z2XQk!0l+SEayF}l8~`^X*FLx#q8IL{3YEgbUjJ;2>noKfI+-#-<$=q(_5%jo6aBOf&yOsIe^z5E1zCg5(yLKvX?}LQ-XLm z1|D6!@0`KjH>}V_Th=FMZPQ6QQ7_*|DsbK`ffOGu@a63USs$&hB+9406Gyy-%|x4 zPa+?!ASTPJp1GP{Hb61Q(~eDsj`ZI1S&QM6U1MUducb;N#)#&0fY$yS7M8>@-e7hme0L9T zJP95*YXPe9A;+0hLx?wgdu0;S!45gV_Ife=hQlyejkjLO?0`vZm|(fTL`LG}{nJ!! zD!DBnto#sYS1map3%Y{mgUxY)t>U{Rn}OVcjp_W^xHUXENEsX?+oXp>Bv{dB9yX`? 
z9aOU#O`cKtm$X?laKWWNoqfimWWV=-X$F75x_2DSX~e^*(cFc{ zO_3t8_>?mlIC1yyjLJlP5-*PmdEA=BM?Fu15WfVt^!adV3ZeE}w>ZAk<>nV>>h3s) zKgcq35&81ko}g>^4yE^^_(m0hrDuQnSv4SuX~W)ui8=2Z^U<9X?nFZ1c9d|23!;G* z7UYB`5-uhANitJskR8Hf_qicoO*=u!bE448O$_`*ncSd){;p zjvhwJ2cZ_4KTy3ZK%4!!)iDz-)d-A`tz-2onUAp&MvwuycXeA;Gm7hPEWv1;obX@^ zQ0?$dFC=4wD1ZX0TBU<5iWlY@_XdTqd}T^dJ>rC(^0_Cw@W5>^uq+97?s9DOt2Y@l zzS8$ugfwYuT!g8V?wI)X9v-Cw^JC7r{SJ86d`AjvvptFBZUCVDC=_4> zgf1XX>D6D7Yw>XC)KRIm{aFT&E?eQcG!++G!@vWX6iqGNPRgRz0-8%%CzgOtEowM# z@Mcb0(C1wlbYuTFFB>kyqMjMj;gL9_uhYjsi|sb!hub7NEnYoajVJSOiItxl2Vm1y zE#4h8iRLICgqyaxq{lIG5D=1P$SKjG&Zn9iID>_me6sq!77wZ7pd*J8~+H2Vm>?DW2ZB{fADOf&j5`ISYkzz~-u*`Thl?;Qd z5U@xJI!hW^DJ^|gT=uqCEI1i8`4Fw6U8;5sn4K%Wa(6#l2#SOk*Pr($`259z)q7$`4N4*jK>Ov z?I3+X%ZLNbTOgdLBQ(1h1l*RHG}5F$DxxjP$njI$o&iChJM}Uj`?p+BLJE9sQl@va zaI{rE*9~*`vtDNwG$9sqhKM66SZL&Q3jkK|##0CVHqG}R8`Fa;=4IGwn(b;SSH%l{V1z>m4|S>hvZ;3YIu50bpx#8X^LyP)U+SNI04+Xg|NCGT*d><^+VK~-M zdkmRf*#j;HM7X=~JRH#%DE(7#t)rAtWVxp0)^IxPgh52s3XOdoIDzx`UPW|4OQE(R95;aSG8?5QG_a>ar(q_-ecvKo;t>i6libgYuGj#}Y zXGL*1Tz;UkSh{3Bqmo;?*$RATC&K{mla$5%zI>+fzcrp>De4pW8&INpTufE z+B0*fz+nM!mYu%@D^;J&aHs?hdTKZD{x}ytdzl2L+l1fVTL0^U&xDAz{Z@1i69*wW zK1y2?jbNv-)76GOW_$kA_;!l^ZBBp&c8Y{Mws|Xy#B^=?hTiJ7=L9=|q1b8+dBb?! 
zkSxNCHn%jlFinE6|6J&hjr~;OhVk|+lkcLgRLnL^BJD=Q7WRW^QZ9=&Toq~(QcFQzCt zDX)g#Y{NmRhqILCv8hCqUGANC3S%dmK(j0T%m_1@wU4%FK%>d0~$mEAN`Sy@J( z+9lvY6W+VsoFGWp@cax-rDQ$^kDOzl@f(gndxRh|+-yL7BMlE#t*-I~RFs;v6Ht<#P4ML`-ryPZxYrvWL zB;@gU?DrdUP45^Vc`W^YYdH_aWuKhH!jMz|g zMDLd1 zpPnRQxgs>$nc&TI5c$+wcWdu9Q;ka2ciIZhCHNtWcMvTDZn_RDaxz@_@f2cNCDKY` zl_=cjz$6)*36Z(p(jV`lx1FvU6rNRkhnKX{-q;0Z$hzsE(-zf0JqXg-=I*U!TMUzl zrI(O^v}PB+r@^JKSjPAQ`&l;tP!zki7ZP8CZ}&v{~om%)xJN8DzAvDaJFT;Z0adp9cJ17E)#3YGHVP#d_{PjXSxsgL}*v zaS}6K4Z{xZ7qhx`)KU=KveMzP#a8$<_G-}N@*W>k?xs#yc2R>2)h&OgsFY$UE(-H3 zd5Q~TwK$o0EKJ~aB6CXGmph;-jaMLgY4z9tbK9NLoC7Tv2*>=^kVx*?6jAft{uBx3 z1AOg_<1RcufUjaBFhEs zF<6*4Iy-SFG!*G`%ob?u`(g`V$;q^u9p9;A_IepJw@l@oEAk`EAWw|Bj+Xf|`8PFm z=vm7VWu&9iM= z;)6!`deY+`)m8;CBGMf)q&i%m;=Ze0Nh^j}m1&Jz<*z5a>yFT8Cbcv@MhF%+EWY_b z$lNS@smU=X)eC<63Ah0m-o{4*&F>gA+v2q5ft|MUS){0@ejrcvq^tlw+R;+8w_aM z1epG(9Dr(auz9;}>>zkb>R!S$yTH>? zPiB$a3W1Z@08Q${Rs6ANpj%vfU-S+B@o4J%X?l$^lq^RH*8y%&%>&1opC9>)RPDu( z&mJO!UIIh7vUDlt!TVI7I8GZW+P}2Mm4-RTucfKd(?F7=#7HMw+W-` zCiwn(KD;e$4I!Irc=A8*_`#w*a%;Tp#0`3^d68;0+F8SeD!P8N|^A@NEA`cc8 zl>!)Oz`o9%1^&OE5K(B?1KUE<{5j2se6_AY#rxv_L=yHNJ|`yf=f{Lapa6()@^Th~ zM^w)M?dzZ{qR9Yc-TVQrocwQU&V{-*$AkC=M7p}u+8I1*uD3PI?9fQwmTvL$CuZt- z6?DvJvH$gC>K#%iP@`#~C+e7BxdRin2E;sciFIv+JdF~+j&eM>?R9{A{WXPP+&tXvMH7+RwvW?=Q$SJ{PZt> zpS!8>?{oi~a{0cV&i)@Pz`wHne^Te}Za)Nh$qD&6bt`f=X74@VW$LCVNYk`guC7ab zJ}CXs2kmPZPpaSS4om5!JUpqUc(1`GVgkgOSkNhApXywo}g){&sF!PipwM8zehm2+=&jx)hcTYz|&?9j}> zT!A1szN`sxm^uLLX{-ElwNnLL*&=yyr3~)oYAuwM{s;qi2ffnFrFkU@DlcGJ5YwOq z!MCQyO(@y;%KQa_f5a1OShx~2nlE** z31b8a>u+oN5rVswI16C-Oo0GX0b2PLGa2AVOapX-LL{ic-B4#HlL5xz0lBtvfAkxM zq)Q!UM*I>60Dl0j43+Q&v51v!b)nEVd^ZQ7&KPciD;*9`Cl&(VFx)2`V3tO|OASCO zpq0#M4B)rqS6b9G&oRN>P-plU05YPhOLJ_W_G2%-cw(PCARV&0gjz`xLkWa6+V2R%q`R{Z^`%4Dn7ku1g z&_OiSZ|v7hs^6xyW#oeAOl7U+f7N9wQ$+h}|3}iRz9TnNJG}*zBV#sf_s&n6IUNj6 zb;kF%fyt?*lY!$k-(mqt5I#6RlFq2}OrF=ixU>km4()gvCQ#Nc1&QWvgapVsDkjz zpBpS0#(zCY#5=zE8JHC^;8?3)bLi9w66e9Q#5*zhhWh`cX;&#_1#8g1;vy~R;IBN^ 
z3T>1z_=}vX{3XRKgHz!C6l9&~0Jx}_C&(b314+`Gk33fJ zLA{g%T`Km<2!MEF-XgeL4fzHMa0+OaX`$&PP5-}`PCmfx>HyMP-9e_+TYd8cO;%Wv z9zZp+5x>R?(uWGVVZGCg-NP>-jnK?`|9|EHqZ`tR)e<5>S5J?IvKOJUBy|8uHG>BK zYMyPipzcq8|8Ebwo@v721kihE;{F%!5uB`OyUS8@eRZJ^b&Y6SpV?`-4RRuOO@Co# zSAiUI3T7aZmoTkAP7gT_4a3EskfP>W8;Y`om406fq=xVG=D+B!`xgBzU5yD&QERII8(kM^NkduUR z)&W|!pTP0D$M}-Kvh3tz{tp4KL18cM%H!A1sij9|mFj6Y-nX`ev;WFcKoP6b$x!2lc?7R zU{cDp_mTkpDuo9~W$p%eH@{H=OXqFpzQS+lCYefVDDapG1b;kC4v~NVy3$$uo1T|0 zT?R-n{9DBh#Fz4B0RhQRKzcBeC+GsCQVSpx(_Huv$0J~KvYQum$w|=KZ*BzWr=L)w zJIEGy?M@1ei4r@v61V=33*P}C0bTlPHvd4U83HQ`r5JxN$Z7urYAFc`Y<&4#2O?t<H8buIIV`H*awv z!{=C6ym?q~Il=+->}dE{Gs+xg&(3mm#>DT>Fu6CYO_>S6f^$%!wWw)objlu=D}Z1+ zN0dTH3wXmOR5;A`H|WH?G5~xzfN^yP&`)JmF5#_`w*ZDPHV2T88>c!CMnU)qsq*A^ zc1E*67!M@%;b7AY*^gI%r|dl6^DD{$Qu25k6XHkM(1O5=ZBn%;+D`M1cl^2387e&O z@f0~Sfj}M7qu9;|mgWFuWVsX78&Fs}G?>8y>R%PyH-^b3ODB6nW%@XJUIKI>#6WT| zXin|k&sJTdZRpX-L4vOXE7alTei{o(LU@OsULFizNxJ?QwPLW-4{tEh!};NBmjxCJ z1H?PKt>ZJZ)C^Zj=*;p|ui+S9xQ3cPgzzL8yiS2?&|F^DLb;uhB$UF*#b=UHZzS~q zwBw;6D+CXp1$3f6A)d^{m(KDWL!7~7t+SwL+8hLSi{^&1S{)#;n+eWiidgD<4=J@^ z_Q}ORl)!k^Iso=jN)sXT*RF%IL%01c0AYhunq66_zSXCoK3@4u^KL=k3!8DkHnGwD z$aWPl`Boc3T|GX*^ z1xV9l%_DWxPbk#*c%p+@galgJtw;bF$_H_QKJ7PMfpYDBtl5aCDgcUV__Af27gI=P zs;Kx1| z-OgxJBdFBZ8~|qRG*8EtfkHDplRBmz_m2T!wmHuby}2IL)Pq<{))_0A|4sph|DFQl zPt!x;*SJWWgQ*u9%14ZG%}@b85H+RTvO-VO5qWvG$ORC&7YhxboS<*is+5eN;ov_N z{-6NS-;#ok9Vr0oIQ|oi&9$729FTJ+h(1j<$TCyv>Ev;)gUAuKsL_>fF=w+Mm|{_N zv7Rj5WhnlnoU>nkp}QkEvD*-!(_|1}qt6**KPn0HB|cm}-41`aj2`VEVhYojyzAfe zmh^ns?|N)$DL@YQd5G)*jyD?j{FY zuIV8H0$2)MNzJIXZlvBl+`k6dVw@ov_VP?Wo|6p?pFLRY*D;L{|zA0kr|P*y`_ zI5=Zlysh;T-l^ag)O7BW!<;5%4$d|Adg4Lu#5)NiNCqZK#OU}@;t*!mk9h^)?2l!` z9(0mL(Ji@!YqOxD?Bytokzep-qvS#uyesaK*T)DUab!OTVoW5r;YR?d-}i8SP*(Dz zFbKm_?6E!|EOodx@>)l$(pQ_@$(dAXxrI(jbO&#VS|nRog04ck_10JT`xPoutj91| zm~K~7A15+9*_ePDMk0ra$z((D0+U&i!6p!ky6qWOCHklKYon8M;d=Mxi;Jel$%}7% zwibh;Q%43DyG4AqYrYnf1LuQ0!zB;HXajm}OVrmQ{L<_(4OnDC7+0 zDr0@oV<)qS*duXTk**nW)S40svP2_Bzen{MDz98)1s{p$VejEV0M;SNm;a 
z&ABD?G~f&?LGGj704lfv6u8)+6#gUUL-u=xG>Sy6g%>j^t1%My(Mbp`Dg5d`)*~L3 zccdGK6D)JE?67_mIWCPZqHH-1wuWO+-yv6FKqsz>Q9}8_3^0}Yyv+eCQ(?Df;HO@w ztlkv-H);EiyFr5G-(Ak>t6l#@$x4l8Uj3*Y%1p}VI`C~Uoqn&HY5b=SWV(#c|Gx7G z@ydr~>E5n{thX_Hf2E!7$_b+8@zB70E=j!xJl89CO;8dI8#kBYPEUAPhZdCxJA*2% z9PX`)nv_$eT&g~pU{WKp!-q?mPk?E&^CU@Y zp~)k)8Qp|HM=l+HxxIj9-${mS5ZMb=Ej0Z`Jro$Ye#_Kc1ODzdE;2?bw`2mXc*VRw zabl01+W*$YL<%~0?XnrxmZAw?Op$^Tj+7(}Y2{$j&uN7(eG06P zBGvbIz1(a0Ya*SMte9uhfEBsmlUH$mJUDhq4~uK501G8~u&+M+9al0u2_8dRBy|`5 zDufVXcbh^rCR-7pp_S*Laje9UTApF9e2&%h5`piQEYYPnHSpiLQPR?Jhjx!U(Nl#Y z09(9s&r0sRTvT}d?4Zs@DN}Mw`nmZ90e1X=sZ^T=28W;t&pYXt&r}eS5#~|KcOUTw zl2eHjNaE^ePPf%03lf^hVK`R1CAj}u=>F9l#v`TwW8L!Ut1=!H6`UXW1Y(OC-!LyK zJZj%nBr_5EQZBEKy_+&Y;&{L)-w_Knya{2a>_`EXm9w%kFgcWnuz0)qX=#Saal>qX z%y*i_h*pGfDea#M6N;cGmNa55VK8iY`3>3{Y7}#FE}6MSPjiRxfP<6vPZC(T&WLGF z%+1in_H@qH`EzLE+2{Y9tpu7?9d^yRJi0L}pn8xbp@bHPj97LQ*$28M)}do=+4-S# ze_)U) zRceV3nK1^3p4fxEH>=|^?=yJzX8Lt9<1a32Lk*HyT>2hhXN=`Bj0`8M(oQ?6Gq*kV zdEe|ZAetvMl8417;U^U+^W|**q)D5@lV)-bu^br=3Nqx)0H*Ja3&XuI?eQ$t>3cu2 zw@QmcxiVj*huUjqCeizUC(|WEwttA;eHlphhR46v_T7EX4a9U)MKYI3gZB(?U=ARp z^(Q5TspqOjE()q_^tVXNSftr+gw9b1Q>dfJsqeU}U>F^j1WUhqqNOms(Yb^5yhl0~ zYY7}{jEe6OM8qtVXFg2QCU{G@l^o*yunqSu@w}j-Orv9aR=6#I^xSvTV~+lBkv;{} zHB!DYe!}HmQ4bI?yZ3>HwQE`J}^%*yDEC--1$ljzi2p zp%?3P%{qw)47ZUdG=vg&BXMqC>++@?KG#eSowevXt(aRMR<;XhksMH3t2)f|p8iZ?0CNnP4FNmr?BzY<1hL_l$(_&FMi8Z{j<#)Mk>1Jf zgi&wK1jEz5=8_)Bcc#23a;$w%1A9VCh#_{Im#~_BS6GuY@D>B7vUG}*ZA6l4-$0jP z)##SKo=^9@Z;_EMdH|Sl`iVcg_4K3sP=sYD%WSnW`=-M?sFb&d6maA z1WUVw_X2D{foT~saT2y@(7@S84v`bkEh6GSsy$(Sf4mLI)pa{dijhhI@2cf~<70k^ zjdPCzVm`b(4G#tS^@P3bMzGNVsh$(X#jDW_AR}ND~=( z@zW90X>E9}KXrpNT6&d~#81dReF6vyW%1~``YjJ(gx_9SAF~xTVF`$aGx$%U1Dd@1 z6W!HMuB)D{<6;pwi70$$dPpLF@fwq-wa^5-i{U9?Cp*Lhb~yeKK3p3RxL1ONyIk#- zC`BS5C3x)Mx7+@I2Tq^&Hb;WQJ~~K63f?HsyeeFF+)pffqYPxl_bGmdZ$`jqmhZ6# zBrD$1z$qftS0ERq7@ROKH^PlBO^ymJMQsm7KzEf_Jv%m8pwj|)G>y(dsgH%d=UHYONKZL7hK&#C64|7q| z)pK5LEF38bxCQASu}Vs`DA_9~WqR%Hv@rVf$Fh*H4o!t!9+KVcOo=6{*xl$LeW@<_ 
zQqM=j#8H=ylLK7#!1DA{K&OaSFMD=kKGO7)2QVFy9&4t{_aYPNbm%jRM)TN~25@za{@2xGe4s474}K=eT)ZCU&7pu4kAXg1KhX?}X^77D z0XF|Ff6}-8IGr2YiD2C#m*YunxdEQVc=LzWi*^w)PCi4&J=r9m{Ha>915$p+HJ)WI z{O9RpaePIJ5v}y}eK`{X^v8)ZXa@w((^HZ@p5#fl>@m^Xx*Cye&x(%4J4Zl66wm6Q z`=0)zU|gX!>og9!2@X3CQ9-r7LV=?r)u0aAEf{#?jk@TqNHFyst>SugE*Om8&ELgu zZwljSOc{>omIks{nyg^w!bpl|0w<#QX?+r`0Yz}tFkn8D0E}jQ;fRlJh2x2mMjsb6 zn(AdW=JJVPG|O^JU-7`8(R^Mw@9?;&snMU;6bnZJJbUApSeXDA&Bjm4G^X2Wfn=U6 z(tk(OLX77TArL*6?pPf(m|6cH9!w_9blp;u&g20r0|v}jvzi53#I1B)uk_I6SRVV| z#EfSi61=|meVRtJEPGFYVASjp;Trkzf|=5&$J6sa;B50DZ2+fR?&Ax%MMPDF3QAT2 z-Ze>e6DwNgTCs>J zpm`2Oj23F;!yqEOpyUY~1qn2Qm=-jkj&>vjR$#nxZ_;tZN@Rfx1bh*l?`*_ zhoaMg??nW>K@KntSka#|d=wlLFb0PjKXB){sqGNKuQZ?s3xR`1;Dv9{)v19ZhS9Ui zj)LFc^G658wY-NuLPgM%1nz69Dqn$aqx&8O{u4GdG++XnAc^Z(Df<-Y43R|zK@iaO z$K$(N`rv&M{@~fWymgums-T6UJUcNG%;1W8X&|s=yg&=LQ#^wIws1vW0?wdXue;F$ zey6#00w?$t1HKTNdanq{MnV{RJ;KB76VwF0hKWbFK}*$=M-+$@KwF-->)T_=Nub?^TKoN%R;?)b z^?0B`3hgVHZ}Oc4iBf3W;JX!;YY+c+IEE42AhK=om6q^%dMM6ZxBN#b@P&tMyewGK zK&pgfHPLZEFjW4nm`p=B@8<8UgL%ooXsfG*RR}uQMMCZG4PLlnE3C*4`XPa~4eqO0 zUY+=lz`Te+OP?7(V^RB$OzA;~iR3_QB|ht~9)n+JcnV8eqnVN7 z4qgU2oB?jwuSm;@QUbqoJ5e(F-vi%s#T+Z7gQWmjnk|Z{{2gNq+K+-fq@I-N&@4_b zG}omsVYt~zrXPsD4=qE*+pM*J$D{(>Hx`uy4h8=RY+DUlih{_Br2iw+M9|^qRNyUW zkq!y7Krq(B1HyDMcfpkxtj0og-!s5>y)5Rb|902|$hvpTA@K<}_`RpaCmsC>9ryyZ zQu%ucGa$=Y{|iA#Fa)F#KwV$`@^MlDU2-lE z4Q9FWmayU{GZcv-^L@wGC;nr__3WMY_=EAL$NHAnWsuE0aQT{(xle^mxs`XVaNNGb8ce zc41{xD8F<#C`bZ*ctB~eBaMLmxgJV+9H)_3@!}XVv;G=NSQ_ivAyBKCJ^$~0SJX1h z>K1T!$h=Xic%SoQ>YQN)#x|++Pm_802ptvTC9JOm)OZfHShr^(TPq4`GM&-~BWkF> zUw#S2z2{@?Z$MYrY@T$VCs_s-9aE7g5X1QS|G0dx-?#0%otD-)lOO=uJu^Q#3Xakr zXW@1r&WLVuk0>Ns7;UCmNmc0Z{PXC*xKhJoHME?g1?j=>`lS_)kijJgJ^G&0%{M4y zL{K9uYh6iUvV`@3fT;+jx8R{40nECO6~~2waq64p|6CsGXQ{rZbaWuG)t-keS!+we z;z^g>5PslN(LnE!X3GNOJ4c%!t84kM5!^O@ zMojZ;St&Fu<9|~<++C3asOue+mfu;DpY8|XNtplz6JPl3cr6t8A{l+NJpZ_4>Vryh z#uEU1wEtSm%H!y7^Z@`iiktQ#9bYsb2sdk7Z4F=B9Vs>+Do%8Z9hYvHb9m|Y&y6;U zfJS{i{@>ME_ZP?k0A6{q4cG`KWW~=%YyeiJ$nYR_>P4DEowXbA$7?~14s6MnU5=N} 
zj{dke*MFebdwG?KqoFG^~qG@MNA1oDbo7FW#!;JUq-3Z1}lqJs@Eg?KH4;U1-oe*Yna!m;ZXC`_JX|)r8rr|MGlADWnSlYG>P}RS+;NQW4qf z*ockxUa>)npSI%VgBWFX9f&O#Koty0;OBCHEX3m|;C{o$^Udb#O|ddnhG8%bQu?$s za|Hi46;hUVsPJWN*^Es=(Z;qS=#gcnCU zW-+3Z0CPsoysv8;7UK&{wro}3{kVy5^?&rB48y;kqYogsJ^?~yKR3vR5zpNE9_PK_ z@Ei%xZ@!vsE)rj490PG-?JGcwbg4I+3Q=_d;6zlu7zkQxgKEIWZavh&F97X`*AXZf z*jxv~%mqf*t480-mxKUD!t;m&wa-C3TM0Z*c@r7pcPN}B>J!J?C%7J1;&o;S^?!Ih z^R@0z-6mL2wFc8=RRn+=EZuHQMpv&3x!5>>y_$fC<*H9jcf+-d~O%!h8c|yGwSgI#QZX!<)azgo|I4ix>DFY5P^i zdOQM<85@AfC2A4~^5{OAZ2Fb;E^|E`g>}65HhX#Yh)WCA)}vkvHE8 z?|!FMx+sy?8Y_MmfOSRP#VaIG2hO+F+&DMZ!6wW_K+xfHl*8fk6;mV0Vu2>&iCXXI zRzD5N-Ywm1X6gis1p+(0{nG*;MOw~3=UoW#=x6C3-NN%~jRzlg3Y&I=IL483ajx?o z>zYuUE~w@g{Ygys! z`KX7|i7*%`HAXQR+8qcUb;O6dT0j}xxz_{`7h2&wvX4B>j`IA@^C~fUYemED^@n|r z%WX@odWb&5@98%Gwxq2v4hHHXoMUpNl{mUJOWq^R!wz~jAMH8*dBeVlJ{$N}1C$l7 zYc5JDFG^>~XF6j?_09!1H!nQWU*lGeHqE#jqJ74<1QGq@8T$`x65>~5;y**M+xh`w z$e_3f3T+zr;05$xklQ`?>e;<0G5-ygFr%Yffkx{ix6JFqyaI#QKXUHzH-M6AyablV z3x23`&$&&%Ls>spH^Yt|kJ;JzD!p_eP@<=xx?z>A@;;uMX}zYx*NUWJ(?AH%6-lP>QMoJr^t66AN`L3q>pZ%spdsn{^l5Qasaeb z{PeB(7t`)JP#5S(D~ADVnD`rN$i1n2ay5in>DQhU7IGk1{Bp?#0Alof>`(oo{xD2) zS{r2WbbLEQ-fz}{szBL_@E|q=?UfN@4QGRn->R)kO{8@V&-%0{^{XUV(;3{hHd9F> z%Ztl~-*e1f=(XsJ#kQs)JT)rOUQ7@C9(Tmss~wx)1ng0MGP< zOCG^{&=ajmAn!T!E`xf~D|cSN%7Zn}Vs}Ap^~WT1nvGhA)88-jARuqR^?Z*KwPypE z^}c{i=p4a=(WIt|^S^q~kpc+0vZL4tJ5ru!_+3={6`*zqGf$=6CN#AtxSTK*211`p z<&s9a^qeb;rD0LaW-s@GkvNq7N>niA9B;N@_>>{lL;hF>6Hvhwu}B|+I03C%xT}8Q z`>k2FS`lZK-lkTmvx#C~w@#6-fiK>JeyqDbtlX(7`sP!)(}4brcIdL(?^5qc6av|8F#|r{?iUAii?Y8)J3aIsSZC!Z&PVHh>AXa#2BZkS2n0)E6ns78SjlN5NjBU~jcf7j5iZ|Jm7SX{D>;gcA@w;sd2pe@$hcf6dgR?ZTaMwi9q$BKw2OAAg8eZ*sR?@V1}IUYV)J%JD7^HC|8) zC4ILCcYq=O7?a@#G&mKP-@w1 ztuWF=@wsm@9*P^)fzoLk67%U5!=Hgh*D4>QCY&r$E z&%`oD+%vu&8<{lVZ*H>~ohclFbBGTv4y*U~V%TRfLVJfrM&Ii)-7Zw`6KCZ8gWCYsXdI=XxB$4R9{NSqAIbD4;*NX;Yi@!wAGwgDFh&W}yRL116^GBNQO!;n4<%3h4>ycT~ z#i^Wm&|{0R5kb>Q%TJbIDK@XHCZ=4*z8(3f&RaQiI8hue>l(>%ZL)1JDV+`Q>!AGO 
z^Ga}%o8s66QR^zCju(bfhl&__GdOaNm*$2D$)pnL!8p$((&nD^@P&u+pf6aMJQxM@ zvU#eF=@y3UwBLqCrruAK;7kJ5Qip%QW&uD}czV(A5cKs4WBxhZgeseT?e%O1H4Bf` zmLRq`l5tpH2wS49LL~~qUGzxox^{7yv?`y}>%9J@FY+6Vj9|idKh$Y7s^$qmZfzbP z7@>}gN>i2BhvmUDnraQ@DQV6yv{g%Qa|e+Zscx5<6#cq^XMixv5GpD?0S<%BlLuI9 zQ`kWgzv?r>Xklsp!2-}9E;NyK^`QV(^;_fmZ@e9_f02jsN=;YWQ-*drCayC9LJ+3T z6x|2y-%rxP`R!=rr?>QCzyj}2utXm92O#cpjlnoFlaH|5a5zMy8|R`IaV61l@ChLU zHk=|nXM8Ih7iPH2)E@pDI=gOU(Z-aB#>(G+#Hv&mASku4NXOgoJqj@W@a5Mh3%1`8 z)hEv*JUM861A7~zAgOZ|Cw>+;bL)-;^|WAf`z)?1c@;PE_p`0I7wR_0+o_5$i&LJ8 z&~KUeJk3NGJCeu&OYw^zdMzEa#82N(Yvtt3Q4w4(J9qO5ML6ilYw1kCKBwj=2FT8dMN58tvE$UP;-DxiM z0844xtCBA&GbZ6|8~p7wz6uBHRX*8{76&WEf}aI>R4-C~8xnb$3@z@9{-}kcEc(0p z5?EMN!faBVYE4->?}+XC=T^Hq|4hDNG z-fWWCLCP>|jSL!|AVUl|MjEj_xedoqi}cbNl=Q<{aX1PZyy7pMjC&H z+6N=1*y4BJe17R&QWln^pwQc7z(bbC8E1p~Kw=rv8x*_&4p(9>B z{LX|a38gc7wWUbM*t?D^ql=lTjf8XS=rl(0vtBn;4&{58>F;({B|Csx{(GFq{%B{; zz($v`NZl_>Hk;&eJ$yMZJdPCJiR|q#!79jZ;9dc2S`!(jU0(X9dP-=8D$8nhV+_sH>wevR2xLKV(gs+_LohOb`!Y>s}i z@nyy|p{cj30<7VUf8tmZowy%|KEKrvcaZh^u)!vL7pb;P&ndOIx?; zRe}Oh5NXZeDJ?u}Uti4VmyRwX@Ln8gwZ7!F!YHYt>AwSPw)3~KDli^!?&jKTFP}#1 zR=68?U5Xe`;$ESD7c5KqDj?~OA!EZK!|Zc~j zPuP{@0NSUDsZ@p;(OQz25Kta83@N--??jtXX%KAMWU@AeY`oiVaN~h+Q(6O76x6*YpMp zpEx6_uTr9=&IOhqXW>!0zpMvn>mvH0z*{!}M7Vlm%I|7wQ}DT+-?New@9K2F>oYgM zHO?C(YP$ncu1~*HUtiP*8gd#<4t`{JD+R!sSeOpZu*K0pEkz}N(r6TVDH3>U2M9}n{G98o#F1W&Is81 zK7pgr(cos8$dzsjD^gm|pr%JvDH-W7O}T|drfCr0pWd19NeUuDt6KiqbE+bDV}7}z zos3W@oe{H4Ia7#jXmPu%4RZ&5TAhT%JOr;l3wI}1Oyrov))ETp^GI+os$diZxw||GJ#QR^eh~rX|Az8wVZ0e;*=Naof&4tS~fdXazAKBLkvwotTKq~I?tao>q2_>r@ zs)RE(Od;6Y40ijB(Z~We7J!w0vuwv!^I+NkCZsqEq5Khdk$oFiV~BkInnWLceRff+^ZqDBT9y*T_&w1eVFcw9KUjDV-z9 z)=F0b4As<`GUBi7QV%mySn=kyL$2z#$`lXJ4p@D%tx?!l>4MR$VSz;GpPbIQTufg( zQ_U&8dq_obGkBY7m)B>lLa;qqMerTx$vtySZ1(#Gu4-39Aqggjz@w)#N%6x)a@lNm z=EQLA2!>swD7) zm0Jb!sU()LpQnO!H9VBA-sx-%Lm$soRV3j{fnt7pR7_7 zM2j;P3=?D^EHJXWGS$Cz6&`e#!8F7Hb2U3pWc6|7jj$wKG+ZBR19&<^Ct(Bkug~6y z3_v($E0W1gJFjxG+#B9@fsW7IKrVT$gmWgG#jzL3z?kb(+5X)xPc>?D)5Bo3AJK*& 
zN5&~daWrV)x4_oAQq?z_vAHZ%q!&yIak%(v?xigrNs8a|-)@&BzY6swCb&w9W|+r{ zTYiX5cKCy8h^m1qpSH(2)lANNZYi|;u4r?n<)x8Ay0I~ls&Jn_jEE=bI^6xewLMyc zqfp@6?TyP`!AwaLV}EY81kwV)2fprE$~1ZDktjA=feP{+%4$!WKg^xbv{Z?J$rv$8 zqwU=6awmEE7|+M{3E$r_r4~d~b9c9-vD3|S**PLlxJYE$TmZi0S7f5`Q?yU;Fw1oG zQQ(WC4Jo2g%09w7dywavgMR4p zKpf@L_|wCH#*wn91%v=5T=&h?{AC(-4%T(g<;Je34l96_V<%+cZ5Eq!-Ct!oPi`zW zcbGB;YM3wf*6GmrvdqSr(u2v8?W1Cg2E1*Xk{<0(uwZT(No*`haiK|%Z(%*X0Mmn} z^NvDctNGV}1GRVDl+p>E-*O6#!`mg1I44WzJz-R>in?sex8f3>XnkxKMZ_s}MZ7w~ z!z-2FA>B7gNbjZOQgQ0X3k9_WYy&&G8S;I@kVhtXrnpO&C`urO4;Ap0y9*0v!P4s^vGn46lv3 z|7^Z~`9GHF5J8|df5p^)R^nbmIW8nX=no4>#Wv^VW)8&=5^OflA68$tL8RwU5)YEwCnEM;Te+IHi zIWlwkOvtg{0#4Jy9ya|dyCQwKSB^sno%ao3op3ts#&+V*ZoQ?S{9YkgJ|5_>-)dL% z>7`v|o0moD8#7~qFM?y^NW*HF|M>Ziq^$LUe`FH&ECEb#npwf5SuD=pcOIP6*X^fNG3dWN$FrM^pc#06zU>qxa2WdvD8!omeXC0+I1 zcWES+(_x)=FxclCq&zZ9d_M2Qk6zS<*CG9hkk9sv-=1I~Pm~Pw=%Zd6EDPAMP0U?Z*>i>;-G@x#n7X z?KQujIpcH=NC6JG1+jf9f8-7iryKgs0UqhAC1G3Ngl&#T9d~6%1^Z2-guH{>*)vWf z)acG8<9ChO)nQ+w)IZp1siU1NeAg7eCAUT-sP*liyP=NOwkC_>jPC}qgtLY#th10M zTIm)Myehh0(cE)GzM2d|#rYq9a72yA>10!O-|%`;b@;WooehuT)l+P!*`d>siaKHI zk#nCbiAk3(u6*wri3`53;bvAFdhgQi#A#k|28*quRn5H=l{m z54FHe%$HV9j?j$GRj2Le$G_>N>GU>I%g~BDHpB@7sRQ$O&tIsy4Q&6^VxY>3y1_=R zc-`6O^whA(&V%Hl+0lJ~W6ql1EfN-FnHmz22(aLCR&=k&Id>^cl((+elbJTwCmw>B zs6=J86Brj4X~xL{%`kKEr&*}hK(;!xVd&bSzhf>|uMetRyb~JtGe{o8_)w!J>+AKu zYBRs}1D57fllmZryx(^Bc@Fc)Rn7P(AHGVHJ;tiuoSBvMF1vDu^+^;PvQEp_k5j{- zup`Bn&DKDY2g~EbZ&w}?%{~wnv~OTOG+|5!zU2I%%LP@Ia@gFirtnMWr7$+rGBIC- z%SY+^jnpXeeVtsrN~&?45gGPDjww4P^`6+}mQ6pWV?ws993omLi+tIfPYLP6zZIBl zNjRT&K4mKaN}4tUhPCsY1D6xqQ^}mlcFg>47c#s$fg0TDTO%8b?o7ou^*?S*tSzX5(dL((WtnPQ5Epi2?9O@41LqhwxtVX0C5&`z}4I=ZSuP04Z)4yTKXGPq|$0 zle!*b*|2{sXR2mE_C~6rV2u~3!TI_pRJT0GvU;XfIil34@?!ihJ`HuZN1>Z9MT(K` za`E_Ti`wP)=jwPP+CNy?<@%mYNGp)=tjtkv_XnX?k<~LJZd)4RqX&g_OAMok2KR=Q zEGShz*~9K?p)1pCdP_Qeoq|H=MUZ0szK3p&pS_;0kci^S*>5}Nv~lIl5hk_IRyz`O zM+rBW^rZeYC*nEtQoEC#sP_6> zG|>y^L^!g^krB3)b45oEvDy)l{_W&n78HGO4r8jj6ew>px6%j+zeA9k%%hl7 
zxHytP5M{7wXRQAH?!8yawlEIO*SA&3Cy|w@yFKlKi4qO$Jm=_rjs#Y&OVv`1{g?73 z5<111iIMP-)@U6Y zP!&kyOA|btWr2eEKOK|m|2nA$Tqe}h)cEuBpO^#s_ut6&Mkn4jd4FFB^#&#q>E{QT1-5f6RIavOj)*i5 z*-fNFKes*Je|uSZx_JLSyA#%pGixDr>e)Aj^tS8nCb0+k;$MqpV*S<#F20$8x2`6~ ztWLhZe^g1ITK~Dqb2{L3 z_fDK0(VznT`pR6Roa>jMaO$HGL=pk(Rw?R#?#RkUvW(cGmt(`^4%70RD^6YPq6x(K z_B=Gj2^CPvZ7X9wkVRA`{Ee#O$0ei?$*dV_o_w!fmTOZ2J7l77JB=x^V4|>|MP&yE zjkx9xQif1cu+eLmX8>+qbrYR2)z*JmQkxgfsmf|_8=8DnRjeMA> z#?q91=N#nWtaA#76Ic+H_HpXx;Y93ZI#7Ut^Za)xdZ0B20XEI)r{1yH^N|~1tD}b~JtIwmMB3(a`N~Eh)f%-`2F{O|J zKEwFV1G=vEAh2{F*<_`T-hG^Ry_q~*KElyfKOIUD`_fKez(jo-Ih@L>5#`*v(W3F= z3Z8S*Q~Vt`C3o}*I8HjKd&mEW#I0u%iCbQFUG%c?Z&CY895xe2n&z*=fEOg-Tb_Gb z+E5p72-V?0(deCd#Wglh%SYIIa;w^6Ag~W|vDidEL`Tz2`#GC{SpFI*tIUx$AH6_W zzIMMsj7YVq7P@CBpGThy6J;e2PPXg?JfnVAu!zC%*hOi_;tswB*Nq(OlOp}>?MW(U zm$SRzPQ70RZRC3!eB?RW zzpH%x9tfj1k|qe`C8cI-dvInuuUIk{PZAKJ*XEnMS%t@~zo;x6`Y_;JUpUu${K(P` z(J5=@Bj3R>e&}BW)Ze(2%ou9x$P;nEH|JxBfKzv9{MNW`UxSS#n#7<~BoI!+a#w<< z<^?UO;Dh`4CQS3T$l!+UGhVfc|s5rwHrjmfn^N6pipfaBvD{>>G%@W(#B-E)`R#CKx{`lAOE=v>q-r?hwjyNv0+!Vw%yH^ zL2bG1m!pkkO=TaceUpRE3jHBHD?h8b2Ais@duEM^_`7MYvU#NJYz@e7*Zk zDR$aU15C5F_Du@X@Li`O1lMhoPShc1cQoZ3=LUV}xZJlSiRi#Bus0O&q-UzP3x_*f z)yZ+FWaPG_ZVRjj>mnzTp0k?df)Zz*dYGtpgdz%{(Cg2+8o#6B8?AS#%K!VP5bN(v zIMuAY88Hr`uqR-%e`bL9H}@xd{mh#6=g3kJa(~Vo|0)ntG%9~4+1U`i2IlL6-AMxV zXGY6F$niu!V(b~Jk9j@Yv%IlRZBwcPNJ9#&)-QJFV>`Xw|F!bkOk?VohdkVF3D$Rj zRpWVSF$xG5cuHkEeI0>%U>Kxce{_<|QLXDyK}xv$w6UBxK9*!OqL9sPNC^y=A2-ym zF5fn)ld`Gc1McP)_V6tdxSQy^8yDa6l7{^YxGtro>&3se7976eQG$I}sUsxzwh3!$ zBw<3tCm_j^dhVp!CTz{|FZ^*~#1P3XOpfg%?7XSbvJ`Z=Ha9Vhc8nS zdUG!gzx^$ifFzXyLAPW|1M8MCwhap4{Rq|@8X2eWX(2GI1a@CVMXsm!(G!*T)x=*1 zEF+(1)KiaXzFe0Z22j6zXN>Q?G=|kJ2ORGvhXXIFt#p;V4VkYO&Fix%Nvy2LfpdY% z(}piXCzM=1^*SmNmH%i<^#>s6h|=Q7webng3e|eChojrzp=H7K2}mB8Ic==7#GZ#a z!mRi}-*Z~~;M+^`H9jV2uCB`zai^G^I5S9TXmY5_YrhEkmLU9AO*BwsT|;w)n9_#M zX>BTwBM-Cm`l{jLaFcV@(nP@j_|R>=j}N=i9(fehe--BHJ5Le!$ViC$6RQl%yrwvz zWY%`Wykuz26RYfja!rBLPx{lCJb=D^)o$YfcW974Jpl{-3xSE01^`lqA^%4yPn7PAgtQG21)7&(2)*|=!_WLM^Eu# 
zGwt(x57?gb-VV|dG)f5SaZ3!`F-_cFL7iaSgUyh84G`?HprZ%s6kg`9>fk_ipLsR^ z88D7!_|a~?-&O{^6|f%-XIg8nqi6ojGyPwfCM$R@c6jmwscsc2An_1sd*ov=FgWK* zc+2E8w*bRYI=cN&^|Uq2cS)-tVG=>`_Qqc_Rj1jQ;{hVoyJFD^EQ@ zvCc9`GYHh7b=}pR4{czPoT%?JcL_kI`GmZc-fSmHPe9@Vf%PUfw>9y8pX znoN;Z_3L1a$+j5)78~Fow@%M0s0>{4Ve4UiFFx&?K`s;Ud!3ANeHzn9b9@!%iqem1 z<+oh|K$fp%YkC7D4#o_^vZ}*?cDBz3ThL*LTSNC?yI?wC_Qtp){Oo7P0IjCJ?lswO80p18bQxo0uadFmwX|Sd(WTLq7H}D zmFDebsKwMv&p`44*W8QWt$+ok!1g0JmnNNV1T9KHKDpk4`jBjR9H8+o-R5HdHR+(c zxqxp0DWuGxWKXrVJw;=uy5w;~81gK6YKH71|kWj$8};+xr3Qm+`&TE>{z ze|!VA1j|5jrg;6t%u4G~ayvvw;-3%D2+3dFWZY}PgMcVGm_`y3-kD+8RxR=ayYsH4e z)_q$GfVLFZC0M)eSwa{$3bUd)%!m^3=+4fx1^plU{+jFiz;e#V-joPol)q>H-mh_| z!IJfRJpOf64&p88XDQeIe-}Yl$NtmRFKhkBfO{VDAGi2tQvCOBfurspzaSS9jd(mq P2Y&Q)jJ3-&(c%9ED>3YZ literal 0 HcmV?d00001 From 51b2dc6e040232384b2799916d9723970fbdf74e Mon Sep 17 00:00:00 2001 From: this-is-tobi Date: Wed, 4 Oct 2023 21:54:25 +0200 Subject: [PATCH 10/31] feat: :sparkles: control ingressClassName with dsc --- roles/argocd/templates/values.yaml.j2 | 16 +++++++++++++++- roles/console-dso/templates/values.yaml.j2 | 1 + roles/gitlab/templates/gitlab-instance.yaml.j2 | 1 + roles/harbor/templates/values.yaml.j2 | 1 + roles/keycloak/templates/values.yaml.j2 | 2 +- roles/nexus/templates/ingress.yml.j2 | 1 + roles/socle-config/files/crd-conf-dso.yaml | 3 +++ roles/sonarqube/templates/values.yaml.j2 | 2 +- roles/vault/templates/ingress.yaml.j2 | 1 + 9 files changed, 25 insertions(+), 3 deletions(-) diff --git a/roles/argocd/templates/values.yaml.j2 b/roles/argocd/templates/values.yaml.j2 index 2ae8dd97..cee1a288 100644 --- a/roles/argocd/templates/values.yaml.j2 +++ b/roles/argocd/templates/values.yaml.j2 @@ -86,4 +86,18 @@ extraDeploy: kind: ConfigMap metadata: name: argocd-rbac-cm - namespace: {{ dsc.argocd.namespace }} \ No newline at end of file + namespace: {{ dsc.ingress.className }} +applicationSet: + webhook: + ingress: + ingressClassName: {{ dsc.ingress.className }} +notifications: + webhook: + 
ingress: + ingressClassName: {{ dsc.ingress.className }} +server: + ingress: + ingressClassName: {{ dsc.ingress.className }} +server: + ingressGrpc: + ingressClassName: {{ dsc.ingress.className }} \ No newline at end of file diff --git a/roles/console-dso/templates/values.yaml.j2 b/roles/console-dso/templates/values.yaml.j2 index 22739916..683070d3 100644 --- a/roles/console-dso/templates/values.yaml.j2 +++ b/roles/console-dso/templates/values.yaml.j2 @@ -1,4 +1,5 @@ ingress: + ingressClassName: {{ dsc.ingress.className }} hosts: - {{ console_domain }} annotations: {{ dsc.ingress.annotations }} diff --git a/roles/gitlab/templates/gitlab-instance.yaml.j2 b/roles/gitlab/templates/gitlab-instance.yaml.j2 index a5e29bd7..abea705e 100644 --- a/roles/gitlab/templates/gitlab-instance.yaml.j2 +++ b/roles/gitlab/templates/gitlab-instance.yaml.j2 @@ -70,6 +70,7 @@ global: name: {{ gitlab_domain }} {% endif %} ingress: + class: {{ dsc.ingress.className }} annotations: {% for key, val in dsc.ingress.annotations.items() %} {{ key }}: {{ val }} diff --git a/roles/harbor/templates/values.yaml.j2 b/roles/harbor/templates/values.yaml.j2 index d8764230..e6ad3d44 100644 --- a/roles/harbor/templates/values.yaml.j2 +++ b/roles/harbor/templates/values.yaml.j2 @@ -19,6 +19,7 @@ expose: {% endif %} {% endif %} ingress: + className: {{ dsc.ingress.className }} hosts: core: {{ harbor_domain }} notary: {{ dsc.harbor.subDomain }}-notary{{ root_domain }} diff --git a/roles/keycloak/templates/values.yaml.j2 b/roles/keycloak/templates/values.yaml.j2 index db0b3caa..fdb4890b 100644 --- a/roles/keycloak/templates/values.yaml.j2 +++ b/roles/keycloak/templates/values.yaml.j2 @@ -112,7 +112,7 @@ service: ingress: enabled: true - ingressClassName: "" + ingressClassName: {{ dsc.ingress.className }} pathType: "Prefix" apiVersion: "" hostname: "{{ keycloak_domain }}" diff --git a/roles/nexus/templates/ingress.yml.j2 b/roles/nexus/templates/ingress.yml.j2 index eff8a8d6..d42c8da9 100644 --- 
a/roles/nexus/templates/ingress.yml.j2 +++ b/roles/nexus/templates/ingress.yml.j2 @@ -23,6 +23,7 @@ spec: secretName: nexus-tls-secret {% endif %} {% endif %} + ingressClassName: {{ dsc.ingress.className }} rules: - host: {{ nexus_domain }} http: diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 5edf1e1d..183319a1 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -278,6 +278,9 @@ spec: default: {} description: Additionals annotations to add to all tools' ingresses type: object + className: + description: Ingress class name to use for all ingresses + type: string labels: x-kubernetes-preserve-unknown-fields: true default: {} diff --git a/roles/sonarqube/templates/values.yaml.j2 b/roles/sonarqube/templates/values.yaml.j2 index 96367d79..a2a9d34f 100644 --- a/roles/sonarqube/templates/values.yaml.j2 +++ b/roles/sonarqube/templates/values.yaml.j2 @@ -33,7 +33,7 @@ ingress: {% endfor %} # This property allows for reports up to a certain size to be uploaded to SonarQube nginx.ingress.kubernetes.io/proxy-body-size: "64m" - ingressClassName: "" + ingressClassName: {{ dsc.ingress.className }} labels: app: "sonar" {% if not dsc.ingress.tls.type == 'none' %} diff --git a/roles/vault/templates/ingress.yaml.j2 b/roles/vault/templates/ingress.yaml.j2 index 1c097273..1c18da9e 100644 --- a/roles/vault/templates/ingress.yaml.j2 +++ b/roles/vault/templates/ingress.yaml.j2 @@ -23,6 +23,7 @@ spec: secretName: vault-tls-secret {% endif %} {% endif %} + ingressClassName: {{ dsc.ingress.className }} rules: - host: {{ vault_domain }} http: From 34bda7b662b3d6c1005dc86141c307fa17bc0d71 Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Mon, 9 Oct 2023 18:09:57 +0200 Subject: [PATCH 11/31] fix: :ambulance: on a fait n'imp --- roles/argocd/templates/values.yaml.j2 | 10 +++++----- roles/console-dso/templates/values.yaml.j2 | 2 +- 
roles/gitlab/templates/gitlab-instance.yaml.j2 | 2 +- roles/harbor/templates/values.yaml.j2 | 2 +- roles/keycloak/templates/values.yaml.j2 | 2 +- roles/nexus/templates/ingress.yml.j2 | 2 +- roles/sonarqube/templates/values.yaml.j2 | 2 +- roles/vault/templates/ingress.yaml.j2 | 2 +- 8 files changed, 12 insertions(+), 12 deletions(-) diff --git a/roles/argocd/templates/values.yaml.j2 b/roles/argocd/templates/values.yaml.j2 index cee1a288..feaebfed 100644 --- a/roles/argocd/templates/values.yaml.j2 +++ b/roles/argocd/templates/values.yaml.j2 @@ -86,18 +86,18 @@ extraDeploy: kind: ConfigMap metadata: name: argocd-rbac-cm - namespace: {{ dsc.ingress.className }} + namespace: {{ dsc.argocd.namespace }} applicationSet: webhook: ingress: - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} notifications: webhook: ingress: - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} server: ingress: - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} server: ingressGrpc: - ingressClassName: {{ dsc.ingress.className }} \ No newline at end of file + ingressClassName: {{ dsc.ingress.className | default('') }} \ No newline at end of file diff --git a/roles/console-dso/templates/values.yaml.j2 b/roles/console-dso/templates/values.yaml.j2 index 683070d3..076ca5f8 100644 --- a/roles/console-dso/templates/values.yaml.j2 +++ b/roles/console-dso/templates/values.yaml.j2 @@ -1,5 +1,5 @@ ingress: - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} hosts: - {{ console_domain }} annotations: {{ dsc.ingress.annotations }} diff --git a/roles/gitlab/templates/gitlab-instance.yaml.j2 b/roles/gitlab/templates/gitlab-instance.yaml.j2 index abea705e..36fec8cf 100644 --- a/roles/gitlab/templates/gitlab-instance.yaml.j2 +++ b/roles/gitlab/templates/gitlab-instance.yaml.j2 @@ 
-70,7 +70,7 @@ global: name: {{ gitlab_domain }} {% endif %} ingress: - class: {{ dsc.ingress.className }} + class: {{ dsc.ingress.className | default('') }} annotations: {% for key, val in dsc.ingress.annotations.items() %} {{ key }}: {{ val }} diff --git a/roles/harbor/templates/values.yaml.j2 b/roles/harbor/templates/values.yaml.j2 index bc2ec533..b9e14570 100644 --- a/roles/harbor/templates/values.yaml.j2 +++ b/roles/harbor/templates/values.yaml.j2 @@ -19,7 +19,7 @@ expose: {% endif %} {% endif %} ingress: - className: {{ dsc.ingress.className }} + className: {{ dsc.ingress.className | default('') }} hosts: core: {{ harbor_domain }} notary: {{ dsc.harbor.subDomain }}-notary{{ root_domain }} diff --git a/roles/keycloak/templates/values.yaml.j2 b/roles/keycloak/templates/values.yaml.j2 index fdb4890b..64bde0f8 100644 --- a/roles/keycloak/templates/values.yaml.j2 +++ b/roles/keycloak/templates/values.yaml.j2 @@ -112,7 +112,7 @@ service: ingress: enabled: true - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} pathType: "Prefix" apiVersion: "" hostname: "{{ keycloak_domain }}" diff --git a/roles/nexus/templates/ingress.yml.j2 b/roles/nexus/templates/ingress.yml.j2 index d42c8da9..8bd4bda2 100644 --- a/roles/nexus/templates/ingress.yml.j2 +++ b/roles/nexus/templates/ingress.yml.j2 @@ -23,7 +23,7 @@ spec: secretName: nexus-tls-secret {% endif %} {% endif %} - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} rules: - host: {{ nexus_domain }} http: diff --git a/roles/sonarqube/templates/values.yaml.j2 b/roles/sonarqube/templates/values.yaml.j2 index a2a9d34f..ccb09cfd 100644 --- a/roles/sonarqube/templates/values.yaml.j2 +++ b/roles/sonarqube/templates/values.yaml.j2 @@ -33,7 +33,7 @@ ingress: {% endfor %} # This property allows for reports up to a certain size to be uploaded to SonarQube nginx.ingress.kubernetes.io/proxy-body-size: "64m" - 
ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} labels: app: "sonar" {% if not dsc.ingress.tls.type == 'none' %} diff --git a/roles/vault/templates/ingress.yaml.j2 b/roles/vault/templates/ingress.yaml.j2 index 1c18da9e..431989f8 100644 --- a/roles/vault/templates/ingress.yaml.j2 +++ b/roles/vault/templates/ingress.yaml.j2 @@ -23,7 +23,7 @@ spec: secretName: vault-tls-secret {% endif %} {% endif %} - ingressClassName: {{ dsc.ingress.className }} + ingressClassName: {{ dsc.ingress.className | default('') }} rules: - host: {{ vault_domain }} http: From 593d5394c7d406d925060baf784c82b8715f7daa Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Tue, 10 Oct 2023 12:02:46 +0200 Subject: [PATCH 12/31] refactor: :art: rearrange and disable argocd values --- roles/argocd/templates/values.yaml.j2 | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/roles/argocd/templates/values.yaml.j2 b/roles/argocd/templates/values.yaml.j2 index feaebfed..f9730731 100644 --- a/roles/argocd/templates/values.yaml.j2 +++ b/roles/argocd/templates/values.yaml.j2 @@ -21,14 +21,17 @@ config: controller: <<: *securityContext dex: - <<: *securityContext - enabled: true + enabled: false server: <<: *securityContext + ingress: + ingressClassName: {{ dsc.ingress.className | default('') }} + ingressGrpc: + ingressClassName: {{ dsc.ingress.className | default('') }} insecure: true config: clusterResources: "true" - url: "https://{{ argocd_domain }}/" + url: "https://{{ argocd_domain }}" oidc.config: | issuer: https://{{ keycloak_domain }}/realms/dso requestedScopes: ["openid", "generic"] @@ -95,9 +98,3 @@ notifications: webhook: ingress: ingressClassName: {{ dsc.ingress.className | default('') }} -server: - ingress: - ingressClassName: {{ dsc.ingress.className | default('') }} -server: - ingressGrpc: - ingressClassName: {{ dsc.ingress.className | default('') }} \ No newline at 
end of file From 8a4dc61af169816b6391e89268f0aa0d254f8d6f Mon Sep 17 00:00:00 2001 From: this-is-tobi Date: Tue, 17 Oct 2023 11:00:24 +0200 Subject: [PATCH 13/31] fix: :bug: enable admin group on harbor oidc --- roles/harbor/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/harbor/tasks/main.yaml b/roles/harbor/tasks/main.yaml index b576a9f9..3b32d391 100644 --- a/roles/harbor/tasks/main.yaml +++ b/roles/harbor/tasks/main.yaml @@ -87,7 +87,7 @@ harbor_config: "auth_mode": "oidc_auth" "notification_enable": true - "oidc_admin_group": "admin" + "oidc_admin_group": "/admin" "oidc_auto_onboard": true "oidc_client_id": "{{ harbor_secret.resources[0].data.CLIENT_ID | b64decode }}" "oidc_endpoint": "https://{{ keycloak_domain }}/realms/dso" @@ -95,7 +95,7 @@ "oidc_group_filter": "" "oidc_groups_claim": "groups" "oidc_name": "keycloak" - "oidc_scope": "openid" + "oidc_scope": "openid,generic" "oidc_user_claim": "email" "oidc_verify_cert": "{{ dsc.exposedCA.type == 'none' }}" "project_creation_restriction": "adminonly" From d339b2615ab7d8449590fae571d0c73c89cb4588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Mon, 23 Oct 2023 14:20:59 +0200 Subject: [PATCH 14/31] refactor: :recycle: installation cert-manager via helm chart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit réécriture du role cert-manager et de la partie cert-manager du playbook uninstall --- .../filter_plugins/custom_cert_manager.py | 13 - roles/cert-manager/tasks/main.yaml | 48 +- roles/cert-manager/templates/values.yaml.j2 | 739 ++++++++++++++++++ roles/socle-config/files/crd-conf-dso.yaml | 4 +- roles/socle-config/files/releases.yaml | 2 +- uninstall.yaml | 10 + 6 files changed, 774 insertions(+), 42 deletions(-) delete mode 100644 roles/cert-manager/filter_plugins/custom_cert_manager.py create mode 100644 roles/cert-manager/templates/values.yaml.j2 diff --git 
a/roles/cert-manager/filter_plugins/custom_cert_manager.py b/roles/cert-manager/filter_plugins/custom_cert_manager.py deleted file mode 100644 index fbe1eb70..00000000 --- a/roles/cert-manager/filter_plugins/custom_cert_manager.py +++ /dev/null @@ -1,13 +0,0 @@ -def custom_cert_manager(manifests, environments): - for i in range(len(manifests)): - if manifests[i]['kind'] == 'Deployment': - del manifests[i]['spec']['template']['spec']['securityContext']['seccompProfile'] - for env in environments: - manifests[i]['spec']['template']['spec']['containers'][0]['env'].append({ 'name': env['name'], 'value': env['value']}) - return manifests - -class FilterModule(object): - def filters(self): - return { - 'custom_cert_manager': custom_cert_manager, - } \ No newline at end of file diff --git a/roles/cert-manager/tasks/main.yaml b/roles/cert-manager/tasks/main.yaml index 00abc294..c0fd7a08 100644 --- a/roles/cert-manager/tasks/main.yaml +++ b/roles/cert-manager/tasks/main.yaml @@ -1,33 +1,29 @@ -- name: Check cert-manager deployment - kubernetes.core.k8s_info: - kind: Deployment - namespace: cert-manager - name: cert-manager-webhook - register: cert_manager_webhook +- name: Add cert-manager helm repo + kubernetes.core.helm_repository: + name: jetstack + repo_url: https://charts.jetstack.io -- name: Download cert-manager - ansible.builtin.uri: - url: "https://github.com/cert-manager/cert-manager/releases/download/{{ dsc.certmanager.version }}/cert-manager.yaml" - return_content: true - register: cert_manifest +# Installation des CRDs indépendamment du chart helm. +# Recommandé en production. 
+# Voir : https://cert-manager.io/docs/installation/helm +- name: Apply CRDs + kubernetes.core.k8s: + state: present + src: "https://github.com/cert-manager/cert-manager/releases/download/{{ dsc.certmanager.chartVersion }}/cert-manager.crds.yaml" -- name: Edit manifest +- name: Set cert-manager helm values ansible.builtin.set_fact: - manifest: "{{ cert_manifest.content | from_yaml_all | custom_cert_manager(envs) }}" - vars: - envs: - - name: http_proxy - value: "{{ dsc.proxy.http_proxy }}" - - name: https_proxy - value: "{{ dsc.proxy.https_proxy }}" - - name: no_proxy - value: "{{ dsc.proxy.no_proxy }}" - when: dsc.proxy.enabled + cm_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" -- name: Apply cert-manager - kubernetes.core.k8s: - state: present - definition: "{{ manifest | default(cert_manifest.content | from_yaml_all) }}" +- name: Deploy helm + kubernetes.core.helm: + # force: true + name: cert-manager + chart_ref: jetstack/cert-manager + chart_version: "{{ dsc.certmanager.chartVersion }}" + release_namespace: cert-manager + create_namespace: true + values: "{{ cm_values }}" - name: Wait cert-manager to initialize kubernetes.core.k8s_info: diff --git a/roles/cert-manager/templates/values.yaml.j2 b/roles/cert-manager/templates/values.yaml.j2 new file mode 100644 index 00000000..554d6b9b --- /dev/null +++ b/roles/cert-manager/templates/values.yaml.j2 @@ -0,0 +1,739 @@ +# Default values for cert-manager. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # - name: "image-pull-secret" + + # Labels to apply to all resources + # Please note that this does not add labels to the resources created dynamically by the controllers. 
+ # For these resources, you have to add the labels in the template in the cert-manager custom resource: + # eg. podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress + # ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress + # eg. secretTemplate in CertificateSpec + # ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec + commonLabels: {} + # team_name: dev + + # Optional priority class to be used for the cert-manager pods + priorityClassName: "" + rbac: + create: true + # Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles + aggregateClusterRoles: true + + podSecurityPolicy: + enabled: false + useAppArmor: true + + # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose. + logLevel: 2 + + leaderElection: + # Override the namespace used for the leader election lease + namespace: "kube-system" + + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. This must be less than or equal to the + # lease duration. + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. + # retryPeriod: 15s + +installCRDs: false + +replicaCount: 1 + +strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + +podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 
25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + +# Comma separated list of feature gates that should be enabled on the +# controller pod. +featureGates: "" + +# The maximum number of challenges that can be scheduled as 'processing' at once +maxConcurrentChallenges: 60 + +image: + repository: quay.io/jetstack/cert-manager-controller + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-controller + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + pullPolicy: IfNotPresent + +# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer +# resources. By default, the same namespace as cert-manager is deployed within is +# used. This namespace will not be automatically created by the Helm chart. +clusterResourceNamespace: "" + +# This namespace allows you to define where the services will be installed into +# if not set then they will use the namespace of the release +# This is helpful when installing cert manager as a chart dependency (sub chart) +namespace: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. 
+ # Optional additional labels to add to the controller's ServiceAccount + # labels: {} + automountServiceAccountToken: true + +# Automounting API credentials for a particular pod +# automountServiceAccountToken: true + +# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted +enableCertificateOwnerRef: false + +# Used to configure options for the controller pod. +# This allows setting options that'd usually be provided via flags. +# An APIVersion and Kind must be specified in your values.yaml file. +# Flags will override options that are set here. +config: +# apiVersion: controller.config.cert-manager.io/v1alpha1 +# kind: ControllerConfiguration +# logging: +# verbosity: 2 +# format: text +# leaderElectionConfig: +# namespace: kube-system +# kubernetesAPIQPS: 9000 +# kubernetesAPIBurst: 9000 +# numberOfConcurrentWorkers: 200 +# featureGates: +# additionalCertificateOutputFormats: true +# experimentalCertificateSigningRequestControllers: true +# experimentalGatewayAPISupport: true +# serverSideApply: true +# literalCertificateSubject: true +# useCertificateRequestBasicConstraints: true + +# Setting Nameservers for DNS01 Self Check +# See: https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check + +# Comma separated string with host and port of the recursive nameservers cert-manager should query +dns01RecursiveNameservers: "" + +# Forces cert-manager to only use the recursive nameservers for verification. +# Enabling this option could cause the DNS01 self check to take longer due to caching performed by the recursive nameservers +dns01RecursiveNameserversOnly: false + +# Additional command line flags to pass to cert-manager controller binary. 
+# To see all available flags run docker run quay.io/jetstack/cert-manager-controller: --help +extraArgs: [] + # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver + # - --controllers=*,-certificaterequests-approver + +extraEnv: [] +# - name: SOME_VAR +# value: 'some value' + +resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + +# Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +# Container Security Context to be set on the controller component container +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + +volumes: [] + +volumeMounts: [] + +# Optional additional annotations to add to the controller Deployment +# deploymentAnnotations: {} + +# Optional additional annotations to add to the controller Pods +# podAnnotations: {} + +podLabels: {} + +# Optional annotations to add to the controller Service +# serviceAnnotations: {} + +# Optional additional labels to add to the controller Service +# serviceLabels: {} + +# Optional DNS settings, useful if you have a public and private DNS zone for +# the same domain on Route 53. What follows is an example of ensuring +# cert-manager can access an ingress or DNS TXT records at all times. +# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# the cluster to work. 
+# podDnsPolicy: "None" +# podDnsConfig: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +nodeSelector: + kubernetes.io/os: linux + +ingressShim: {} + # defaultIssuerName: "" + # defaultIssuerKind: "" + # defaultIssuerGroup: "" + +prometheus: + enabled: true + servicemonitor: + enabled: false + prometheusInstance: default + targetPort: 9402 + path: /metrics + interval: 60s + scrapeTimeout: 30s + labels: {} + annotations: {} + honorLabels: false + endpointAdditionalProperties: {} + +# Use these variables to configure the HTTP_PROXY environment variables +{% if dsc.proxy.enabled %} +http_proxy: "{{ dsc.proxy.http_proxy }}" +https_proxy: "{{ dsc.proxy.https_proxy }}" +no_proxy: "{{ dsc.proxy.no_proxy }}" +{% endif %} + +# A Kubernetes Affinity, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# A list of Kubernetes Tolerations, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# A list of Kubernetes TopologySpreadConstraints, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core +# for example: +# topologySpreadConstraints: +# - maxSkew: 2 +# topologyKey: topology.kubernetes.io/zone +# whenUnsatisfiable: ScheduleAnyway +# labelSelector: +# matchLabels: +# app.kubernetes.io/instance: cert-manager +# app.kubernetes.io/component: controller +topologySpreadConstraints: [] + +# LivenessProbe settings for the controller container of the controller Pod. 
+# +# Disabled by default, because the controller has a leader election mechanism +# which should cause it to exit if it is unable to renew its leader election +# record. +# LivenessProbe durations and thresholds are based on those used for the Kubernetes +# controller-manager. See: +# https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245 +livenessProbe: + enabled: false + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 15 + successThreshold: 1 + failureThreshold: 8 + +# enableServiceLinks indicates whether information about services should be +# injected into pod's environment variables, matching the syntax of Docker +# links. +enableServiceLinks: false + +webhook: + replicaCount: 1 + timeoutSeconds: 10 + + # Used to configure options for the webhook pod. + # This allows setting options that'd usually be provided via flags. + # An APIVersion and Kind must be specified in your values.yaml file. + # Flags will override options that are set here. + config: + # apiVersion: webhook.config.cert-manager.io/v1alpha1 + # kind: WebhookConfiguration + + # The port that the webhook should listen on for requests. + # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. + # This should be uncommented and set as a default by the chart once we graduate + # the apiVersion of WebhookConfiguration past v1alpha1. 
+ # securePort: 10250 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the webhook component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + + # Container Security Context to be set on the webhook component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Optional additional annotations to add to the webhook Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the webhook Pods + # podAnnotations: {} + + # Optional additional annotations to add to the webhook Service + # serviceAnnotations: {} + + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration + # mutatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration + # validatingWebhookConfigurationAnnotations: {} + + # Additional command line flags to pass to cert-manager webhook binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook: --help + extraArgs: [] + # Path to a file containing a WebhookConfiguration object used to configure the webhook + # - --config= + + # Comma separated list of feature gates that should be enabled on the + # webhook pod. 
+ featureGates: "" + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + topologySpreadConstraints: [] + + # Optional additional labels to add to the Webhook Pods + podLabels: {} + + # Optional additional labels to add to the Webhook Service + serviceLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-webhook + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-webhook + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Optional additional labels to add to the webhook's ServiceAccount + # labels: {} + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + # The port that the webhook should listen on for requests. 
+ # In GKE private clusters, by default kubernetes apiservers are allowed to + # talk to the cluster nodes only on 443 and 10250. so configuring + # securePort: 10250, will work out of the box without needing to add firewall + # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 + securePort: 10250 + + # Specifies if the webhook should be started in hostNetwork mode. + # + # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom + # CNI (such as calico), because control-plane managed by AWS cannot communicate + # with pods' IP CIDR and admission webhooks are not working + # + # Since the default port for the webhook conflicts with kubelet on the host + # network, `webhook.securePort` should be changed to an available port if + # running in hostNetwork mode. + hostNetwork: false + + # Specifies how the service should be handled. Useful if you want to expose the + # webhook to outside of the cluster. In some cases, the control plane cannot + # reach internal services. + serviceType: ClusterIP + # loadBalancerIP: + + # Overrides the mutating webhook and validating webhook so they reach the webhook + # service using the `url` field instead of a service. + url: {} + # host: + + # Enables default network policies for webhooks. + networkPolicy: + enabled: false + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + egress: + - ports: + - port: 80 + protocol: TCP + - port: 443 + protocol: TCP + - port: 53 + protocol: TCP + - port: 53 + protocol: UDP + # On OpenShift and OKD, the Kubernetes API server listens on + # port 6443. + - port: 6443 + protocol: TCP + to: + - ipBlock: + cidr: 0.0.0.0/0 + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. 
+ enableServiceLinks: false + +cainjector: + enabled: true + replicaCount: 1 + + strategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + + # Pod Security Context to be set on the cainjector component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + podDisruptionBudget: + enabled: false + + # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) + # or a percentage value (e.g. 25%) + # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` + # minAvailable: 1 + # maxUnavailable: 1 + + # Container Security Context to be set on the cainjector component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + + # Optional additional annotations to add to the cainjector Deployment + # deploymentAnnotations: {} + + # Optional additional annotations to add to the cainjector Pods + # podAnnotations: {} + + # Additional command line flags to pass to cert-manager cainjector binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector: --help + extraArgs: [] + # Enable profiling for cainjector + # - --enable-profiling=true + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + topologySpreadConstraints: [] + + # Optional additional labels to add to the CA Injector Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-cainjector + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-cainjector + + # Override the image tag to deploy by setting this variable. 
+ # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + # Optional additional annotations to add to the controller's ServiceAccount + # annotations: {} + # Automount API credentials for a Service Account. + # Optional additional labels to add to the cainjector's ServiceAccount + # labels: {} + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. + enableServiceLinks: false + +acmesolver: + image: + repository: quay.io/jetstack/cert-manager-acmesolver + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-acmesolver + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + +# This startupapicheck is a Helm post-install hook that waits for the webhook +# endpoints to become available. +# The check is implemented using a Kubernetes Job- if you are injecting mesh +# sidecar proxies into cert-manager pods, you probably want to ensure that they +# are not injected into this Job's pod. Otherwise the installation may time out +# due to the Job never being completed because the sidecar proxy does not exit. 
+# See https://github.com/cert-manager/cert-manager/pull/4414 for context. +startupapicheck: + enabled: true + + # Pod Security Context to be set on the startupapicheck component Pod + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # Container Security Context to be set on the controller component container + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + + # Timeout for 'kubectl check api' command + timeout: 1m + + # Job backoffLimit + backoffLimit: 4 + + # Optional additional annotations to add to the startupapicheck Job + jobAnnotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "1" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Optional additional annotations to add to the startupapicheck Pods + # podAnnotations: {} + + # Additional command line flags to pass to startupapicheck binary. + # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl: --help + extraArgs: [] + + resources: {} + # requests: + # cpu: 10m + # memory: 32Mi + + nodeSelector: + kubernetes.io/os: linux + + affinity: {} + + tolerations: [] + + # Optional additional labels to add to the startupapicheck Pods + podLabels: {} + + image: + repository: quay.io/jetstack/cert-manager-ctl + # You can manage a registry with + # registry: quay.io + # repository: jetstack/cert-manager-ctl + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. 
+ # tag: canary + + # Setting a digest will override any tag + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + pullPolicy: IfNotPresent + + rbac: + # annotations for the startup API Check job RBAC and PSP resources + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automounting API credentials for a particular pod + # automountServiceAccountToken: true + + serviceAccount: + # Specifies whether a service account should be created + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # name: "" + + # Optional additional annotations to add to the Job's ServiceAccount + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # Optional additional labels to add to the startupapicheck's ServiceAccount + # labels: {} + + volumes: [] + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. + enableServiceLinks: false diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 183319a1..7dc910c1 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -145,8 +145,8 @@ spec: certmanager: description: Configuration for Cert Manager. properties: - version: - description: Specifies the version of Cert Manager to use. + chartVersion: + description: Cert-manager helm chart version (e.g., "v1.13.1"). 
type: string type: object cloudnativepg: diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml index c1c53866..796ed8b5 100644 --- a/roles/socle-config/files/releases.yaml +++ b/roles/socle-config/files/releases.yaml @@ -8,7 +8,7 @@ spec: chartVersion: 4.7.19 certmanager: # https://github.com/cert-manager/cert-manager/releases - version: v1.11.0 + chartVersion: v1.13.1 cloudnativepg: # https://artifacthub.io/packages/helm/cloudnative-pg/cloudnative-pg chartVersion: 0.18.2 diff --git a/uninstall.yaml b/uninstall.yaml index 63e98991..fc1b032f 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -69,6 +69,16 @@ tags: - always + - name: Suppression de cert-manager + kubernetes.core.helm: + name: cert-manager + release_namespace: cert-manager + state: absent + wait: true + tags: + - never + - cert-manager + - name: Suppression du namespace cert-manager kubernetes.core.k8s: state: absent From e92669d4662d1c52db697393a13133d29b8a1573 Mon Sep 17 00:00:00 2001 From: this-is-tobi Date: Wed, 25 Oct 2023 12:08:17 +0200 Subject: [PATCH 15/31] feat: :lock: add keycloak password policy --- roles/keycloak/tasks/main.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/keycloak/tasks/main.yml b/roles/keycloak/tasks/main.yml index eeac3734..9e8b06b9 100644 --- a/roles/keycloak/tasks/main.yml +++ b/roles/keycloak/tasks/main.yml @@ -157,6 +157,7 @@ id: dso realm: dso display_name: Dso Realm + password_policy: "length(8) and lowerCase(1) and upperCase(1) and specialChars(1) and digits(1) and passwordHistory(1) and notUsername() and forceExpiredPasswordChange(365)" enabled: true - name: Get keycloak dso realm users from API @@ -183,7 +184,7 @@ block: - name: Generate admin user password ansible.builtin.set_fact: - admin_user_password: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') }}" + admin_user_password: "{{ lookup('community.general.random_string', length=16, min_lower=1, min_upper=1, 
min_special=1, min_numeric=1) }}" - name: Create dso secret kubernetes.core.k8s: From b4c560967318333b739c66d7216f7db09aafcd4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 18 Oct 2023 11:22:41 +0200 Subject: [PATCH 16/31] =?UTF-8?q?refactor:=20:recycle:=20d=C3=A9but=20r?= =?UTF-8?q?=C3=A9=C3=A9criture=20GitLab=20et=20GitLab=20Runner?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .husky/commit-msg | 0 .husky/pre-commit | 0 roles/gitlab-runner-old/tasks/main.yaml | 35 ++++ .../templates/custom-env.yaml.j2 | 9 + .../templates/gitlab-runner-auth.yaml.j2 | 45 +++++ .../templates/gitlab-runner-instance.yaml.j2 | 22 +++ .../templates/operator-subscription.yaml.j2 | 9 + roles/gitlab-runner/tasks/main.yaml | 67 +++++--- roles/gitlab-runner/templates/values.yaml.j2 | 160 ++++++++++++++++++ roles/gitlab/tasks/main.yaml | 61 ++++--- .../gitlab/templates/gitlab-instance.yaml.j2 | 3 +- roles/gitlab/templates/operator-group.yaml.j2 | 13 -- .../templates/operator-subscription.yaml.j2 | 11 -- .../gitlab/templates/operator-values.yaml.j2 | 73 ++++++++ .../files/cr-conf-dso-default.yaml | 1 + roles/socle-config/files/crd-conf-dso.yaml | 15 ++ roles/socle-config/files/releases.yaml | 8 +- 17 files changed, 457 insertions(+), 75 deletions(-) mode change 100755 => 100644 .husky/commit-msg mode change 100755 => 100644 .husky/pre-commit create mode 100644 roles/gitlab-runner-old/tasks/main.yaml create mode 100644 roles/gitlab-runner-old/templates/custom-env.yaml.j2 create mode 100644 roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 create mode 100644 roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 rename roles/{gitlab-runner => gitlab-runner-old}/templates/operator-subscription.yaml.j2 (62%) create mode 100644 roles/gitlab-runner/templates/values.yaml.j2 delete mode 100644 roles/gitlab/templates/operator-group.yaml.j2 delete mode 100644 
roles/gitlab/templates/operator-subscription.yaml.j2 create mode 100644 roles/gitlab/templates/operator-values.yaml.j2 diff --git a/.husky/commit-msg b/.husky/commit-msg old mode 100755 new mode 100644 diff --git a/.husky/pre-commit b/.husky/pre-commit old mode 100755 new mode 100644 diff --git a/roles/gitlab-runner-old/tasks/main.yaml b/roles/gitlab-runner-old/tasks/main.yaml new file mode 100644 index 00000000..3676744b --- /dev/null +++ b/roles/gitlab-runner-old/tasks/main.yaml @@ -0,0 +1,35 @@ +- name: Get Gitlab namespace + kubernetes.core.k8s_info: + kind: Namespace + name: "{{ dsc.gitlab.namespace }}" + register: gitlab_ns + +- name: Fail if Gitlab namespace is not present + ansible.builtin.fail: + msg: "Gitlab ne semble pas avoir été provisionné sur le cluster veuillez l'installer avant" + when: gitlab_ns | length == 0 + +- name: Install gitlab-runner subscription and role + kubernetes.core.k8s: + template: "{{ item }}" + with_items: + - operator-subscription.yaml.j2 + - gitlab-runner-auth.yaml.j2 + +- name: Wait Gitlab Runner exists + kubernetes.core.k8s_info: + api_version: apps.gitlab.com/v1beta2 + kind: Runner + name: gitlab-runner + namespace: "{{ dsc.gitlab.namespace }}" + register: runner_kind + until: runner_kind.api_found + retries: 5 + +- name: Add custom env + kubernetes.core.k8s: + template: custom-env.yaml.j2 + +- name: Install gitlab instance + kubernetes.core.k8s: + template: gitlab-runner-instance.yaml.j2 diff --git a/roles/gitlab-runner-old/templates/custom-env.yaml.j2 b/roles/gitlab-runner-old/templates/custom-env.yaml.j2 new file mode 100644 index 00000000..63c42664 --- /dev/null +++ b/roles/gitlab-runner-old/templates/custom-env.yaml.j2 @@ -0,0 +1,9 @@ +apiVersion: v1 +data: + HTTP_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.http_proxy, '') }}" + HTTPS_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.https_proxy, '') }}" + NO_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.no_proxy, '') }}" +kind: ConfigMap +metadata: + name: 
custom-env + namespace: {{ dsc.gitlab.namespace }} diff --git a/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 b/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 new file mode 100644 index 00000000..952f7116 --- /dev/null +++ b/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 @@ -0,0 +1,45 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: gitlab-runner + namespace: {{ dsc.gitlab.namespace }} +rules: + - apiGroups: [""] + resources: ["pods", "secrets", "configmaps"] + verbs: ["list", "get", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods/exec", "pods/attach"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: "RoleBinding" +metadata: + name: gitlab-runner-gitlab-runner + namespace: {{ dsc.gitlab.namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: "Role" + name: gitlab-runner +subjects: + - kind: ServiceAccount + name: gitlab-runner-sa + namespace: {{ dsc.gitlab.namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + name: system:openshift:scc:anyuid + namespace: {{ dsc.gitlab.namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:anyuid +subjects: +- kind: ServiceAccount + name: gitlab-runner-sa + namespace: {{ dsc.gitlab.namespace }} \ No newline at end of file diff --git a/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 b/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 new file mode 100644 index 00000000..43f895e6 --- /dev/null +++ b/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 @@ -0,0 +1,22 @@ +apiVersion: apps.gitlab.com/v1beta2 +kind: Runner +metadata: + name: gitlab-runner + namespace: {{ dsc.gitlab.namespace }} +spec: + buildImage: alpine +{% if dsc.ingress.tls.type == 'tlsSecret' %} + ca: {{ 
dsc.ingress.tls.tlsSecret.name }} +{% elif dsc.exposedCA.type != 'none' %} + ca: exposed-ca +{% else %} + ca: +{% endif %} + #config: custom-runner-config-toml + env: custom-env + gitlabUrl: https://{{ gitlab_domain }}/ + runUntagged: true + tags: openshift + token: gitlab-gitlab-runner-secret + runnerImage: gitlab/gitlab-runner:alpine-v16.3.0 + helperImage: gitlab/gitlab-runner-helper:alpine-latest-x86_64-v16.3.0 diff --git a/roles/gitlab-runner/templates/operator-subscription.yaml.j2 b/roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 similarity index 62% rename from roles/gitlab-runner/templates/operator-subscription.yaml.j2 rename to roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 index ab1a68f8..9e56582d 100644 --- a/roles/gitlab-runner/templates/operator-subscription.yaml.j2 +++ b/roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 @@ -1,3 +1,12 @@ +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: {{ dsc.gitlab.namespace }}-operator-group + namespace: {{ dsc.gitlab.namespace }} +spec: + targetNamespaces: + - {{ dsc.gitlab.namespace }} +--- apiVersion: operators.coreos.com/v1alpha1 kind: Subscription metadata: diff --git a/roles/gitlab-runner/tasks/main.yaml b/roles/gitlab-runner/tasks/main.yaml index 3676744b..a1e6420c 100644 --- a/roles/gitlab-runner/tasks/main.yaml +++ b/roles/gitlab-runner/tasks/main.yaml @@ -9,27 +9,50 @@ msg: "Gitlab ne semble pas avoir été provisionné sur le cluster veuillez l'installer avant" when: gitlab_ns | length == 0 -- name: Install gitlab-runner subscription and role - kubernetes.core.k8s: - template: "{{ item }}" - with_items: - - operator-subscription.yaml.j2 - - gitlab-runner-auth.yaml.j2 - -- name: Wait Gitlab Runner exists +- name: Get dso-config inventory kubernetes.core.k8s_info: - api_version: apps.gitlab.com/v1beta2 - kind: Runner + namespace: "{{ dsc.console.namespace }}" + kind: ConfigMap + name: dso-config + register: ansible_inventory + +- name: 
Get gitlab token + ansible.builtin.set_fact: + gitlab_token: "{{ ansible_inventory.resources[0].data.GITLAB_TOKEN }}" + +- name: Initiate a runner in GitLab instance + ansible.builtin.uri: + validate_certs: "{{ dsc.exposedCA.type == 'none' }}" + url: "https://{{ gitlab_domain }}/api/v4/user/runners" + method: POST + headers: + "PRIVATE-TOKEN": "{{ gitlab_token }}" + body: + runner_type: instance_type + group_id: 1 + description: Default-instance-runner + platform: null + run_untagged: true + body_format: form-urlencoded + changed_when: true + +- meta: end_play + + + +- name: Add GitLab Runner helm repo + kubernetes.core.helm_repository: + name: gitlab + repo_url: https://charts.gitlab.io + +- name: Set GitLab Runner helm values + ansible.builtin.set_fact: + runner_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" + +- name: Deploy GitLab Runner helm + kubernetes.core.helm: name: gitlab-runner - namespace: "{{ dsc.gitlab.namespace }}" - register: runner_kind - until: runner_kind.api_found - retries: 5 - -- name: Add custom env - kubernetes.core.k8s: - template: custom-env.yaml.j2 - -- name: Install gitlab instance - kubernetes.core.k8s: - template: gitlab-runner-instance.yaml.j2 + chart_ref: gitlab/gitlab-runner + chart_version: "{{ dsc.gitlabRunner.chartVersion }}" + release_namespace: "{{ dsc.gitlab.namespace }}" + values: "{{ runner_values }}" diff --git a/roles/gitlab-runner/templates/values.yaml.j2 b/roles/gitlab-runner/templates/values.yaml.j2 new file mode 100644 index 00000000..1df835ac --- /dev/null +++ b/roles/gitlab-runner/templates/values.yaml.j2 @@ -0,0 +1,160 @@ +image: + registry: registry.gitlab.com + image: gitlab-org/gitlab-runner + # tag: alpine-v11.6.0 + +imagePullPolicy: IfNotPresent + +replicas: 1 + +## How many old ReplicaSets for this Deployment you want to retain +revisionHistoryLimit: 2 + +gitlabUrl: https://{{ gitlab_domain }}/ + +runnerRegistrationToken: "MIdOlHo6WN9yAP07v7EM15JR8wLBrK2J6aiOxyw0jWfwAYtyO3zMdrqP3a2dGDx1" + 
+runnerToken: "" + +terminationGracePeriodSeconds: 3600 + +## Set the certsSecretName in order to pass custom certficates for GitLab Runner to use +## Provide resource name for a Kubernetes Secret Object in the same namespace, +## this is used to populate the /home/gitlab-runner/.gitlab-runner/certs/ directory +## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates-targeting-the-gitlab-server + +## +certsSecretName: gitlab-wildcard-tls-chain + +concurrent: 10 + +checkInterval: 30 + +rbac: + create: true + rules: + - apiGroups: [""] + resources: ["pods", "secrets", "configmaps"] + verbs: ["list", "get", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["pods/exec", "pods/attach"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/log"] + verbs: ["get"] + clusterWideAccess: false + +## Configuration for the Pods that the runner launches for each new job +## +runners: + # runner configuration, where the multi line strings is evaluated as + # template so you can specify helm values inside of it. + # + # tpl: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function + # runner configuration: https://docs.gitlab.com/runner/configuration/advanced-configuration.html + config: | + [[runners]] + [runners.kubernetes] + namespace = "{{ dsc.gitlab.namespace }}" + image = "ubuntu:22.04" + + ## Absolute path for an existing runner configuration file + ## Can be used alongside "volumes" and "volumeMounts" to use an external config file + ## Active if runners.config is empty or null + configPath: "" + + ## Which executor should be used + ## + # executor: kubernetes + + ## Specify the name for the runner. 
+ ## + name: gitlab-runner + + ## The name of the secret containing runner-token and runner-registration-token + secret: gitlab-runner-secret + + cache: {} + +## Configure securitycontext for the main container +## ref: https://kubernetes.io/docs/concepts/security/pod-security-standards/ +## +securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + runAsNonRoot: true + privileged: false + capabilities: + drop: ["ALL"] + +## Configure securitycontext valid for the whole pod +## ref: https://kubernetes.io/docs/concepts/security/pod-security-standards/ +## +podSecurityContext: + runAsUser: null + fsGroup: null + +## Configure resource requests and limits +## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +## +resources: {} + # limits: + # memory: 256Mi + # cpu: 200m + # requests: + # memory: 128Mi + # cpu: 100m + +## Configure environment variables that will be present when the registration command runs +## This provides further control over the registration process and the config.toml file +## ref: `gitlab-runner register --help` +## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html +## +envVars: + - name: RUNNER_EXECUTOR + value: kubernetes +{% if dsc.proxy.enabled %} + - name: HTTP_PROXY + value: "{{ dsc.proxy.http_proxy }}" + - name: HTTPS_PROXY + value: "{{ dsc.proxy.https_proxy }}" + - name: NO_PROXY + value: "{{ dsc.proxy.no_proxy }}" +{% endif %} + +## Annotations to be added to deployment +## +deploymentAnnotations: {} + # Example: + # downscaler/uptime: + +## Labels to be added to deployment +## +deploymentLabels: {} + # Example: + # owner.team: + +## Annotations to be added to manager pod +## +podAnnotations: {} + # Example: + # iam.amazonaws.com/role: + +## Labels to be added to manager pod +## +podLabels: {} + # Example: + # owner.team: + +## Configure priorityClassName for manager pod. 
See k8s docs for more info on how pod priority works: +## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +volumeMounts: + - name: root-gitlab-runner + mountPath: /.gitlab-runner + +volumes: + - name: root-gitlab-runner + emptyDir: + medium: "Memory" diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 84290a26..5e9451fd 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -4,22 +4,41 @@ npm_file: "{{ lookup('file', '{{ playbook_dir }}/roles/gitlab/npm_file') }}" mvn_config_file: "{{ lookup('ansible.builtin.template', 'mvn_conf_file.j2') }}" -- name: Install gitlab subscription - kubernetes.core.k8s: - template: "{{ item }}" - with_items: - - operator-group.yaml.j2 - - operator-subscription.yaml.j2 +- name: Install GitLab Operator + block: + - name: Create GitLab namespace + kubernetes.core.k8s: + name: "{{ dsc.gitlab.namespace }}" + api_version: v1 + kind: Namespace + state: present -- name: Wait Gitlab CRD exist - kubernetes.core.k8s_info: - api_version: apps.gitlab.com/v1beta1 - kind: GitLab - namespace: "{{ dsc.gitlab.namespace }}" - register: gitlab_kind - until: gitlab_kind.api_found - retries: 15 - delay: 5 + - name: Add GitLab Operator helm repo + kubernetes.core.helm_repository: + name: gitlab-operator + repo_url: https://gitlab.com/api/v4/projects/18899486/packages/helm/stable + + - name: Set GitLab Operator helm values + ansible.builtin.set_fact: + operator_values: "{{ lookup('template', 'operator-values.yaml.j2') | from_yaml }}" + + - name: Deploy GitLab Operator helm + kubernetes.core.helm: + name: gitlab-operator + chart_ref: gitlab-operator/gitlab-operator + chart_version: "{{ dsc.gitlabOperator.chartVersion }}" + release_namespace: "{{ dsc.gitlab.namespace }}" + values: "{{ operator_values }}" + + - name: Wait gitlab-webhook-service endpoint + kubernetes.core.k8s_info: + kind: Endpoints + namespace: "{{ dsc.gitlab.namespace }}" + name: 
gitlab-webhook-service + register: endpoint + until: endpoint.resources[0].subsets[0].addresses[0] is defined + retries: 15 + delay: 5 - name: Get Gitlab client secret kubernetes.core.k8s_info: @@ -89,17 +108,6 @@ version: "{{ dsc.gitlab.chartVersion }}" values: "{{ gitlab_values }}" - # - name: Wait gitlab instance to be 'Running' - # kubernetes.core.k8s_info: - # api_version: apps.gitlab.com/v1beta1 - # kind: GitLab - # namespace: "{{ dsc.gitlab.namespace }}" - # name: gitlab - # register: gitlab_instance - # until: gitlab_instance.resources[0] is defined and gitlab_instance.resources[0].status is defined and gitlab_instance.resources[0].status.phase == 'Running' - # retries: 45 - # delay: 20 - - name: Wait Gitlab webservice endpoint to be available kubernetes.core.k8s_info: kind: Endpoints @@ -162,7 +170,6 @@ - name: Set new gitlab token ansible.builtin.set_fact: gitlab_token: "{{ token.stdout_lines[-2][1:-1] }}" - # when: ansible_inventory.resources[0].data.GITLAB_TOKEN is defined - name: Update inventory kubernetes.core.k8s: diff --git a/roles/gitlab/templates/gitlab-instance.yaml.j2 b/roles/gitlab/templates/gitlab-instance.yaml.j2 index 36fec8cf..5d53d0ef 100644 --- a/roles/gitlab/templates/gitlab-instance.yaml.j2 +++ b/roles/gitlab/templates/gitlab-instance.yaml.j2 @@ -21,7 +21,7 @@ gitlab: minio: ingress: tls: - secretName: gitlab-minio + secretName: gitlab-minio-secret {% else %} gitlab: webservice: @@ -40,6 +40,7 @@ minio: ingress: tls: {} {% endif %} + global: registry: enabled: false diff --git a/roles/gitlab/templates/operator-group.yaml.j2 b/roles/gitlab/templates/operator-group.yaml.j2 deleted file mode 100644 index 108e7631..00000000 --- a/roles/gitlab/templates/operator-group.yaml.j2 +++ /dev/null @@ -1,13 +0,0 @@ -kind: Namespace -apiVersion: v1 -metadata: - name: {{ dsc.gitlab.namespace }} ---- -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: gitlab-opgroup - namespace: {{ dsc.gitlab.namespace }} -spec: - 
targetNamespaces: - - {{ dsc.gitlab.namespace }} diff --git a/roles/gitlab/templates/operator-subscription.yaml.j2 b/roles/gitlab/templates/operator-subscription.yaml.j2 deleted file mode 100644 index 83c979cc..00000000 --- a/roles/gitlab/templates/operator-subscription.yaml.j2 +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: gitlab-operator-kubernetes - namespace: {{ dsc.gitlab.namespace }} -spec: - channel: stable - installPlanApproval: Automatic - name: gitlab-operator-kubernetes - source: community-operators - sourceNamespace: openshift-marketplace diff --git a/roles/gitlab/templates/operator-values.yaml.j2 b/roles/gitlab/templates/operator-values.yaml.j2 new file mode 100644 index 00000000..5f276873 --- /dev/null +++ b/roles/gitlab/templates/operator-values.yaml.j2 @@ -0,0 +1,73 @@ +watchCluster: false + +image: + registry: registry.gitlab.com + repository: gitlab-org/cloud-native + name: gitlab-operator + tag: {{ dsc.gitlabOperator.chartVersion }} + # digest: + pullPolicy: IfNotPresent + # pullSecrets: [] + prepend: "false" + +nameOverride: "gitlab" +fullnameOverride: "" + +extraEnvs: [] + +podAnnotations: {} + +resources: + limits: + cpu: 200m + memory: 300Mi + requests: + cpu: 200m + memory: 100Mi + +affinity: {} +nodeSelector: {} +tolerations: [] + +manager: + debug: + enabled: true + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the name template + # name: + webhook: + selfSignedCert: + # Specifies whether CertManager Issuer and Certificate should be created + # to generate a self-signed certificate for the Manager's webhook. 
+ create: true + +app: + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the name template + # name: + +nginx-ingress: + serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the name template + # name: + +scc: {} +# apiVersion: "" + +cert-manager: + install: false diff --git a/roles/socle-config/files/cr-conf-dso-default.yaml b/roles/socle-config/files/cr-conf-dso-default.yaml index a4a92c0a..963d2520 100644 --- a/roles/socle-config/files/cr-conf-dso-default.yaml +++ b/roles/socle-config/files/cr-conf-dso-default.yaml @@ -16,6 +16,7 @@ spec: dbPassword: myAwesomePassword values: {} gitlab: {} + gitlabOperator: {} global: environment: production projectsRootDir: diff --git a/roles/socle-config/files/crd-conf-dso.yaml b/roles/socle-config/files/crd-conf-dso.yaml index 7dc910c1..b41005ca 100644 --- a/roles/socle-config/files/crd-conf-dso.yaml +++ b/roles/socle-config/files/crd-conf-dso.yaml @@ -208,6 +208,20 @@ spec: Configuring tools in pipelines container is not an easy job. type: boolean type: object + gitlabOperator: + description: Configuration for GitLab Operator. + properties: + chartVersion: + description: GitLab Operator release version (e.g., "0.24.1"). + type: string + type: object + gitlabRunner: + description: Configuration for GitLab Runner. + properties: + chartVersion: + description: GitLab Runner chart version (e.g., "0.57.0"). 
+ type: string + type: object global: description: Global configuration not specific to one service properties: @@ -504,6 +518,7 @@ spec: - console - exposedCA - gitlab + - gitlabOperator - global - harbor - ingress diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml index 796ed8b5..5c364005 100644 --- a/roles/socle-config/files/releases.yaml +++ b/roles/socle-config/files/releases.yaml @@ -14,10 +14,16 @@ spec: chartVersion: 0.18.2 console: # https://github.com/cloud-pi-native/console/releases - release: "5.9.1" + release: "5.10.0" gitlab: # https://artifacthub.io/packages/helm/gitlab/gitlab chartVersion: "7.3.4" + gitlabOperator: + # https://gitlab.com/gitlab-org/cloud-native/gitlab-operator/-/tags + chartVersion: "0.24.0" + gitlabRunner: + # https://gitlab.com/gitlab-org/charts/gitlab-runner/-/tags + chartVersion: "0.57.0" harbor: # https://artifacthub.io/packages/helm/harbor/harbor chartVersion: 1.13.0 From 7a1488d1bd270551d8ef7b491808b20da91fcb34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 18 Oct 2023 14:25:44 +0200 Subject: [PATCH 17/31] =?UTF-8?q?fix:=20:alien:=20changement=20de=20m?= =?UTF-8?q?=C3=A9thode=20d'enregistrement=20des=20runners?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Voir : https://docs.gitlab.com/ee/ci/runners/new_creation_workflow.html --- roles/gitlab-runner/tasks/main.yaml | 9 +++------ roles/gitlab-runner/templates/values.yaml.j2 | 4 +--- roles/gitlab/tasks/main.yaml | 2 +- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/roles/gitlab-runner/tasks/main.yaml b/roles/gitlab-runner/tasks/main.yaml index a1e6420c..b0467990 100644 --- a/roles/gitlab-runner/tasks/main.yaml +++ b/roles/gitlab-runner/tasks/main.yaml @@ -29,16 +29,13 @@ "PRIVATE-TOKEN": "{{ gitlab_token }}" body: runner_type: instance_type - group_id: 1 - description: Default-instance-runner + description: dso-runner platform: null run_untagged: 
true body_format: form-urlencoded + status_code: [201] changed_when: true - -- meta: end_play - - + register: default_runner - name: Add GitLab Runner helm repo kubernetes.core.helm_repository: diff --git a/roles/gitlab-runner/templates/values.yaml.j2 b/roles/gitlab-runner/templates/values.yaml.j2 index 1df835ac..e296508f 100644 --- a/roles/gitlab-runner/templates/values.yaml.j2 +++ b/roles/gitlab-runner/templates/values.yaml.j2 @@ -12,9 +12,7 @@ revisionHistoryLimit: 2 gitlabUrl: https://{{ gitlab_domain }}/ -runnerRegistrationToken: "MIdOlHo6WN9yAP07v7EM15JR8wLBrK2J6aiOxyw0jWfwAYtyO3zMdrqP3a2dGDx1" - -runnerToken: "" +runnerToken: "{{ default_runner.json.token }}" terminationGracePeriodSeconds: 3600 diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 5e9451fd..6f11d478 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -163,7 +163,7 @@ command: > bash -c "echo 'PersonalAccessToken.create!(user_id: 1 , name: \"ANSIBLE-DSO\" - , scopes: [:api, :read_repository, :write_repository] + , scopes: [:api, :read_repository, :write_repository, :create_runner] , expires_at: 365.days.from_now).token' | gitlab-rails console" register: token From 330280e06394d8fddf1f75ce1eec86f876efda0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 18 Oct 2023 17:25:58 +0200 Subject: [PATCH 18/31] =?UTF-8?q?refactor:=20:recycle:=20adaptation=20du?= =?UTF-8?q?=20playbook=20de=20d=C3=A9sinstallation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit changements liés aux évolutions de la conf et aux refactors GitLab et GitLab Runner --- uninstall.yaml | 92 ++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 66 insertions(+), 26 deletions(-) diff --git a/uninstall.yaml b/uninstall.yaml index fc1b032f..d63f3535 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -4,7 +4,7 @@ tasks: - - name: "Get socle config from conf-dso dsc (default)" + - name: 
"Récupération de la conf socle à partir de la dsc conf-dso (défaut)" kubernetes.core.k8s_info: kind: dsc name: conf-dso @@ -13,7 +13,7 @@ tags: - always - - name: Get socle config from dsc_cr extra var when defined + - name: "Récupération de la conf socle à partir de l'extra var dsc_cr quand définie" kubernetes.core.k8s_info: kind: dsc name: "{{ dsc_cr }}" @@ -23,7 +23,7 @@ tags: - always - - name: Check socle_config_custom and exit if empty + - name: "Vérification de socle_config_custom et arrêt si vide" when: (dsc_cr is defined) and (socle_config_custom.resources | length == 0) tags: - always @@ -50,25 +50,44 @@ - name: Exit playbook ansible.builtin.meta: end_play - - name: Set socle_config fact when dsc_cr defined and not empty + - name: "Déclaration du fact socle_config quand dsc_cr est définie et non vide" ansible.builtin.set_fact: socle_config: "{{ socle_config_custom }}" when: (socle_config_custom is not skipped) and (socle_config_custom.resources | length > 0) tags: - always - - name: Set DSC Name fact + - name: "Déclaration du fact dsc_name" ansible.builtin.set_fact: dsc_name: "{{ socle_config.resources[0].metadata.name }}" tags: - always - - name: Set DSC fact + - name: "Déclaration du fact dsc" ansible.builtin.set_fact: dsc: "{{ socle_config.resources[0].spec }}" tags: - always + - name: "Déclaration des facts config et releases" + ansible.builtin.set_fact: + dsc_default_config: "{{ lookup('ansible.builtin.file', './roles/socle-config/files/config.yaml') | from_yaml }}" + dsc_default_releases: "{{ lookup('ansible.builtin.file', './roles/socle-config/files/releases.yaml') | from_yaml }}" + tags: + - always + + - name: "Combinaison des nouveaux facts avec le fact dsc" + ansible.builtin.set_fact: + dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True) }}" + tags: + - always + + - name: "Mise à jour du fact dsc" + ansible.builtin.set_fact: + dsc: "{{ dsc.spec }}" + tags: + - always + - name: Suppression de 
cert-manager kubernetes.core.helm: name: cert-manager @@ -79,7 +98,7 @@ - never - cert-manager - - name: Suppression du namespace cert-manager + - name: "Suppression du namespace cert-manager" kubernetes.core.k8s: state: absent kind: Namespace @@ -88,7 +107,7 @@ - never - cert-manager - - name: Désinstallation de Kubed + - name: "Désinstallation de Kubed" kubernetes.core.helm: name: kubed chart_ref: appscode/kubed @@ -100,18 +119,18 @@ - confSyncer - kubed - - name: Suppression de l'instance CloudNativePG + - name: "Suppression de l'instance CloudNativePG" kubernetes.core.helm: name: cloudnative-pg release_namespace: "{{ dsc.cloudnativepg.namespace }}" - state: absent + release_state: absent wait: true tags: - never - cnpg - cloudnativepg - - name: Suppression du namespace CloudNativePG + - name: "Suppression du namespace CloudNativePG" kubernetes.core.k8s: state: absent kind: Namespace @@ -121,7 +140,7 @@ - cnpg - cloudnativepg - - name: Suppression de l'instance Keycloak + - name: "Suppression de l'instance Keycloak" kubernetes.core.helm: name: keycloak release_namespace: "{{ dsc.keycloak.namespace }}" @@ -130,7 +149,7 @@ tags: - keycloak - - name: Suppression du namespace Keycloak + - name: "Suppression du namespace Keycloak" kubernetes.core.k8s: state: absent kind: Namespace @@ -138,7 +157,7 @@ tags: - keycloak - - name: Suppression du namespace Nexus + - name: "Suppression du namespace Nexus" kubernetes.core.k8s: state: absent kind: Namespace @@ -146,7 +165,7 @@ tags: - nexus - - name: Suppression du namespace Sonarqube + - name: "Suppression du namespace Sonarqube" kubernetes.core.k8s: state: absent kind: Namespace @@ -154,18 +173,39 @@ tags: - sonarqube - - name: Suppression du gitlab runner +# - name: Suppression du gitlab runner +# kubernetes.core.k8s: +# state: absent +# api_version: apps.gitlab.com/v1beta2 +# kind: Runner +# name: gitlab-runner +# namespace: "{{ dsc.gitlab.namespace }}" +# tags: +# - gitlab-runner +# - gitlab + + - name: "Suppression du 
GitLab Runner" + kubernetes.core.helm: + name: gitlab-runner + release_namespace: "{{ dsc.gitlab.namespace }}" + release_state: absent + wait: true + tags: + - gitlab-runner + - gitlab + + - name: "Suppression de l'instance GitLab" kubernetes.core.k8s: state: absent - api_version: apps.gitlab.com/v1beta2 - kind: Runner - name: gitlab-runner + api_version: apps.gitlab.com/v1beta1 + kind: GitLab + name: gitlab namespace: "{{ dsc.gitlab.namespace }}" + wait: true tags: - - gitlab-runner - gitlab - - name: Suppression du namespace GitLab + - name: "Suppression du namespace GitLab" kubernetes.core.k8s: state: absent kind: Namespace @@ -173,7 +213,7 @@ tags: - gitlab - - name: Suppression du namespace Vault + - name: "Suppression du namespace Vault" kubernetes.core.k8s: state: absent kind: Namespace @@ -181,7 +221,7 @@ tags: - vault - - name: Suppression du namespace SOPS + - name: "Suppression du namespace SOPS" kubernetes.core.k8s: state: absent kind: Namespace @@ -189,7 +229,7 @@ tags: - sops - - name: Suppression du namespace ArgoCD + - name: "Suppression du namespace ArgoCD" kubernetes.core.k8s: state: absent kind: Namespace @@ -199,7 +239,7 @@ - argo - gitops - - name: Suppression du namespace Harbor + - name: "Suppression du namespace Harbor" kubernetes.core.k8s: state: absent kind: Namespace @@ -208,7 +248,7 @@ - harbor - registry - - name: Suppression du namespace Console + - name: "Suppression du namespace Console" kubernetes.core.k8s: state: absent kind: Namespace From 1f90bd9deed63bb51b2eb81853c5df392e3e2aa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Fri, 20 Oct 2023 10:16:26 +0200 Subject: [PATCH 19/31] refactor: :fire: mise en commentaire du role SOPS Nous n'installons plus SOPS dans le cadre du socle. 
--- install.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/install.yaml b/install.yaml index 1320069d..ac20e2fb 100644 --- a/install.yaml +++ b/install.yaml @@ -62,9 +62,9 @@ tags: - vault - - name: sops - tags: - - sops +# - name: sops +# tags: +# - sops - name: argocd tags: From 6967c918582f28fa913f80339dae5ae04da8ebcb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Mon, 23 Oct 2023 14:20:59 +0200 Subject: [PATCH 20/31] refactor: :recycle: installation cert-manager via helm chart MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit réécriture du role cert-manager et de la partie cert-manager du playbook uninstall --- uninstall.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/uninstall.yaml b/uninstall.yaml index d63f3535..a34324a6 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -69,6 +69,7 @@ tags: - always +<<<<<<< HEAD - name: "Déclaration des facts config et releases" ansible.builtin.set_fact: dsc_default_config: "{{ lookup('ansible.builtin.file', './roles/socle-config/files/config.yaml') | from_yaml }}" @@ -99,6 +100,19 @@ - cert-manager - name: "Suppression du namespace cert-manager" +======= + - name: Suppression de cert-manager + kubernetes.core.helm: + name: cert-manager + release_namespace: cert-manager + state: absent + wait: true + tags: + - never + - cert-manager + + - name: Suppression du namespace cert-manager +>>>>>>> d339b26 (refactor: :recycle: installation cert-manager via helm chart) kubernetes.core.k8s: state: absent kind: Namespace From a1a47091462f7748915045644cc6194d1f1cbf1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 18 Oct 2023 17:25:58 +0200 Subject: [PATCH 21/31] =?UTF-8?q?refactor:=20:recycle:=20adaptation=20du?= =?UTF-8?q?=20playbook=20de=20d=C3=A9sinstallation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit changements liés aux évolutions de 
la conf et aux refactors GitLab et GitLab Runner Correctif uninstall --- uninstall.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/uninstall.yaml b/uninstall.yaml index a34324a6..382f6313 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -69,7 +69,6 @@ tags: - always -<<<<<<< HEAD - name: "Déclaration des facts config et releases" ansible.builtin.set_fact: dsc_default_config: "{{ lookup('ansible.builtin.file', './roles/socle-config/files/config.yaml') | from_yaml }}" @@ -79,7 +78,7 @@ - name: "Combinaison des nouveaux facts avec le fact dsc" ansible.builtin.set_fact: - dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True) }}" + dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True)}}" tags: - always @@ -89,6 +88,7 @@ tags: - always +<<<<<<< HEAD - name: Suppression de cert-manager kubernetes.core.helm: name: cert-manager @@ -101,6 +101,8 @@ - name: "Suppression du namespace cert-manager" ======= +======= +>>>>>>> 11588de (refactor: :recycle: adaptation du playbook de désinstallation) - name: Suppression de cert-manager kubernetes.core.helm: name: cert-manager @@ -111,8 +113,7 @@ - never - cert-manager - - name: Suppression du namespace cert-manager ->>>>>>> d339b26 (refactor: :recycle: installation cert-manager via helm chart) + - name: "Suppression du namespace cert-manager" kubernetes.core.k8s: state: absent kind: Namespace From 957ded338dd714187be4db3a2c15fc3b5ce0b0b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 25 Oct 2023 16:12:09 +0200 Subject: [PATCH 22/31] refactor: :recycle: prise en compte upgrade version console + adaptations mineures install/uninstall --- install.yaml | 1 + roles/socle-config/files/releases.yaml | 2 +- uninstall.yaml | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/install.yaml b/install.yaml index ac20e2fb..1f020898 100644 --- a/install.yaml +++ 
b/install.yaml @@ -19,6 +19,7 @@ tags: - cert-manager - cm + - always - name: confSyncer tags: diff --git a/roles/socle-config/files/releases.yaml b/roles/socle-config/files/releases.yaml index 5c364005..abc4d254 100644 --- a/roles/socle-config/files/releases.yaml +++ b/roles/socle-config/files/releases.yaml @@ -14,7 +14,7 @@ spec: chartVersion: 0.18.2 console: # https://github.com/cloud-pi-native/console/releases - release: "5.10.0" + release: "5.11.0" gitlab: # https://artifacthub.io/packages/helm/gitlab/gitlab chartVersion: "7.3.4" diff --git a/uninstall.yaml b/uninstall.yaml index 382f6313..eccacbeb 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -78,7 +78,7 @@ - name: "Combinaison des nouveaux facts avec le fact dsc" ansible.builtin.set_fact: - dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True)}}" + dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True) }}" tags: - always From de415aeaea00ee99301d599fefd015cb6f095674 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Wed, 25 Oct 2023 17:29:18 +0200 Subject: [PATCH 23/31] fix: :bug: correctif partie cert-manager --- uninstall.yaml | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/uninstall.yaml b/uninstall.yaml index eccacbeb..d63f3535 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -88,21 +88,6 @@ tags: - always -<<<<<<< HEAD - - name: Suppression de cert-manager - kubernetes.core.helm: - name: cert-manager - release_namespace: cert-manager - state: absent - wait: true - tags: - - never - - cert-manager - - - name: "Suppression du namespace cert-manager" -======= -======= ->>>>>>> 11588de (refactor: :recycle: adaptation du playbook de désinstallation) - name: Suppression de cert-manager kubernetes.core.helm: name: cert-manager From c3d1c2352e8c45161fff11896eafba1cb35ec63c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Thu, 26 Oct 2023 
17:56:19 +0200 Subject: [PATCH 24/31] fix: :bug: correction erreur permission denied sur /.gitlabconfig --- roles/gitlab-runner/tasks/main.yaml | 4 +++ .../templates/gitlab-runner-auth.yaml.j2 | 21 ++----------- roles/gitlab-runner/templates/values.yaml.j2 | 30 ++++++++++++------- 3 files changed, 26 insertions(+), 29 deletions(-) diff --git a/roles/gitlab-runner/tasks/main.yaml b/roles/gitlab-runner/tasks/main.yaml index b0467990..6cae1100 100644 --- a/roles/gitlab-runner/tasks/main.yaml +++ b/roles/gitlab-runner/tasks/main.yaml @@ -46,6 +46,10 @@ ansible.builtin.set_fact: runner_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" +- name: Create gitlab-runner role + kubernetes.core.k8s: + template: gitlab-runner-auth.yaml.j2 + - name: Deploy GitLab Runner helm kubernetes.core.helm: name: gitlab-runner diff --git a/roles/gitlab-runner/templates/gitlab-runner-auth.yaml.j2 b/roles/gitlab-runner/templates/gitlab-runner-auth.yaml.j2 index 952f7116..c974c502 100644 --- a/roles/gitlab-runner/templates/gitlab-runner-auth.yaml.j2 +++ b/roles/gitlab-runner/templates/gitlab-runner-auth.yaml.j2 @@ -1,21 +1,4 @@ apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: gitlab-runner - namespace: {{ dsc.gitlab.namespace }} -rules: - - apiGroups: [""] - resources: ["pods", "secrets", "configmaps"] - verbs: ["list", "get", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/exec", "pods/attach"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 kind: "RoleBinding" metadata: name: gitlab-runner-gitlab-runner @@ -26,7 +9,7 @@ roleRef: name: gitlab-runner subjects: - kind: ServiceAccount - name: gitlab-runner-sa + name: gitlab-runner namespace: {{ dsc.gitlab.namespace }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -41,5 +24,5 @@ roleRef: name: system:openshift:scc:anyuid subjects: - kind: ServiceAccount - name: gitlab-runner-sa + name: 
gitlab-runner namespace: {{ dsc.gitlab.namespace }} \ No newline at end of file diff --git a/roles/gitlab-runner/templates/values.yaml.j2 b/roles/gitlab-runner/templates/values.yaml.j2 index e296508f..e65a0fa8 100644 --- a/roles/gitlab-runner/templates/values.yaml.j2 +++ b/roles/gitlab-runner/templates/values.yaml.j2 @@ -31,16 +31,26 @@ checkInterval: 30 rbac: create: true rules: +# - apiGroups: [""] +# resources: ["pods", "secrets", "configmaps"] +# verbs: ["list", "get", "watch", "create", "delete", "update"] +# - apiGroups: [""] +# resources: ["pods/exec", "pods/attach"] +# verbs: ["create"] +# - apiGroups: [""] +# resources: ["pods/log"] +# verbs: ["get"] + - resources: ["configmaps", "events", "pods", "pods/attach", "pods/exec", "secrets", "services"] + verbs: ["get", "list", "watch", "create", "patch", "update", "delete"] - apiGroups: [""] - resources: ["pods", "secrets", "configmaps"] - verbs: ["list", "get", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/exec", "pods/attach"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get"] + resources: ["pods/exec"] + verbs: ["create", "patch", "delete"] clusterWideAccess: false + serviceAccountName: gitlab-runner + podSecurityPolicy: + enabled: true + resourceNames: + - gitlab-runner ## Configuration for the Pods that the runner launches for each new job ## @@ -89,8 +99,8 @@ securityContext: ## ref: https://kubernetes.io/docs/concepts/security/pod-security-standards/ ## podSecurityContext: - runAsUser: null - fsGroup: null + runAsUser: 100 + fsGroup: 65533 ## Configure resource requests and limits ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ From 666bf4f512f7cc2eac86f7b09acdc182a70d5b3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Fri, 27 Oct 2023 11:17:45 +0200 Subject: [PATCH 25/31] fix: :bug: correctif certsSecretName nous ne voulons un certificat custom que si nous avons un exposedCA 
--- roles/gitlab-runner/templates/values.yaml.j2 | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/roles/gitlab-runner/templates/values.yaml.j2 b/roles/gitlab-runner/templates/values.yaml.j2 index e65a0fa8..f628a7c1 100644 --- a/roles/gitlab-runner/templates/values.yaml.j2 +++ b/roles/gitlab-runner/templates/values.yaml.j2 @@ -22,7 +22,10 @@ terminationGracePeriodSeconds: 3600 ## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates-targeting-the-gitlab-server ## -certsSecretName: gitlab-wildcard-tls-chain +#certsSecretName: gitlab-wildcard-tls-chain +{% if dsc.exposedCA.type != 'none' %} +certsSecretName: exposed-ca +{% endif %} concurrent: 10 From b74e5c6f8bc3440bba73a145fb06623bdb39f8ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Fri, 27 Oct 2023 11:49:37 +0200 Subject: [PATCH 26/31] style: :bulb: indentation des commentaires pour faire plaisir au linter --- install.yaml | 6 +++--- uninstall.yaml | 20 ++++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/install.yaml b/install.yaml index 1f020898..da6af738 100644 --- a/install.yaml +++ b/install.yaml @@ -63,9 +63,9 @@ tags: - vault -# - name: sops -# tags: -# - sops + # - name: sops + # tags: + # - sops - name: argocd tags: diff --git a/uninstall.yaml b/uninstall.yaml index d63f3535..de40c1ce 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -173,16 +173,16 @@ tags: - sonarqube -# - name: Suppression du gitlab runner -# kubernetes.core.k8s: -# state: absent -# api_version: apps.gitlab.com/v1beta2 -# kind: Runner -# name: gitlab-runner -# namespace: "{{ dsc.gitlab.namespace }}" -# tags: -# - gitlab-runner -# - gitlab + # - name: Suppression du gitlab runner + # kubernetes.core.k8s: + # state: absent + # api_version: apps.gitlab.com/v1beta2 + # kind: Runner + # name: gitlab-runner + # namespace: "{{ dsc.gitlab.namespace }}" + # tags: + # - gitlab-runner + # - gitlab - name: 
"Suppression du GitLab Runner" kubernetes.core.helm: From 80fb216fff23199de935c3ad0c167f3adaa65fc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Fri, 27 Oct 2023 14:16:36 +0200 Subject: [PATCH 27/31] =?UTF-8?q?chore:=20:fire:=20M=C3=A9nage?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- roles/gitlab-runner-old/tasks/main.yaml | 35 --------------- .../templates/custom-env.yaml.j2 | 9 ---- .../templates/gitlab-runner-auth.yaml.j2 | 45 ------------------- .../templates/gitlab-runner-instance.yaml.j2 | 22 --------- .../templates/operator-subscription.yaml.j2 | 21 --------- .../templates/custom-env.yaml.j2 | 9 ---- .../templates/gitlab-runner-instance.yaml.j2 | 20 --------- uninstall.yaml | 11 ----- 8 files changed, 172 deletions(-) delete mode 100644 roles/gitlab-runner-old/tasks/main.yaml delete mode 100644 roles/gitlab-runner-old/templates/custom-env.yaml.j2 delete mode 100644 roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 delete mode 100644 roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 delete mode 100644 roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 delete mode 100644 roles/gitlab-runner/templates/custom-env.yaml.j2 delete mode 100644 roles/gitlab-runner/templates/gitlab-runner-instance.yaml.j2 diff --git a/roles/gitlab-runner-old/tasks/main.yaml b/roles/gitlab-runner-old/tasks/main.yaml deleted file mode 100644 index 3676744b..00000000 --- a/roles/gitlab-runner-old/tasks/main.yaml +++ /dev/null @@ -1,35 +0,0 @@ -- name: Get Gitlab namespace - kubernetes.core.k8s_info: - kind: Namespace - name: "{{ dsc.gitlab.namespace }}" - register: gitlab_ns - -- name: Fail if Gitlab namespace is not present - ansible.builtin.fail: - msg: "Gitlab ne semble pas avoir été provisionné sur le cluster veuillez l'installer avant" - when: gitlab_ns | length == 0 - -- name: Install gitlab-runner subscription and role - kubernetes.core.k8s: - template: "{{ item }}" 
- with_items: - - operator-subscription.yaml.j2 - - gitlab-runner-auth.yaml.j2 - -- name: Wait Gitlab Runner exists - kubernetes.core.k8s_info: - api_version: apps.gitlab.com/v1beta2 - kind: Runner - name: gitlab-runner - namespace: "{{ dsc.gitlab.namespace }}" - register: runner_kind - until: runner_kind.api_found - retries: 5 - -- name: Add custom env - kubernetes.core.k8s: - template: custom-env.yaml.j2 - -- name: Install gitlab instance - kubernetes.core.k8s: - template: gitlab-runner-instance.yaml.j2 diff --git a/roles/gitlab-runner-old/templates/custom-env.yaml.j2 b/roles/gitlab-runner-old/templates/custom-env.yaml.j2 deleted file mode 100644 index 63c42664..00000000 --- a/roles/gitlab-runner-old/templates/custom-env.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -data: - HTTP_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.http_proxy, '') }}" - HTTPS_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.https_proxy, '') }}" - NO_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.no_proxy, '') }}" -kind: ConfigMap -metadata: - name: custom-env - namespace: {{ dsc.gitlab.namespace }} diff --git a/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 b/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 deleted file mode 100644 index 952f7116..00000000 --- a/roles/gitlab-runner-old/templates/gitlab-runner-auth.yaml.j2 +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: gitlab-runner - namespace: {{ dsc.gitlab.namespace }} -rules: - - apiGroups: [""] - resources: ["pods", "secrets", "configmaps"] - verbs: ["list", "get", "watch", "create", "delete", "update"] - - apiGroups: [""] - resources: ["pods/exec", "pods/attach"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/log"] - verbs: ["get"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: "RoleBinding" -metadata: - name: gitlab-runner-gitlab-runner - namespace: {{ dsc.gitlab.namespace }} -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: "Role" - name: gitlab-runner -subjects: - - kind: ServiceAccount - name: gitlab-runner-sa - namespace: {{ dsc.gitlab.namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - creationTimestamp: null - name: system:openshift:scc:anyuid - namespace: {{ dsc.gitlab.namespace }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:openshift:scc:anyuid -subjects: -- kind: ServiceAccount - name: gitlab-runner-sa - namespace: {{ dsc.gitlab.namespace }} \ No newline at end of file diff --git a/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 b/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 deleted file mode 100644 index 43f895e6..00000000 --- a/roles/gitlab-runner-old/templates/gitlab-runner-instance.yaml.j2 +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: apps.gitlab.com/v1beta2 -kind: Runner -metadata: - name: gitlab-runner - namespace: {{ dsc.gitlab.namespace }} -spec: - buildImage: alpine -{% if dsc.ingress.tls.type == 'tlsSecret' %} - ca: {{ dsc.ingress.tls.tlsSecret.name }} -{% elif dsc.exposedCA.type != 'none' %} - ca: exposed-ca -{% else %} - ca: -{% endif %} - #config: custom-runner-config-toml - env: custom-env - gitlabUrl: https://{{ gitlab_domain }}/ - runUntagged: true - tags: openshift - token: gitlab-gitlab-runner-secret - runnerImage: gitlab/gitlab-runner:alpine-v16.3.0 - helperImage: gitlab/gitlab-runner-helper:alpine-latest-x86_64-v16.3.0 diff --git a/roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 b/roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 deleted file mode 100644 index 9e56582d..00000000 --- a/roles/gitlab-runner-old/templates/operator-subscription.yaml.j2 +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: operators.coreos.com/v1 -kind: OperatorGroup -metadata: - name: {{ dsc.gitlab.namespace }}-operator-group - namespace: {{ dsc.gitlab.namespace }} -spec: - targetNamespaces: - - {{ 
dsc.gitlab.namespace }} ---- -apiVersion: operators.coreos.com/v1alpha1 -kind: Subscription -metadata: - name: gitlab-runner-operator-kubernetes - namespace: {{ dsc.gitlab.namespace }} -spec: - channel: stable - installPlanApproval: Automatic - name: gitlab-runner-operator - source: certified-operators - sourceNamespace: openshift-marketplace - #startingCSV: gitlab-runner-operator.v1.10.0 diff --git a/roles/gitlab-runner/templates/custom-env.yaml.j2 b/roles/gitlab-runner/templates/custom-env.yaml.j2 deleted file mode 100644 index 63c42664..00000000 --- a/roles/gitlab-runner/templates/custom-env.yaml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -data: - HTTP_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.http_proxy, '') }}" - HTTPS_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.https_proxy, '') }}" - NO_PROXY: "{{ dsc.proxy.enabled | ternary(dsc.proxy.no_proxy, '') }}" -kind: ConfigMap -metadata: - name: custom-env - namespace: {{ dsc.gitlab.namespace }} diff --git a/roles/gitlab-runner/templates/gitlab-runner-instance.yaml.j2 b/roles/gitlab-runner/templates/gitlab-runner-instance.yaml.j2 deleted file mode 100644 index 0926eafe..00000000 --- a/roles/gitlab-runner/templates/gitlab-runner-instance.yaml.j2 +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: apps.gitlab.com/v1beta2 -kind: Runner -metadata: - name: gitlab-runner - namespace: {{ dsc.gitlab.namespace }} -spec: - buildImage: alpine -{% if dsc.ingress.tls.type == 'tlsSecret' %} - ca: {{ dsc.ingress.tls.tlsSecret.name }} -{% elif dsc.exposedCA.type != 'none' %} - ca: exposed-ca -{% else %} - ca: -{% endif %} - #config: custom-config-toml - env: custom-env - gitlabUrl: https://{{ gitlab_domain }}/ - runUntagged: true - tags: openshift - token: gitlab-gitlab-runner-secret diff --git a/uninstall.yaml b/uninstall.yaml index de40c1ce..3d1580d2 100644 --- a/uninstall.yaml +++ b/uninstall.yaml @@ -173,17 +173,6 @@ tags: - sonarqube - # - name: Suppression du gitlab runner - # kubernetes.core.k8s: - # state: absent - 
# api_version: apps.gitlab.com/v1beta2 - # kind: Runner - # name: gitlab-runner - # namespace: "{{ dsc.gitlab.namespace }}" - # tags: - # - gitlab-runner - # - gitlab - - name: "Suppression du GitLab Runner" kubernetes.core.helm: name: gitlab-runner From 8fc0d5a2121238934a051a522696e027d86d62e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Tue, 31 Oct 2023 11:49:28 +0100 Subject: [PATCH 28/31] =?UTF-8?q?fix:=20:bug:=20correctif=20task=20de=20cr?= =?UTF-8?q?=C3=A9ation=20du=20token=20GitLab?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit le scope create_runner n'existe pas pour les versions de chart antérieures à 7.2.0 --- roles/gitlab/tasks/main.yaml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 6f11d478..0a42bd15 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -156,7 +156,8 @@ ansible.builtin.set_fact: tb_pod: "{{ toolbox_pods.resources[0].metadata.name }}" - - name: Create Token + - name: Create Token for 7.2.0 chart version and higher + when: dsc.gitlab.chartVersion is version('7.2.0', operator='ge', version_type='loose') kubernetes.core.k8s_exec: pod: "{{ tb_pod }}" namespace: "{{ dsc.gitlab.namespace }}" @@ -167,6 +168,18 @@ , expires_at: 365.days.from_now).token' | gitlab-rails console" register: token + - name: Create Token for chart versions lower than 7.2.0 + when: dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') + kubernetes.core.k8s_exec: + pod: "{{ tb_pod }}" + namespace: "{{ dsc.gitlab.namespace }}" + command: > + bash -c "echo 'PersonalAccessToken.create!(user_id: 1 + , name: \"ANSIBLE-DSO\" + , scopes: [:api, :read_repository, :write_repository] + , expires_at: 365.days.from_now).token' | gitlab-rails console" + register: token + - name: Set new gitlab token ansible.builtin.set_fact: gitlab_token: "{{ 
token.stdout_lines[-2][1:-1] }}" From 747aa1074f3d20dc65fd545487e0387a62ef4231 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Montagne?= Date: Tue, 31 Oct 2023 12:13:45 +0100 Subject: [PATCH 29/31] =?UTF-8?q?fix:=20:bug:=20adaptation=20de=20d=C3=A9c?= =?UTF-8?q?laration=20du=20fact=20gitlab=5Ftoken?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit la task doit être adaptée pour tenir compte du changement précédent --- roles/gitlab/tasks/main.yaml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 0a42bd15..632d2518 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -178,12 +178,18 @@ , name: \"ANSIBLE-DSO\" , scopes: [:api, :read_repository, :write_repository] , expires_at: 365.days.from_now).token' | gitlab-rails console" - register: token + register: token_old - - name: Set new gitlab token + - name: Set new gitlab token for 7.2.0 chart version and higher + when: dsc.gitlab.chartVersion is version('7.2.0', operator='ge', version_type='loose') ansible.builtin.set_fact: gitlab_token: "{{ token.stdout_lines[-2][1:-1] }}" + - name: Set new gitlab token for chart versions lower than 7.2.0 + when: dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') + ansible.builtin.set_fact: + gitlab_token: "{{ token_old.stdout_lines[-2][1:-1] }}" + - name: Update inventory kubernetes.core.k8s: kind: ConfigMap From 4c1e6602c97e073d2850e684e7143ffc44c10bcf Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Tue, 31 Oct 2023 14:31:31 +0100 Subject: [PATCH 30/31] refactor: :recycle: simplify token creation --- roles/gitlab/tasks/create-token-7.2.0+.yaml | 10 ++++++ roles/gitlab/tasks/create-token-7.2.0-.yaml | 10 ++++++ roles/gitlab/tasks/main.yaml | 34 +++------------------ 3 files changed, 24 insertions(+), 30 deletions(-) create mode 100644 
roles/gitlab/tasks/create-token-7.2.0+.yaml create mode 100644 roles/gitlab/tasks/create-token-7.2.0-.yaml diff --git a/roles/gitlab/tasks/create-token-7.2.0+.yaml b/roles/gitlab/tasks/create-token-7.2.0+.yaml new file mode 100644 index 00000000..6dfc657d --- /dev/null +++ b/roles/gitlab/tasks/create-token-7.2.0+.yaml @@ -0,0 +1,10 @@ +- name: Create Token for 7.2.0 chart version and higher + kubernetes.core.k8s_exec: + pod: "{{ tb_pod }}" + namespace: "{{ dsc.gitlab.namespace }}" + command: > + bash -c "echo 'PersonalAccessToken.create!(user_id: 1 + , name: \"ANSIBLE-DSO\" + , scopes: [:api, :read_repository, :write_repository, :create_runner] + , expires_at: 365.days.from_now).token' | gitlab-rails console" + register: token \ No newline at end of file diff --git a/roles/gitlab/tasks/create-token-7.2.0-.yaml b/roles/gitlab/tasks/create-token-7.2.0-.yaml new file mode 100644 index 00000000..e848c75b --- /dev/null +++ b/roles/gitlab/tasks/create-token-7.2.0-.yaml @@ -0,0 +1,10 @@ +- name: Create Token for chart versions lower than 7.2.0 + kubernetes.core.k8s_exec: + pod: "{{ tb_pod }}" + namespace: "{{ dsc.gitlab.namespace }}" + command: > + bash -c "echo 'PersonalAccessToken.create!(user_id: 1 + , name: \"ANSIBLE-DSO\" + , scopes: [:api, :read_repository, :write_repository] + , expires_at: 365.days.from_now).token' | gitlab-rails console" + register: token \ No newline at end of file diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index 632d2518..f660dd37 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -156,40 +156,14 @@ ansible.builtin.set_fact: tb_pod: "{{ toolbox_pods.resources[0].metadata.name }}" - - name: Create Token for 7.2.0 chart version and higher - when: dsc.gitlab.chartVersion is version('7.2.0', operator='ge', version_type='loose') - kubernetes.core.k8s_exec: - pod: "{{ tb_pod }}" - namespace: "{{ dsc.gitlab.namespace }}" - command: > - bash -c "echo 'PersonalAccessToken.create!(user_id: 1 - , 
name: \"ANSIBLE-DSO\" - , scopes: [:api, :read_repository, :write_repository, :create_runner] - , expires_at: 365.days.from_now).token' | gitlab-rails console" - register: token - - - name: Create Token for chart versions lower than 7.2.0 - when: dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') - kubernetes.core.k8s_exec: - pod: "{{ tb_pod }}" - namespace: "{{ dsc.gitlab.namespace }}" - command: > - bash -c "echo 'PersonalAccessToken.create!(user_id: 1 - , name: \"ANSIBLE-DSO\" - , scopes: [:api, :read_repository, :write_repository] - , expires_at: 365.days.from_now).token' | gitlab-rails console" - register: token_old + - name: Create token + ansible.builtin.include_tasks: + file: "{{ dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') | ternary('create-token-7.2.0-.yaml', 'create-token-7.2.0+.yaml') }}" - - name: Set new gitlab token for 7.2.0 chart version and higher - when: dsc.gitlab.chartVersion is version('7.2.0', operator='ge', version_type='loose') + - name: Set new gitlab token ansible.builtin.set_fact: gitlab_token: "{{ token.stdout_lines[-2][1:-1] }}" - - name: Set new gitlab token for chart versions lower than 7.2.0 - when: dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') - ansible.builtin.set_fact: - gitlab_token: "{{ token_old.stdout_lines[-2][1:-1] }}" - - name: Update inventory kubernetes.core.k8s: kind: ConfigMap From 2ddb93f918d5f0ed2bc372b9d0131cd8acc2f896 Mon Sep 17 00:00:00 2001 From: ArnaudTa <33383276+ArnaudTA@users.noreply.github.com> Date: Fri, 3 Nov 2023 12:57:59 +0100 Subject: [PATCH 31/31] chore: :pushpin: generate versions.md --- .github/workflows/release.yml | 2 +- README.md | 2 ++ ci/release.sh | 20 ++++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) create mode 100755 ci/release.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5e5cef6e..ec56ec03 100644 --- a/.github/workflows/release.yml 
+++ b/.github/workflows/release.yml @@ -4,7 +4,7 @@ on: workflow_call: outputs: release-created: - description: Has the releease been created + description: Has the release been created value: ${{ jobs.release.outputs.release-created }} major-tag: description: Major version tag diff --git a/README.md b/README.md index 2e6bbf46..e8a06839 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,8 @@ Les éléments déployés seront les suivants : Certains outils peuvent prendre un peu de temps pour s'installer, par exemple Keycloak ou GitLab. +Vous pouvez trouvez la version des outils installé [ici](versions.md) + ## Prérequis Cette installation s'effectue dans un cluster OpenShift opérationnel et correctement démarré. diff --git a/ci/release.sh b/ci/release.sh new file mode 100755 index 00000000..38a40291 --- /dev/null +++ b/ci/release.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Fichier YAML d'entrée +input_file="./roles/socle-config/files/releases.yaml" + +# Chemin vers le champ .spec +spec_path=".spec" +echo "| Outil | Version | Source |" > ./versions.md +echo "| ----- | ------- | ------ |" >> ./versions.md + +# Fonction pour extraire les clés, valeurs et commentaires +yq eval ".spec | to_entries | .[] | .key" "$input_file" | while read -r key; do + # Génère une ligne en utilisant le modèle + export key=$key + yq '.spec[env(key)]' "$input_file" | while read -r comment && read -r value; do + export comment=$(cut -c 3- <<<$comment) + export version=$(awk '{print $2}' <<< $value | tr -d '\"') + echo "| $key | $version | [$key]($comment) |" >> ./versions.md + done +done