diff --git a/install.yaml b/install.yaml index da6af738..72c6fe18 100644 --- a/install.yaml +++ b/install.yaml @@ -1,3 +1,4 @@ +--- - name: Installation du socle DSO hosts: localhost gather_facts: false @@ -84,7 +85,8 @@ - console-dso post_tasks: - - debug: + - name: Post-Install Disclaimer + ansible.builtin.debug: msg: "{{ dsc | get_debug_messages }}" tags: - - always \ No newline at end of file + - always diff --git a/roles/argocd/tasks/main.yaml b/roles/argocd/tasks/main.yaml index 9166fe37..f734b4cf 100644 --- a/roles/argocd/tasks/main.yaml +++ b/roles/argocd/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Get argo client secret kubernetes.core.k8s_info: kind: Secret @@ -12,7 +13,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - creationTimestamp: null + creationTimestamp: name: system:openshift:scc:privileged roleRef: apiGroup: rbac.authorization.k8s.io @@ -34,13 +35,23 @@ name: bitnami repo_url: https://charts.bitnami.com/bitnami -- name: Set extra env vars +- name: Set argo_values ansible.builtin.set_fact: argo_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + argo_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with argo proxy values + ansible.builtin.set_fact: + argo_values: "{{ argo_values | combine(argo_proxy_values, recursive=True, list_merge='append') }}" + - name: Merge with argo user values ansible.builtin.set_fact: - argo_values: "{{ argo_values | combine(dsc.argocd['values'], recursive=True) }}" + argo_values: "{{ argo_values | combine(dsc.argocd['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: diff --git a/roles/argocd/templates/proxy-values.yaml.j2 b/roles/argocd/templates/proxy-values.yaml.j2 new file mode 100644 index 00000000..5e2f53a1 --- /dev/null +++ 
b/roles/argocd/templates/proxy-values.yaml.j2 @@ -0,0 +1,12 @@ + +server: + extraEnvVars: &extraEnvVars + - name: HTTP_PROXY + value: "{{ dsc.proxy.http_proxy }}" + - name: HTTPS_PROXY + value: "{{ dsc.proxy.https_proxy }}" + - name: NO_PROXY + value: "{{ dsc.proxy.no_proxy }},argo-argo-cd-repo-server" + +repoServer: + extraEnvVars: *extraEnvVars diff --git a/roles/argocd/templates/values.yaml.j2 b/roles/argocd/templates/values.yaml.j2 index f9730731..db3cb92e 100644 --- a/roles/argocd/templates/values.yaml.j2 +++ b/roles/argocd/templates/values.yaml.j2 @@ -3,11 +3,7 @@ securityContext: &securityContext runAsUser: null podSecurityContext: fsGroup: null -# TODO variabilize openshift boolean -openshift: - enabled: true -image: - PullPolicy: IfNotPresent + config: {% if dsc.argocd.admin.enabled %} secret: @@ -53,26 +49,10 @@ server: kinds: - TaskRun - PipelineRun - extraEnvVars: -{% if dsc.proxy.enabled %} - - name: HTTP_PROXY - value: "{{ dsc.proxy.http_proxy }}" - - name: HTTPS_PROXY - value: "{{ dsc.proxy.https_proxy }}" - - name: NO_PROXY - value: "{{ dsc.proxy.no_proxy }},argo-argo-cd-repo-server" -{% endif %} + extraEnvVars: [] repoServer: <<: *securityContext - extraEnvVars: -{% if dsc.proxy.enabled %} - - name: HTTP_PROXY - value: "{{ dsc.proxy.http_proxy }}" - - name: HTTPS_PROXY - value: "{{ dsc.proxy.https_proxy }}" - - name: NO_PROXY - value: "{{ dsc.proxy.no_proxy }}" -{% endif %} + extraEnvVars: [] extraDeploy: - apiVersion: v1 data: diff --git a/roles/ca/tasks/additionals_ca.yaml b/roles/ca/tasks/additionals_ca.yaml index 33d669d3..be6c3bab 100644 --- a/roles/ca/tasks/additionals_ca.yaml +++ b/roles/ca/tasks/additionals_ca.yaml @@ -1,3 +1,4 @@ +--- - name: Set empty ca fact ansible.builtin.set_fact: additionals_ca_pem_array: [] diff --git a/roles/ca/tasks/exposed_ca.yaml b/roles/ca/tasks/exposed_ca.yaml index 427bcd7f..cf39ceea 100644 --- a/roles/ca/tasks/exposed_ca.yaml +++ b/roles/ca/tasks/exposed_ca.yaml @@ -1,3 +1,4 @@ +--- - name: No exposed_ca 
when: dsc.exposedCA.type == 'none' ansible.builtin.set_fact: @@ -37,7 +38,7 @@ - name: Get certmanager secret kubernetes.core.k8s_info: name: "{{ dsc.ingress.tls.ca.secretName }}" - namespace: "cert-manager" + namespace: cert-manager kind: Secret register: exposed_ca_resource @@ -50,10 +51,10 @@ block: - name: Get url ansible.builtin.shell: - cmd: "curl {{ dsc.exposedCA.url }} -s | openssl x509" + cmd: curl {{ dsc.exposedCA.url }} -s | openssl x509 changed_when: false register: exposed_ca_resource - tags: ["skip_ansible_lint"] + tags: [skip_ansible_lint] - name: Extract key ansible.builtin.set_fact: diff --git a/roles/ca/tasks/get-ca.yaml b/roles/ca/tasks/get-ca.yaml index 6c6d72c0..ed09303a 100644 --- a/roles/ca/tasks/get-ca.yaml +++ b/roles/ca/tasks/get-ca.yaml @@ -1,3 +1,4 @@ +--- - name: Get CA cert kubernetes.core.k8s_info: namespace: default @@ -12,7 +13,7 @@ - name: Set ca fact (secret) ansible.builtin.set_fact: - additionals_ca_pem_array: "{{ additionals_ca_pem_array + [( ca_cert.resources[0].data[key] | b64decode )] }}" + additionals_ca_pem_array: "{{ additionals_ca_pem_array + [(ca_cert.resources[0].data[key] | b64decode)] }}" when: kind == 'Secret' and key | length != 0 - name: Set ca fact (cm) @@ -25,7 +26,7 @@ - name: Set ca fact (secret) ansible.builtin.set_fact: - additionals_ca_pem_array: "{{ additionals_ca_pem_array + [( ca_cert.resources[0].data[resKey.key] | b64decode )] }}" + additionals_ca_pem_array: "{{ additionals_ca_pem_array + [(ca_cert.resources[0].data[resKey.key] | b64decode)] }}" loop: "{{ ca_cert.resources[0].data | dict2items }}" when: kind == 'Secret' and key | length == 0 loop_control: diff --git a/roles/ca/tasks/main.yaml b/roles/ca/tasks/main.yaml index 14d3a632..755b43f3 100644 --- a/roles/ca/tasks/main.yaml +++ b/roles/ca/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Additionals_ca tasks ansible.builtin.include_tasks: file: additionals_ca.yaml diff --git a/roles/cert-manager/tasks/main.yaml b/roles/cert-manager/tasks/main.yaml 
index 23d5415c..3fcbb192 100644 --- a/roles/cert-manager/tasks/main.yaml +++ b/roles/cert-manager/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Add cert-manager helm repo kubernetes.core.helm_repository: name: jetstack @@ -19,7 +20,17 @@ - name: Set cert-manager helm values ansible.builtin.set_fact: - cm_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" + cm_values: {} + +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + cm_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with cm proxy values + ansible.builtin.set_fact: + cm_values: "{{ cm_values | combine(cm_proxy_values, recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: diff --git a/roles/cert-manager/templates/proxy-values.yaml.j2 b/roles/cert-manager/templates/proxy-values.yaml.j2 new file mode 100644 index 00000000..df55d9c5 --- /dev/null +++ b/roles/cert-manager/templates/proxy-values.yaml.j2 @@ -0,0 +1,3 @@ +http_proxy: "{{ dsc.proxy.http_proxy }}" +https_proxy: "{{ dsc.proxy.https_proxy }}" +no_proxy: "{{ dsc.proxy.no_proxy }}" diff --git a/roles/cert-manager/templates/values.yaml.j2 b/roles/cert-manager/templates/values.yaml.j2 deleted file mode 100644 index 554d6b9b..00000000 --- a/roles/cert-manager/templates/values.yaml.j2 +++ /dev/null @@ -1,739 +0,0 @@ -# Default values for cert-manager. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -global: - # Reference to one or more secrets to be used when pulling images - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - imagePullSecrets: [] - # - name: "image-pull-secret" - - # Labels to apply to all resources - # Please note that this does not add labels to the resources created dynamically by the controllers. 
- # For these resources, you have to add the labels in the template in the cert-manager custom resource: - # eg. podTemplate/ ingressTemplate in ACMEChallengeSolverHTTP01Ingress - # ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress - # eg. secretTemplate in CertificateSpec - # ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec - commonLabels: {} - # team_name: dev - - # Optional priority class to be used for the cert-manager pods - priorityClassName: "" - rbac: - create: true - # Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles - aggregateClusterRoles: true - - podSecurityPolicy: - enabled: false - useAppArmor: true - - # Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose. - logLevel: 2 - - leaderElection: - # Override the namespace used for the leader election lease - namespace: "kube-system" - - # The duration that non-leader candidates will wait after observing a - # leadership renewal until attempting to acquire leadership of a led but - # unrenewed leader slot. This is effectively the maximum duration that a - # leader can be stopped before it is replaced by another candidate. - # leaseDuration: 60s - - # The interval between attempts by the acting master to renew a leadership - # slot before it stops leading. This must be less than or equal to the - # lease duration. - # renewDeadline: 40s - - # The duration the clients should wait between attempting acquisition and - # renewal of a leadership. - # retryPeriod: 15s - -installCRDs: false - -replicaCount: 1 - -strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - -podDisruptionBudget: - enabled: false - - # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) - # or a percentage value (e.g. 
25%) - # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` - # minAvailable: 1 - # maxUnavailable: 1 - -# Comma separated list of feature gates that should be enabled on the -# controller pod. -featureGates: "" - -# The maximum number of challenges that can be scheduled as 'processing' at once -maxConcurrentChallenges: 60 - -image: - repository: quay.io/jetstack/cert-manager-controller - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-controller - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - pullPolicy: IfNotPresent - -# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer -# resources. By default, the same namespace as cert-manager is deployed within is -# used. This namespace will not be automatically created by the Helm chart. -clusterResourceNamespace: "" - -# This namespace allows you to define where the services will be installed into -# if not set then they will use the namespace of the release -# This is helpful when installing cert manager as a chart dependency (sub chart) -namespace: "" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Automount API credentials for a Service Account. 
- # Optional additional labels to add to the controller's ServiceAccount - # labels: {} - automountServiceAccountToken: true - -# Automounting API credentials for a particular pod -# automountServiceAccountToken: true - -# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted -enableCertificateOwnerRef: false - -# Used to configure options for the controller pod. -# This allows setting options that'd usually be provided via flags. -# An APIVersion and Kind must be specified in your values.yaml file. -# Flags will override options that are set here. -config: -# apiVersion: controller.config.cert-manager.io/v1alpha1 -# kind: ControllerConfiguration -# logging: -# verbosity: 2 -# format: text -# leaderElectionConfig: -# namespace: kube-system -# kubernetesAPIQPS: 9000 -# kubernetesAPIBurst: 9000 -# numberOfConcurrentWorkers: 200 -# featureGates: -# additionalCertificateOutputFormats: true -# experimentalCertificateSigningRequestControllers: true -# experimentalGatewayAPISupport: true -# serverSideApply: true -# literalCertificateSubject: true -# useCertificateRequestBasicConstraints: true - -# Setting Nameservers for DNS01 Self Check -# See: https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check - -# Comma separated string with host and port of the recursive nameservers cert-manager should query -dns01RecursiveNameservers: "" - -# Forces cert-manager to only use the recursive nameservers for verification. -# Enabling this option could cause the DNS01 self check to take longer due to caching performed by the recursive nameservers -dns01RecursiveNameserversOnly: false - -# Additional command line flags to pass to cert-manager controller binary. 
-# To see all available flags run docker run quay.io/jetstack/cert-manager-controller: --help -extraArgs: [] - # Use this flag to enable or disable arbitrary controllers, for example, disable the CertificiateRequests approver - # - --controllers=*,-certificaterequests-approver - -extraEnv: [] -# - name: SOME_VAR -# value: 'some value' - -resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - -# Pod Security Context -# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - -# Container Security Context to be set on the controller component container -# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - -volumes: [] - -volumeMounts: [] - -# Optional additional annotations to add to the controller Deployment -# deploymentAnnotations: {} - -# Optional additional annotations to add to the controller Pods -# podAnnotations: {} - -podLabels: {} - -# Optional annotations to add to the controller Service -# serviceAnnotations: {} - -# Optional additional labels to add to the controller Service -# serviceLabels: {} - -# Optional DNS settings, useful if you have a public and private DNS zone for -# the same domain on Route 53. What follows is an example of ensuring -# cert-manager can access an ingress or DNS TXT records at all times. -# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for -# the cluster to work. 
-# podDnsPolicy: "None" -# podDnsConfig: -# nameservers: -# - "1.1.1.1" -# - "8.8.8.8" - -nodeSelector: - kubernetes.io/os: linux - -ingressShim: {} - # defaultIssuerName: "" - # defaultIssuerKind: "" - # defaultIssuerGroup: "" - -prometheus: - enabled: true - servicemonitor: - enabled: false - prometheusInstance: default - targetPort: 9402 - path: /metrics - interval: 60s - scrapeTimeout: 30s - labels: {} - annotations: {} - honorLabels: false - endpointAdditionalProperties: {} - -# Use these variables to configure the HTTP_PROXY environment variables -{% if dsc.proxy.enabled %} -http_proxy: "{{ dsc.proxy.http_proxy }}" -https_proxy: "{{ dsc.proxy.https_proxy }}" -no_proxy: "{{ dsc.proxy.no_proxy }}" -{% endif %} - -# A Kubernetes Affinty, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core -# for example: -# affinity: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: foo.bar.com/role -# operator: In -# values: -# - master -affinity: {} - -# A list of Kubernetes Tolerations, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core -# for example: -# tolerations: -# - key: foo.bar.com/role -# operator: Equal -# value: master -# effect: NoSchedule -tolerations: [] - -# A list of Kubernetes TopologySpreadConstraints, if required; see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core -# for example: -# topologySpreadConstraints: -# - maxSkew: 2 -# topologyKey: topology.kubernetes.io/zone -# whenUnsatisfiable: ScheduleAnyway -# labelSelector: -# matchLabels: -# app.kubernetes.io/instance: cert-manager -# app.kubernetes.io/component: controller -topologySpreadConstraints: [] - -# LivenessProbe settings for the controller container of the controller Pod. 
-# -# Disabled by default, because the controller has a leader election mechanism -# which should cause it to exit if it is unable to renew its leader election -# record. -# LivenessProbe durations and thresholds are based on those used for the Kubernetes -# controller-manager. See: -# https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245 -livenessProbe: - enabled: false - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 15 - successThreshold: 1 - failureThreshold: 8 - -# enableServiceLinks indicates whether information about services should be -# injected into pod's environment variables, matching the syntax of Docker -# links. -enableServiceLinks: false - -webhook: - replicaCount: 1 - timeoutSeconds: 10 - - # Used to configure options for the webhook pod. - # This allows setting options that'd usually be provided via flags. - # An APIVersion and Kind must be specified in your values.yaml file. - # Flags will override options that are set here. - config: - # apiVersion: webhook.config.cert-manager.io/v1alpha1 - # kind: WebhookConfiguration - - # The port that the webhook should listen on for requests. - # In GKE private clusters, by default kubernetes apiservers are allowed to - # talk to the cluster nodes only on 443 and 10250. so configuring - # securePort: 10250, will work out of the box without needing to add firewall - # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000. - # This should be uncommented and set as a default by the chart once we graduate - # the apiVersion of WebhookConfiguration past v1alpha1. 
- # securePort: 10250 - - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - # Pod Security Context to be set on the webhook component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - podDisruptionBudget: - enabled: false - - # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) - # or a percentage value (e.g. 25%) - # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` - # minAvailable: 1 - # maxUnavailable: 1 - - # Container Security Context to be set on the webhook component container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - # Optional additional annotations to add to the webhook Deployment - # deploymentAnnotations: {} - - # Optional additional annotations to add to the webhook Pods - # podAnnotations: {} - - # Optional additional annotations to add to the webhook Service - # serviceAnnotations: {} - - # Optional additional annotations to add to the webhook MutatingWebhookConfiguration - # mutatingWebhookConfigurationAnnotations: {} - - # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration - # validatingWebhookConfigurationAnnotations: {} - - # Additional command line flags to pass to cert-manager webhook binary. - # To see all available flags run docker run quay.io/jetstack/cert-manager-webhook: --help - extraArgs: [] - # Path to a file containing a WebhookConfiguration object used to configure the webhook - # - --config= - - # Comma separated list of feature gates that should be enabled on the - # webhook pod. 
- featureGates: "" - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - ## Liveness and readiness probe values - ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes - ## - livenessProbe: - failureThreshold: 3 - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 1 - - nodeSelector: - kubernetes.io/os: linux - - affinity: {} - - tolerations: [] - - topologySpreadConstraints: [] - - # Optional additional labels to add to the Webhook Pods - podLabels: {} - - # Optional additional labels to add to the Webhook Service - serviceLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-webhook - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-webhook - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Optional additional labels to add to the webhook's ServiceAccount - # labels: {} - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - - # Automounting API credentials for a particular pod - # automountServiceAccountToken: true - - # The port that the webhook should listen on for requests. 
- # In GKE private clusters, by default kubernetes apiservers are allowed to - # talk to the cluster nodes only on 443 and 10250. so configuring - # securePort: 10250, will work out of the box without needing to add firewall - # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000 - securePort: 10250 - - # Specifies if the webhook should be started in hostNetwork mode. - # - # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom - # CNI (such as calico), because control-plane managed by AWS cannot communicate - # with pods' IP CIDR and admission webhooks are not working - # - # Since the default port for the webhook conflicts with kubelet on the host - # network, `webhook.securePort` should be changed to an available port if - # running in hostNetwork mode. - hostNetwork: false - - # Specifies how the service should be handled. Useful if you want to expose the - # webhook to outside of the cluster. In some cases, the control plane cannot - # reach internal services. - serviceType: ClusterIP - # loadBalancerIP: - - # Overrides the mutating webhook and validating webhook so they reach the webhook - # service using the `url` field instead of a service. - url: {} - # host: - - # Enables default network policies for webhooks. - networkPolicy: - enabled: false - ingress: - - from: - - ipBlock: - cidr: 0.0.0.0/0 - egress: - - ports: - - port: 80 - protocol: TCP - - port: 443 - protocol: TCP - - port: 53 - protocol: TCP - - port: 53 - protocol: UDP - # On OpenShift and OKD, the Kubernetes API server listens on - # port 6443. - - port: 6443 - protocol: TCP - to: - - ipBlock: - cidr: 0.0.0.0/0 - - volumes: [] - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into pod's environment variables, matching the syntax of Docker - # links. 
- enableServiceLinks: false - -cainjector: - enabled: true - replicaCount: 1 - - strategy: {} - # type: RollingUpdate - # rollingUpdate: - # maxSurge: 0 - # maxUnavailable: 1 - - # Pod Security Context to be set on the cainjector component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - podDisruptionBudget: - enabled: false - - # minAvailable and maxUnavailable can either be set to an integer (e.g. 1) - # or a percentage value (e.g. 25%) - # if neither minAvailable or maxUnavailable is set, we default to `minAvailable: 1` - # minAvailable: 1 - # maxUnavailable: 1 - - # Container Security Context to be set on the cainjector component container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - - # Optional additional annotations to add to the cainjector Deployment - # deploymentAnnotations: {} - - # Optional additional annotations to add to the cainjector Pods - # podAnnotations: {} - - # Additional command line flags to pass to cert-manager cainjector binary. - # To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector: --help - extraArgs: [] - # Enable profiling for cainjector - # - --enable-profiling=true - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - nodeSelector: - kubernetes.io/os: linux - - affinity: {} - - tolerations: [] - - topologySpreadConstraints: [] - - # Optional additional labels to add to the CA Injector Pods - podLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-cainjector - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-cainjector - - # Override the image tag to deploy by setting this variable. 
- # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - serviceAccount: - # Specifies whether a service account should be created - create: true - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - # Optional additional annotations to add to the controller's ServiceAccount - # annotations: {} - # Automount API credentials for a Service Account. - # Optional additional labels to add to the cainjector's ServiceAccount - # labels: {} - automountServiceAccountToken: true - - # Automounting API credentials for a particular pod - # automountServiceAccountToken: true - - volumes: [] - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into pod's environment variables, matching the syntax of Docker - # links. - enableServiceLinks: false - -acmesolver: - image: - repository: quay.io/jetstack/cert-manager-acmesolver - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-acmesolver - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. - # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - -# This startupapicheck is a Helm post-install hook that waits for the webhook -# endpoints to become available. -# The check is implemented using a Kubernetes Job- if you are injecting mesh -# sidecar proxies into cert-manager pods, you probably want to ensure that they -# are not injected into this Job's pod. Otherwise the installation may time out -# due to the Job never being completed because the sidecar proxy does not exit. 
-# See https://github.com/cert-manager/cert-manager/pull/4414 for context. -startupapicheck: - enabled: true - - # Pod Security Context to be set on the startupapicheck component Pod - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - - # Container Security Context to be set on the controller component container - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - - # Timeout for 'kubectl check api' command - timeout: 1m - - # Job backoffLimit - backoffLimit: 4 - - # Optional additional annotations to add to the startupapicheck Job - jobAnnotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "1" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Optional additional annotations to add to the startupapicheck Pods - # podAnnotations: {} - - # Additional command line flags to pass to startupapicheck binary. - # To see all available flags run docker run quay.io/jetstack/cert-manager-ctl: --help - extraArgs: [] - - resources: {} - # requests: - # cpu: 10m - # memory: 32Mi - - nodeSelector: - kubernetes.io/os: linux - - affinity: {} - - tolerations: [] - - # Optional additional labels to add to the startupapicheck Pods - podLabels: {} - - image: - repository: quay.io/jetstack/cert-manager-ctl - # You can manage a registry with - # registry: quay.io - # repository: jetstack/cert-manager-ctl - - # Override the image tag to deploy by setting this variable. - # If no value is set, the chart's appVersion will be used. 
- # tag: canary - - # Setting a digest will override any tag - # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 - - pullPolicy: IfNotPresent - - rbac: - # annotations for the startup API Check job RBAC and PSP resources - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Automounting API credentials for a particular pod - # automountServiceAccountToken: true - - serviceAccount: - # Specifies whether a service account should be created - create: true - - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - # name: "" - - # Optional additional annotations to add to the Job's ServiceAccount - annotations: - helm.sh/hook: post-install - helm.sh/hook-weight: "-5" - helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded - - # Automount API credentials for a Service Account. - automountServiceAccountToken: true - - # Optional additional labels to add to the startupapicheck's ServiceAccount - # labels: {} - - volumes: [] - volumeMounts: [] - - # enableServiceLinks indicates whether information about services should be - # injected into pod's environment variables, matching the syntax of Docker - # links. 
- enableServiceLinks: false diff --git a/roles/cloudnativepg/tasks/main.yml b/roles/cloudnativepg/tasks/main.yml index fcf089c4..f86f7ac9 100644 --- a/roles/cloudnativepg/tasks/main.yml +++ b/roles/cloudnativepg/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Get cluster infos kubernetes.core.k8s_cluster_info: register: cluster_infos @@ -29,7 +30,7 @@ - name: Set CloudNativePG helm values ansible.builtin.set_fact: - cnpg_values: "{{ lookup('template', 'cnpg-values.yaml.j2') | from_yaml }}" + cnpg_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" - name: Deploy CloudNativePG helm kubernetes.core.helm: diff --git a/roles/cloudnativepg/templates/cnpg-values.yaml.j2 b/roles/cloudnativepg/templates/cnpg-values.yaml.j2 deleted file mode 100644 index 57cce51f..00000000 --- a/roles/cloudnativepg/templates/cnpg-values.yaml.j2 +++ /dev/null @@ -1,514 +0,0 @@ -# -# Copyright The CloudNativePG Contributors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Default values for CloudNativePG. -# This is a YAML-formatted file. -# Please declare variables to be passed to your templates. - -replicaCount: 1 - -image: - repository: ghcr.io/cloudnative-pg/cloudnative-pg - pullPolicy: IfNotPresent - # -- Overrides the image tag whose default is the chart appVersion. - tag: "" - -imagePullSecrets: [] -nameOverride: "" -fullnameOverride: "" - -crds: - # -- Specifies whether the CRDs should be created when installing the chart. - create: true - -# -- The webhook configuration. 
-webhook: - port: 9443 - mutating: - create: true - failurePolicy: Fail - validating: - create: true - failurePolicy: Fail - livenessProbe: - initialDelaySeconds: 3 - readinessProbe: - initialDelaySeconds: 3 - -# -- Operator configuration. -config: - # -- Specifies whether the secret should be created. - create: true - # -- The name of the configmap/secret to use. - name: cnpg-controller-manager-config - # -- Specifies whether it should be stored in a secret, instead of a configmap. - secret: false - # -- The content of the configmap/secret, see - # https://cloudnative-pg.io/documentation/current/operator_conf/#available-options - # for all the available options. - data: {} - # INHERITED_ANNOTATIONS: categories - # INHERITED_LABELS: environment, workload, app - # WATCH_NAMESPACE: namespace-a,namespace-b - -# -- Additinal arguments to be added to the operator's args list. -additionalArgs: [] - -serviceAccount: - # -- Specifies whether the service account should be created. - create: true - # -- The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template. - name: "" - -rbac: - # -- Specifies whether ClusterRole and ClusterRoleBinding should be created. - create: true - -# -- Annotations to be added to all other resources. -commonAnnotations: {} -# -- Annotations to be added to the pod. -podAnnotations: {} -# -- Labels to be added to the pod. -podLabels: {} - -# -- Container Security Context. -containerSecurityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: null - runAsGroup: null - capabilities: - drop: - - "ALL" - -# -- Security Context for the whole pod. -podSecurityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - # fsGroup: 2000 - -# -- Priority indicates the importance of a Pod relative to other Pods. 
-priorityClassName: "" - -service: - type: ClusterIP - # -- DO NOT CHANGE THE SERVICE NAME as it is currently used to generate the certificate - # and can not be configured - name: cnpg-webhook-service - port: 443 - -resources: {} - # If you want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # - # limits: - # cpu: 100m - # memory: 200Mi - # requests: - # cpu: 100m - # memory: 100Mi - -# -- Nodeselector for the operator to be installed. -nodeSelector: {} - -# -- Tolerations for the operator to be installed. -tolerations: [] - -# -- Affinity for the operator to be installed. -affinity: {} - -monitoring: - # -- Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. - podMonitorEnabled: false - -# Default monitoring queries -monitoringQueriesConfigMap: - # -- The name of the default monitoring configmap. - name: cnpg-default-monitoring - # -- A string representation of a YAML defining monitoring queries. 
- queries: | - backends: - query: | - SELECT sa.datname - , sa.usename - , sa.application_name - , states.state - , COALESCE(sa.count, 0) AS total - , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds - FROM ( VALUES ('active') - , ('idle') - , ('idle in transaction') - , ('idle in transaction (aborted)') - , ('fastpath function call') - , ('disabled') - ) AS states(state) - LEFT JOIN ( - SELECT datname - , state - , usename - , COALESCE(application_name, '') AS application_name - , COUNT(*) - , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs - FROM pg_catalog.pg_stat_activity - GROUP BY datname, state, usename, application_name - ) sa ON states.state = sa.state - WHERE sa.usename IS NOT NULL - metrics: - - datname: - usage: "LABEL" - description: "Name of the database" - - usename: - usage: "LABEL" - description: "Name of the user" - - application_name: - usage: "LABEL" - description: "Name of the application" - - state: - usage: "LABEL" - description: "State of the backend" - - total: - usage: "GAUGE" - description: "Number of backends" - - max_tx_duration_seconds: - usage: "GAUGE" - description: "Maximum duration of a transaction in seconds" - - backends_waiting: - query: | - SELECT count(*) AS total - FROM pg_catalog.pg_locks blocked_locks - JOIN pg_catalog.pg_locks blocking_locks - ON blocking_locks.locktype = blocked_locks.locktype - AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database - AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation - AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page - AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple - AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid - AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid - AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid - AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid - AND 
blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid - AND blocking_locks.pid != blocked_locks.pid - JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid - WHERE NOT blocked_locks.granted - metrics: - - total: - usage: "GAUGE" - description: "Total number of backends that are currently waiting on other queries" - - pg_database: - query: | - SELECT datname - , pg_catalog.pg_database_size(datname) AS size_bytes - , pg_catalog.age(datfrozenxid) AS xid_age - , pg_catalog.mxid_age(datminmxid) AS mxid_age - FROM pg_catalog.pg_database - metrics: - - datname: - usage: "LABEL" - description: "Name of the database" - - size_bytes: - usage: "GAUGE" - description: "Disk space used by the database" - - xid_age: - usage: "GAUGE" - description: "Number of transactions from the frozen XID to the current one" - - mxid_age: - usage: "GAUGE" - description: "Number of multiple transactions (Multixact) from the frozen XID to the current one" - - pg_postmaster: - query: | - SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time - FROM pg_catalog.pg_postmaster_start_time() - metrics: - - start_time: - usage: "GAUGE" - description: "Time at which postgres started (based on epoch)" - - pg_replication: - query: "SELECT CASE WHEN NOT pg_catalog.pg_is_in_recovery() - THEN 0 - ELSE GREATEST (0, - EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp()))) - END AS lag, - pg_catalog.pg_is_in_recovery() AS in_recovery, - EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up, - (SELECT count(*) FROM pg_stat_replication) AS streaming_replicas" - metrics: - - lag: - usage: "GAUGE" - description: "Replication lag behind primary in seconds" - - in_recovery: - usage: "GAUGE" - description: "Whether the instance is in recovery" - - is_wal_receiver_up: - usage: "GAUGE" - description: "Whether the instance wal_receiver is up" - - streaming_replicas: - usage: "GAUGE" - description: "Number of streaming replicas 
connected to the instance" - - pg_replication_slots: - query: | - SELECT slot_name, - slot_type, - database, - active, - pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn) - FROM pg_catalog.pg_replication_slots - WHERE NOT temporary - metrics: - - slot_name: - usage: "LABEL" - description: "Name of the replication slot" - - slot_type: - usage: "LABEL" - description: "Type of the replication slot" - - database: - usage: "LABEL" - description: "Name of the database" - - active: - usage: "GAUGE" - description: "Flag indicating whether the slot is active" - - pg_wal_lsn_diff: - usage: "GAUGE" - description: "Replication lag in bytes" - - pg_stat_archiver: - query: | - SELECT archived_count - , failed_count - , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival - , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure - , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time - , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time - , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn - , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn - , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time - FROM pg_catalog.pg_stat_archiver - metrics: - - archived_count: - usage: "COUNTER" - description: "Number of WAL files that have been successfully archived" - - failed_count: - usage: "COUNTER" - description: "Number of failed attempts for archiving WAL files" - - seconds_since_last_archival: - usage: "GAUGE" - description: "Seconds since the last successful archival operation" - - seconds_since_last_failure: - usage: "GAUGE" - description: "Seconds since the last failed archival operation" - - 
last_archived_time: - usage: "GAUGE" - description: "Epoch of the last time WAL archiving succeeded" - - last_failed_time: - usage: "GAUGE" - description: "Epoch of the last time WAL archiving failed" - - last_archived_wal_start_lsn: - usage: "GAUGE" - description: "Archived WAL start LSN" - - last_failed_wal_start_lsn: - usage: "GAUGE" - description: "Last failed WAL LSN" - - stats_reset_time: - usage: "GAUGE" - description: "Time at which these statistics were last reset" - - pg_stat_bgwriter: - query: | - SELECT checkpoints_timed - , checkpoints_req - , checkpoint_write_time - , checkpoint_sync_time - , buffers_checkpoint - , buffers_clean - , maxwritten_clean - , buffers_backend - , buffers_backend_fsync - , buffers_alloc - FROM pg_catalog.pg_stat_bgwriter - metrics: - - checkpoints_timed: - usage: "COUNTER" - description: "Number of scheduled checkpoints that have been performed" - - checkpoints_req: - usage: "COUNTER" - description: "Number of requested checkpoints that have been performed" - - checkpoint_write_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds" - - checkpoint_sync_time: - usage: "COUNTER" - description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds" - - buffers_checkpoint: - usage: "COUNTER" - description: "Number of buffers written during checkpoints" - - buffers_clean: - usage: "COUNTER" - description: "Number of buffers written by the background writer" - - maxwritten_clean: - usage: "COUNTER" - description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers" - - buffers_backend: - usage: "COUNTER" - description: "Number of buffers written directly by a backend" - - buffers_backend_fsync: - usage: "COUNTER" - description: "Number of times a backend had to execute its own fsync call 
(normally the background writer handles those even when the backend does its own write)" - - buffers_alloc: - usage: "COUNTER" - description: "Number of buffers allocated" - - pg_stat_database: - query: | - SELECT datname - , xact_commit - , xact_rollback - , blks_read - , blks_hit - , tup_returned - , tup_fetched - , tup_inserted - , tup_updated - , tup_deleted - , conflicts - , temp_files - , temp_bytes - , deadlocks - , blk_read_time - , blk_write_time - FROM pg_catalog.pg_stat_database - metrics: - - datname: - usage: "LABEL" - description: "Name of this database" - - xact_commit: - usage: "COUNTER" - description: "Number of transactions in this database that have been committed" - - xact_rollback: - usage: "COUNTER" - description: "Number of transactions in this database that have been rolled back" - - blks_read: - usage: "COUNTER" - description: "Number of disk blocks read in this database" - - blks_hit: - usage: "COUNTER" - description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)" - - tup_returned: - usage: "COUNTER" - description: "Number of rows returned by queries in this database" - - tup_fetched: - usage: "COUNTER" - description: "Number of rows fetched by queries in this database" - - tup_inserted: - usage: "COUNTER" - description: "Number of rows inserted by queries in this database" - - tup_updated: - usage: "COUNTER" - description: "Number of rows updated by queries in this database" - - tup_deleted: - usage: "COUNTER" - description: "Number of rows deleted by queries in this database" - - conflicts: - usage: "COUNTER" - description: "Number of queries canceled due to conflicts with recovery in this database" - - temp_files: - usage: "COUNTER" - description: "Number of temporary files created by queries in this database" - - temp_bytes: - usage: "COUNTER" - description: "Total amount of data 
written to temporary files by queries in this database" - - deadlocks: - usage: "COUNTER" - description: "Number of deadlocks detected in this database" - - blk_read_time: - usage: "COUNTER" - description: "Time spent reading data file blocks by backends in this database, in milliseconds" - - blk_write_time: - usage: "COUNTER" - description: "Time spent writing data file blocks by backends in this database, in milliseconds" - - pg_stat_replication: - primary: true - query: | - SELECT usename - , COALESCE(application_name, '') AS application_name - , COALESCE(client_addr::text, '') AS client_addr - , EXTRACT(EPOCH FROM backend_start) AS backend_start - , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes - , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes - , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes - , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds - , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds - FROM pg_catalog.pg_stat_replication - metrics: - - usename: - usage: "LABEL" - description: "Name of the replication user" - - application_name: - usage: "LABEL" - description: "Name of the application" - - client_addr: - usage: "LABEL" - description: "Client IP address" - - backend_start: - usage: "COUNTER" - description: "Time when this process was started" - - backend_xmin_age: - usage: "COUNTER" - description: "The age of this standby's xmin horizon" - - sent_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location sent on this connection" - - write_diff_bytes: - usage: "GAUGE" - description: 
"Difference in bytes from the last write-ahead log location written to disk by this standby server" - - flush_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server" - - replay_diff_bytes: - usage: "GAUGE" - description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server" - - write_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it" - - flush_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it" - - replay_lag_seconds: - usage: "GAUGE" - description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it" - - pg_settings: - query: | - SELECT name, - CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting - FROM pg_catalog.pg_settings - WHERE vartype IN ('integer', 'real', 'bool') - ORDER BY 1 - metrics: - - name: - usage: "LABEL" - description: "Name of the setting" - - setting: - usage: "GAUGE" - description: "Setting value" \ No newline at end of file diff --git a/roles/cloudnativepg/templates/values.yaml.j2 b/roles/cloudnativepg/templates/values.yaml.j2 new file mode 100644 index 00000000..5a02fbec --- /dev/null +++ b/roles/cloudnativepg/templates/values.yaml.j2 @@ -0,0 +1,3 @@ +containerSecurityContext: + runAsUser: null + runAsGroup: null diff --git a/roles/confSyncer/tasks/main.yaml b/roles/confSyncer/tasks/main.yaml index 07d375ea..0b0d767f 100644 --- a/roles/confSyncer/tasks/main.yaml +++ b/roles/confSyncer/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Add helm repo kubernetes.core.helm_repository: name: appscode diff --git a/roles/console-dso-config/tasks/main.yml 
b/roles/console-dso-config/tasks/main.yml index 65f03e2d..eec4d768 100644 --- a/roles/console-dso-config/tasks/main.yml +++ b/roles/console-dso-config/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create dso-console namespace kubernetes.core.k8s: name: "{{ dsc.console.namespace }}" diff --git a/roles/console-dso/tasks/main.yaml b/roles/console-dso/tasks/main.yaml index cbedbb91..4fbb6594 100644 --- a/roles/console-dso/tasks/main.yaml +++ b/roles/console-dso/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Apply project kubernetes.core.k8s: template: project.j2 @@ -42,13 +43,23 @@ {{ exposed_ca_pem }} when: dsc.exposedCA.type != 'none' -- name: Set extra env vars +- name: Prepare console helm values ansible.builtin.set_fact: console_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" -- name: Merge with argo user values +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + console_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with console proxy values + ansible.builtin.set_fact: + console_values: "{{ console_values | combine(console_proxy_values, recursive=True, list_merge='append') }}" + +- name: Merge with console user values ansible.builtin.set_fact: - console_values: "{{ console_values | combine(dsc.console['values'], recursive=True) | to_yaml }}" + console_values: "{{ console_values | combine(dsc.console['values'], recursive=True, list_merge='append') }}" - name: Apply app kubernetes.core.k8s: diff --git a/roles/console-dso/templates/app.yaml.j2 b/roles/console-dso/templates/app.yaml.j2 index eb1600c2..2988a0ba 100644 --- a/roles/console-dso/templates/app.yaml.j2 +++ b/roles/console-dso/templates/app.yaml.j2 @@ -17,8 +17,8 @@ spec: releaseName: console-pi helm: parameters: [] - values: | -{{ console_values | indent(8, true) }} + values: |- +{{ console_values | to_yaml | indent(8, true) }} syncPolicy: automated: {} syncOptions: diff 
--git a/roles/console-dso/templates/values-proxy.yaml.j2 b/roles/console-dso/templates/values-proxy.yaml.j2 new file mode 100644 index 00000000..b5964879 --- /dev/null +++ b/roles/console-dso/templates/values-proxy.yaml.j2 @@ -0,0 +1,6 @@ +server: + container: + env: + HTTP_PROXY: {{ dsc.proxy.http_proxy }} + HTTPS_PROXY: {{ dsc.proxy.https_proxy }} + NO_PROXY: {{ dsc.proxy.no_proxy }} diff --git a/roles/console-dso/templates/values.yaml.j2 b/roles/console-dso/templates/values.yaml.j2 index 076ca5f8..d2c437c6 100644 --- a/roles/console-dso/templates/values.yaml.j2 +++ b/roles/console-dso/templates/values.yaml.j2 @@ -23,15 +23,9 @@ keycloak: sessionSecret: {{ session_secret }} domain: "{{ keycloak_domain }}" realm: dso -server: - container: - env: -{% if dsc.proxy.enabled %} - HTTP_PROXY: {{ dsc.proxy.http_proxy }} - HTTPS_PROXY: {{ dsc.proxy.https_proxy }} - NO_PROXY: {{ dsc.proxy.no_proxy }} -{% endif %} + {% if dsc.exposedCA.type != 'none' %} +server: extraCa: name: bundle key: ca.pem diff --git a/roles/gitlab-catalog/tasks/main.yaml b/roles/gitlab-catalog/tasks/main.yaml index 1aeb66de..c86415a8 100644 --- a/roles/gitlab-catalog/tasks/main.yaml +++ b/roles/gitlab-catalog/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Find gitlab token in inventory kubernetes.core.k8s_info: namespace: "{{ dsc.console.namespace }}" @@ -13,10 +14,10 @@ - name: Create Catalog community.general.gitlab_project: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - import_url: "https://github.com/cloud-pi-native/gitlab-ci-catalog.git" + import_url: https://github.com/cloud-pi-native/gitlab-ci-catalog.git name: Catalog path: catalog group: "{{ dsc.global.projectsRootDir | join('/') }}" @@ -26,7 +27,7 @@ - name: Get destination catalog branches ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/projects/{{ 
catalog_repo.project.id }}/repository/branches" + url: https://{{ gitlab_domain }}/api/v4/projects/{{ catalog_repo.project.id }}/repository/branches headers: PRIVATE-TOKEN: "{{ gitlab_token }}" register: destination_branches @@ -35,7 +36,7 @@ - name: Get destination catalog tags ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/projects/{{ catalog_repo.project.id }}/repository/tags" + url: https://{{ gitlab_domain }}/api/v4/projects/{{ catalog_repo.project.id }}/repository/tags headers: PRIVATE-TOKEN: "{{ gitlab_token }}" register: destination_tags diff --git a/roles/gitlab-runner/tasks/main.yaml b/roles/gitlab-runner/tasks/main.yaml index 6cae1100..6e6ec167 100644 --- a/roles/gitlab-runner/tasks/main.yaml +++ b/roles/gitlab-runner/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Get Gitlab namespace kubernetes.core.k8s_info: kind: Namespace @@ -6,7 +7,7 @@ - name: Fail if Gitlab namespace is not present ansible.builtin.fail: - msg: "Gitlab ne semble pas avoir été provisionné sur le cluster veuillez l'installer avant" + msg: Gitlab ne semble pas avoir été provisionné sur le cluster veuillez l'installer avant when: gitlab_ns | length == 0 - name: Get dso-config inventory @@ -23,14 +24,14 @@ - name: Initiate a runner in GitLab instance ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/user/runners" + url: https://{{ gitlab_domain }}/api/v4/user/runners method: POST headers: - "PRIVATE-TOKEN": "{{ gitlab_token }}" + PRIVATE-TOKEN: "{{ gitlab_token }}" body: runner_type: instance_type description: dso-runner - platform: null + platform: run_untagged: true body_format: form-urlencoded status_code: [201] @@ -42,13 +43,27 @@ name: gitlab repo_url: https://charts.gitlab.io +- name: Create gitlab-runner role + kubernetes.core.k8s: + template: gitlab-runner-auth.yaml.j2 + - name: Set GitLab Runner helm values ansible.builtin.set_fact: 
runner_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" -- name: Create gitlab-runner role - kubernetes.core.k8s: - template: gitlab-runner-auth.yaml.j2 +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + runner_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with runner proxy values + ansible.builtin.set_fact: + runner_values: "{{ runner_values | combine(runner_proxy_values, recursive=True, list_merge='append') }}" + +- name: Merge with runner user values + ansible.builtin.set_fact: + gitlab_values: "{{ gitlab_values | combine(dsc.gitlab['values'], recursive=True, list_merge='append') }}" - name: Deploy GitLab Runner helm kubernetes.core.helm: diff --git a/roles/gitlab-runner/templates/values-proxy.yaml.j2 b/roles/gitlab-runner/templates/values-proxy.yaml.j2 new file mode 100644 index 00000000..cf5113b6 --- /dev/null +++ b/roles/gitlab-runner/templates/values-proxy.yaml.j2 @@ -0,0 +1,10 @@ +envVars: + +{% if dsc.proxy.enabled %} + - name: HTTP_PROXY + value: "{{ dsc.proxy.http_proxy }}" + - name: HTTPS_PROXY + value: "{{ dsc.proxy.https_proxy }}" + - name: NO_PROXY + value: "{{ dsc.proxy.no_proxy }}" +{% endif %} diff --git a/roles/gitlab-runner/templates/values.yaml.j2 b/roles/gitlab-runner/templates/values.yaml.j2 index f628a7c1..bc8d2b9c 100644 --- a/roles/gitlab-runner/templates/values.yaml.j2 +++ b/roles/gitlab-runner/templates/values.yaml.j2 @@ -1,9 +1,3 @@ -image: - registry: registry.gitlab.com - image: gitlab-org/gitlab-runner - # tag: alpine-v11.6.0 - -imagePullPolicy: IfNotPresent replicas: 1 @@ -14,23 +8,10 @@ gitlabUrl: https://{{ gitlab_domain }}/ runnerToken: "{{ default_runner.json.token }}" -terminationGracePeriodSeconds: 3600 - -## Set the certsSecretName in order to pass custom certficates for GitLab Runner to use -## Provide resource name for a Kubernetes Secret Object in the same namespace, -## this is 
used to populate the /home/gitlab-runner/.gitlab-runner/certs/ directory -## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates-targeting-the-gitlab-server - -## -#certsSecretName: gitlab-wildcard-tls-chain {% if dsc.exposedCA != 'none' %} certsSecretName: exposed-ca {% endif %} -concurrent: 10 - -checkInterval: 30 - rbac: create: true rules: @@ -48,15 +29,10 @@ rbac: - apiGroups: [""] resources: ["pods/exec"] verbs: ["create", "patch", "delete"] - clusterWideAccess: false serviceAccountName: gitlab-runner podSecurityPolicy: enabled: true - resourceNames: - - gitlab-runner -## Configuration for the Pods that the runner launches for each new job -## runners: # runner configuration, where the multi line strings is evaluated as # template so you can specify helm values inside of it. @@ -69,53 +45,12 @@ runners: namespace = "{{ dsc.gitlab.namespace }}" image = "ubuntu:22.04" - ## Absolute path for an existing runner configuration file - ## Can be used alongside "volumes" and "volumeMounts" to use an external config file - ## Active if runners.config is empty or null - configPath: "" - - ## Which executor should be used - ## - # executor: kubernetes - ## Specify the name for the runner. 
- ## name: gitlab-runner ## The name of the secret containing runner-token and runner-registration-token secret: gitlab-runner-secret - cache: {} - -## Configure securitycontext for the main container -## ref: https://kubernetes.io/docs/concepts/security/pod-security-standards/ -## -securityContext: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: false - runAsNonRoot: true - privileged: false - capabilities: - drop: ["ALL"] - -## Configure securitycontext valid for the whole pod -## ref: https://kubernetes.io/docs/concepts/security/pod-security-standards/ -## -podSecurityContext: - runAsUser: 100 - fsGroup: 65533 - -## Configure resource requests and limits -## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ -## -resources: {} - # limits: - # memory: 256Mi - # cpu: 200m - # requests: - # memory: 128Mi - # cpu: 100m - ## Configure environment variables that will be present when the registration command runs ## This provides further control over the registration process and the config.toml file ## ref: `gitlab-runner register --help` @@ -124,42 +59,7 @@ resources: {} envVars: - name: RUNNER_EXECUTOR value: kubernetes -{% if dsc.proxy.enabled %} - - name: HTTP_PROXY - value: "{{ dsc.proxy.http_proxy }}" - - name: HTTPS_PROXY - value: "{{ dsc.proxy.https_proxy }}" - - name: NO_PROXY - value: "{{ dsc.proxy.no_proxy }}" -{% endif %} - -## Annotations to be added to deployment -## -deploymentAnnotations: {} - # Example: - # downscaler/uptime: - -## Labels to be added to deployment -## -deploymentLabels: {} - # Example: - # owner.team: - -## Annotations to be added to manager pod -## -podAnnotations: {} - # Example: - # iam.amazonaws.com/role: - -## Labels to be added to manager pod -## -podLabels: {} - # Example: - # owner.team: -## Configure priorityClassName for manager pod. 
See k8s docs for more info on how pod priority works: -## https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: "" volumeMounts: - name: root-gitlab-runner diff --git a/roles/gitlab/tasks/create-group.yaml b/roles/gitlab/tasks/create-group.yaml index 96874ae9..401684bd 100644 --- a/roles/gitlab/tasks/create-group.yaml +++ b/roles/gitlab/tasks/create-group.yaml @@ -1,16 +1,17 @@ +--- - name: Set group name ansible.builtin.set_fact: name: "{{ dsc.global.projectsRootDir[index | int] }}" -- name: "Create group {{ name }}" +- name: Create group {{ name }} community.general.gitlab_group: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" name: "{{ name }}" parent: "{{ parent }}" - subgroup_creation_level: "owner" - project_creation_level: "developer" + subgroup_creation_level: owner + project_creation_level: developer state: present visibility: internal diff --git a/roles/gitlab/tasks/create-token-7.2.0+.yaml b/roles/gitlab/tasks/create-token-7.2.0+.yaml index 6dfc657d..ac1bde0a 100644 --- a/roles/gitlab/tasks/create-token-7.2.0+.yaml +++ b/roles/gitlab/tasks/create-token-7.2.0+.yaml @@ -1,3 +1,4 @@ +--- - name: Create Token for 7.2.0 chart version and higher kubernetes.core.k8s_exec: pod: "{{ tb_pod }}" @@ -7,4 +8,4 @@ , name: \"ANSIBLE-DSO\" , scopes: [:api, :read_repository, :write_repository, :create_runner] , expires_at: 365.days.from_now).token' | gitlab-rails console" - register: token \ No newline at end of file + register: token diff --git a/roles/gitlab/tasks/create-token-7.2.0-.yaml b/roles/gitlab/tasks/create-token-7.2.0-.yaml index e848c75b..2a0c4cab 100644 --- a/roles/gitlab/tasks/create-token-7.2.0-.yaml +++ b/roles/gitlab/tasks/create-token-7.2.0-.yaml @@ -1,3 +1,4 @@ +--- - name: Create Token for chart versions lower than 7.2.0 kubernetes.core.k8s_exec: pod: "{{ tb_pod }}" @@ -7,4 +8,4 @@ , name: 
\"ANSIBLE-DSO\" , scopes: [:api, :read_repository, :write_repository] , expires_at: 365.days.from_now).token' | gitlab-rails console" - register: token \ No newline at end of file + register: token diff --git a/roles/gitlab/tasks/main.yaml b/roles/gitlab/tasks/main.yaml index a85e91f2..f5a70410 100644 --- a/roles/gitlab/tasks/main.yaml +++ b/roles/gitlab/tasks/main.yaml @@ -1,6 +1,8 @@ -- ansible.builtin.set_fact: - vault_auth_path: "jwt" - vault_auth_role: "default-ci" +--- +- name: Set some facts + ansible.builtin.set_fact: + vault_auth_path: jwt + vault_auth_role: default-ci npm_file: "{{ lookup('ansible.builtin.template', 'npm_file.j2') }}" mvn_config_file: "{{ lookup('ansible.builtin.template', 'mvn_conf_file.j2') }}" @@ -90,11 +92,21 @@ - name: Prepare Gitlab helm values ansible.builtin.set_fact: - gitlab_values: "{{ lookup('template', 'gitlab-instance.yaml.j2') | from_yaml }}" + gitlab_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" + +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + gitlab_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with gitlab proxy values + ansible.builtin.set_fact: + gitlab_values: "{{ gitlab_values | combine(gitlab_proxy_values, recursive=True, list_merge='append') }}" - name: Merge with gitlab user values ansible.builtin.set_fact: - gitlab_values: "{{ gitlab_values | combine(dsc.gitlab['values'], recursive=True) }}" + gitlab_values: "{{ gitlab_values | combine(dsc.gitlab['values'], recursive=True, list_merge='append') }}" - name: Install gitlab instance kubernetes.core.k8s: @@ -135,9 +147,9 @@ when: set_token_inv.skipped is not defined ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/admin/ci/variables" + url: https://{{ gitlab_domain }}/api/v4/admin/ci/variables headers: - "PRIVATE-TOKEN": "{{ gitlab_token }}" + 
PRIVATE-TOKEN: "{{ gitlab_token }}" register: test_token ignore_errors: true @@ -158,7 +170,8 @@ - name: Create token ansible.builtin.include_tasks: - file: "{{ dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') | ternary('create-token-7.2.0-.yaml', 'create-token-7.2.0+.yaml') }}" + file: "{{ dsc.gitlab.chartVersion is version('7.2.0', operator='lt', version_type='loose') | ternary('create-token-7.2.0-.yaml', 'create-token-7.2.0+.yaml') + }}" - name: Set new gitlab token ansible.builtin.set_fact: @@ -177,19 +190,19 @@ - name: Get settings ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/application/settings" + url: https://{{ gitlab_domain }}/api/v4/application/settings headers: - "PRIVATE-TOKEN": "{{ gitlab_token }}" + PRIVATE-TOKEN: "{{ gitlab_token }}" register: get_settings ignore_errors: true - name: Set some parameters ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ gitlab_domain }}/api/v4/application/settings" + url: https://{{ gitlab_domain }}/api/v4/application/settings method: PUT headers: - "PRIVATE-TOKEN": "{{ gitlab_token }}" + PRIVATE-TOKEN: "{{ gitlab_token }}" body: signup_enabled: false outbound_local_requests_allowlist_raw: "0.0.0.0/0" @@ -208,9 +221,9 @@ ansible.builtin.include_tasks: file: create-group.yaml -- name: "Set or update some CI/CD variables" +- name: Set or update some CI/CD variables community.general.gitlab_group_variable: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" group: "{{ dsc.global.projectsRootDir | join('/') }}" @@ -222,83 +235,83 @@ value: "{{ dsc.global.projectsRootDir | join('/') }}/catalog" - name: PROJECTS_ROOT_DIR value: "{{ dsc.global.projectsRootDir | join('/') }}" - - name: "NEXUS_HOST_URL" - value: "https://{{ nexus_domain }}" - - name: "NEXUS_HOSTNAME" + - 
name: NEXUS_HOST_URL + value: https://{{ nexus_domain }} + - name: NEXUS_HOSTNAME value: "{{ nexus_domain }}" - - name: "SONAR_HOST_URL" - value: "https://{{ sonar_domain }}" - - name: "VAULT_AUTH_PATH" + - name: SONAR_HOST_URL + value: https://{{ sonar_domain }} + - name: VAULT_AUTH_PATH value: "{{ vault_auth_path }}" - - name: "VAULT_AUTH_ROLE" + - name: VAULT_AUTH_ROLE value: "{{ vault_auth_role }}" - - name: "VAULT_SERVER_URL" - value: "https://{{ vault_domain }}" - - name: "MVN_CONFIG_FILE" - variable_type: "file" + - name: VAULT_SERVER_URL + value: https://{{ vault_domain }} + - name: MVN_CONFIG_FILE + variable_type: file value: "{{ mvn_config_file }}" - - name: "NPM_FILE" - variable_type: "file" + - name: NPM_FILE + variable_type: file value: "{{ npm_file }}" -- name: "Set or update proxy CI/CD variables" +- name: Set or update proxy CI/CD variables community.general.gitlab_group_variable: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" group: "{{ dsc.global.projectsRootDir | join('/') }}" purge: false state: "{{ dsc.proxy.enabled | ternary('present', 'absent') }}" variables: - - name: "HTTP_PROXY" - value: "{{ dsc.proxy.http_proxy | default ('') }}" - - name: "HTTPS_PROXY" - value: "{{ dsc.proxy.https_proxy | default ('') }}" - - name: "NO_PROXY" - value: "{{ dsc.proxy.no_proxy | default ('') }}" - - name: "PROXY_HOST" - value: "{{ dsc.proxy.host | default ('') }}" - - name: "PROXY_PORT" - value: "{{ dsc.proxy.port | default ('') }}" - - name: "http_proxy" - value: "{{ dsc.proxy.http_proxy | default ('') }}" - - name: "https_proxy" - value: "{{ dsc.proxy.https_proxy | default ('') }}" - - name: "no_proxy" - value: "{{ dsc.proxy.no_proxy | default ('') }}" + - name: HTTP_PROXY + value: "{{ dsc.proxy.http_proxy | default('') }}" + - name: HTTPS_PROXY + value: "{{ dsc.proxy.https_proxy | default('') }}" + - name: NO_PROXY + value: "{{ 
dsc.proxy.no_proxy | default('') }}" + - name: PROXY_HOST + value: "{{ dsc.proxy.host | default('') }}" + - name: PROXY_PORT + value: "{{ dsc.proxy.port | default('') }}" + - name: http_proxy + value: "{{ dsc.proxy.http_proxy | default('') }}" + - name: https_proxy + value: "{{ dsc.proxy.https_proxy | default('') }}" + - name: no_proxy + value: "{{ dsc.proxy.no_proxy | default('') }}" -- name: "Set or update CA_BUNDLE variable" +- name: Set or update CA_BUNDLE variable community.general.gitlab_group_variable: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" group: "{{ dsc.global.projectsRootDir | join('/') }}" purge: false state: "{{ dsc.additionalsCA | ternary('present', 'absent') }}" variables: - - name: "CA_BUNDLE" + - name: CA_BUNDLE value: "{{ [additionals_ca_pem, exposed_ca_pem] | join('\n') }}" variable_type: file -- name: "Set or update insecure args variables" +- name: Set or update insecure args variables community.general.gitlab_group_variable: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" validate_certs: "{{ dsc.exposedCA.type == 'none' }}" group: "{{ dsc.global.projectsRootDir | join('/') }}" purge: false state: "{{ dsc.gitlab.insecureCI | ternary('present', 'absent') }}" variables: - - name: "EXTRA_KANIKO_ARGS" - value: "--skip-tls-verify" - - name: "EXTRA_GIT_ARGS" - value: "-c http.sslVerify=false" - - name: "EXTRA_VAULT_ARGS" - value: "-tls-skip-verify" + - name: EXTRA_KANIKO_ARGS + value: --skip-tls-verify + - name: EXTRA_GIT_ARGS + value: -c http.sslVerify=false + - name: EXTRA_VAULT_ARGS + value: -tls-skip-verify -- name: "Set or update additional CI/CD variables" +- name: Set or update additional CI/CD variables community.general.gitlab_group_variable: - api_url: "https://{{ gitlab_domain }}" + api_url: https://{{ gitlab_domain }} api_token: "{{ gitlab_token }}" 
validate_certs: "{{ dsc.exposedCA.type == 'none' }}" group: "{{ dsc.global.projectsRootDir | join('/') }}" diff --git a/roles/gitlab/templates/values-proxy.yaml.j2 b/roles/gitlab/templates/values-proxy.yaml.j2 new file mode 100644 index 00000000..7591414e --- /dev/null +++ b/roles/gitlab/templates/values-proxy.yaml.j2 @@ -0,0 +1,9 @@ + +global: + extraEnv: + http_proxy: {{ dsc.proxy.http_proxy }} + https_proxy: {{ dsc.proxy.https_proxy }} + no_proxy: {{ dsc.proxy.no_proxy }} + HTTP_PROXY: {{ dsc.proxy.http_proxy }} + HTTPS_PROXY: {{ dsc.proxy.https_proxy }} + NO_PROXY: {{ dsc.proxy.no_proxy }} diff --git a/roles/gitlab/templates/gitlab-instance.yaml.j2 b/roles/gitlab/templates/values.yaml.j2 similarity index 88% rename from roles/gitlab/templates/gitlab-instance.yaml.j2 rename to roles/gitlab/templates/values.yaml.j2 index 5d53d0ef..94e29b4d 100644 --- a/roles/gitlab/templates/gitlab-instance.yaml.j2 +++ b/roles/gitlab/templates/values.yaml.j2 @@ -91,15 +91,6 @@ global: {% if dsc.ingress.tls.type == 'tlsSecret' %} secretName: {{ dsc.ingress.tls.tlsSecret.name }} {% endif %} -{% endif %} - extraEnv: -{% if dsc.proxy.enabled %} - http_proxy: {{ dsc.proxy.http_proxy }} - https_proxy: {{ dsc.proxy.https_proxy }} - no_proxy: {{ dsc.proxy.no_proxy }} - HTTP_PROXY: {{ dsc.proxy.http_proxy }} - HTTPS_PROXY: {{ dsc.proxy.https_proxy }} - NO_PROXY: {{ dsc.proxy.no_proxy }} {% endif %} appConfig: defaultProjectsFeatures: diff --git a/roles/harbor/tasks/main.yaml b/roles/harbor/tasks/main.yaml index 3b32d391..43504799 100644 --- a/roles/harbor/tasks/main.yaml +++ b/roles/harbor/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Create harbor Namespace kubernetes.core.k8s: definition: @@ -19,7 +20,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - creationTimestamp: null + creationTimestamp: name: system:openshift:scc:privileged-harbor roleRef: apiGroup: rbac.authorization.k8s.io @@ -39,9 +40,19 @@ ansible.builtin.set_fact: harbor_values: "{{ 
lookup('template', 'values.yaml.j2') | from_yaml }}" +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + harbor_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with harbor proxy values + ansible.builtin.set_fact: + harbor_values: "{{ harbor_values | combine(harbor_proxy_values, recursive=True, list_merge='append') }}" + - name: Merge with harbor user values ansible.builtin.set_fact: - harbor_values: "{{ harbor_values | combine(dsc.harbor['values'], recursive=True) }}" + harbor_values: "{{ harbor_values | combine(dsc.harbor['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: @@ -85,25 +96,25 @@ - name: Set Harbor config ansible.builtin.set_fact: harbor_config: - "auth_mode": "oidc_auth" - "notification_enable": true - "oidc_admin_group": "/admin" - "oidc_auto_onboard": true - "oidc_client_id": "{{ harbor_secret.resources[0].data.CLIENT_ID | b64decode }}" - "oidc_endpoint": "https://{{ keycloak_domain }}/realms/dso" - "oidc_extra_redirect_parms": "{}" - "oidc_group_filter": "" - "oidc_groups_claim": "groups" - "oidc_name": "keycloak" - "oidc_scope": "openid,generic" - "oidc_user_claim": "email" - "oidc_verify_cert": "{{ dsc.exposedCA.type == 'none' }}" - "project_creation_restriction": "adminonly" - "quota_per_project_enable": true - "read_only": false - "robot_name_prefix": "robot$" - "robot_token_duration": 30 - "self_registration": false + auth_mode: oidc_auth + notification_enable: true + oidc_admin_group: /admin + oidc_auto_onboard: true + oidc_client_id: "{{ harbor_secret.resources[0].data.CLIENT_ID | b64decode }}" + oidc_endpoint: https://{{ keycloak_domain }}/realms/dso + oidc_extra_redirect_parms: "{}" + oidc_group_filter: "" + oidc_groups_claim: groups + oidc_name: keycloak + oidc_scope: openid,generic + oidc_user_claim: email + oidc_verify_cert: "{{ dsc.exposedCA.type == 'none' }}" + 
project_creation_restriction: adminonly + quota_per_project_enable: true + read_only: false + robot_name_prefix: robot$ + robot_token_duration: 30 + self_registration: false - name: Assert element ansible.builtin.assert: diff --git a/roles/harbor/templates/proxy-values.yaml.j2 b/roles/harbor/templates/proxy-values.yaml.j2 new file mode 100644 index 00000000..20a4da61 --- /dev/null +++ b/roles/harbor/templates/proxy-values.yaml.j2 @@ -0,0 +1,14 @@ +proxy: + httpProxy: {{ dsc.proxy.http_proxy }} + httpsProxy: {{ dsc.proxy.https_proxy }} + noProxy: {{ dsc.proxy.no_proxy }},.local,.internal + components: + - nginx + - portal + - core + - jobservice + - registry + - trivy + - database + - redis + - exporter \ No newline at end of file diff --git a/roles/harbor/templates/values.yaml.j2 b/roles/harbor/templates/values.yaml.j2 index b9e14570..0c515b8b 100644 --- a/roles/harbor/templates/values.yaml.j2 +++ b/roles/harbor/templates/values.yaml.j2 @@ -58,22 +58,6 @@ persistence: trivy: size: 10Gi harborAdminPassword: "{{ dsc.harbor.adminPassword }}" -{% if dsc.proxy.enabled %} -proxy: - httpProxy: {{ dsc.proxy.http_proxy }} - httpsProxy: {{ dsc.proxy.https_proxy }} - noProxy: {{ dsc.proxy.no_proxy }},.local,.internal - components: - - nginx - - portal - - core - - jobservice - - registry - - trivy - - database - - redis - - exporter -{% endif %} nginx: serviceAccountName: harbor-sa image: diff --git a/roles/keycloak/tasks/client.yaml b/roles/keycloak/tasks/client.yaml index 4a8394ac..98ba5c8a 100644 --- a/roles/keycloak/tasks/client.yaml +++ b/roles/keycloak/tasks/client.yaml @@ -1,13 +1,14 @@ +--- - name: Create Keycloak clients community.general.keycloak_client: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - auth_client_id: "admin-cli" - auth_keycloak_url: "https://{{ keycloak_domain }}" - auth_realm: "master" + auth_client_id: admin-cli + auth_keycloak_url: https://{{ keycloak_domain }} + auth_realm: master auth_username: "{{ keycloak_admin }}" auth_password: "{{ 
keycloak_admin_password }}" - state: "present" - realm: "dso" + state: present + realm: dso clientId: "{{ item.clientId }}" clientAuthenticatorType: "{{ item.clientAuthenticatorType | default(omit) }}" standardFlowEnabled: "{{ item.standardFlowEnabled }}" @@ -37,10 +38,10 @@ apiVersion: v1 kind: Secret data: - CLIENT_ID: "{{ item.clientId|b64encode }}" - CLIENT_SECRET: "{{ kc_clients_secrets.clientsecret_info.value|b64encode }}" + CLIENT_ID: "{{ item.clientId | b64encode }}" + CLIENT_SECRET: "{{ kc_clients_secrets.clientsecret_info.value | b64encode }}" metadata: - name: "keycloak-client-secret-{{ item.clientId }}" + name: keycloak-client-secret-{{ item.clientId }} namespace: "{{ dsc.keycloak.namespace }}" type: Opaque @@ -52,8 +53,8 @@ apiVersion: v1 kind: Secret data: - CLIENT_ID: "{{ item.clientId|b64encode }}" + CLIENT_ID: "{{ item.clientId | b64encode }}" metadata: - name: "keycloak-client-secret-{{ item.clientId }}" + name: keycloak-client-secret-{{ item.clientId }} namespace: "{{ dsc.keycloak.namespace }}" type: Opaque diff --git a/roles/keycloak/tasks/main.yml b/roles/keycloak/tasks/main.yml index 9e8b06b9..fd23e8be 100644 --- a/roles/keycloak/tasks/main.yml +++ b/roles/keycloak/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create Keycloak namespace kubernetes.core.k8s: name: "{{ dsc.keycloak.namespace }}" @@ -45,7 +46,7 @@ kubernetes.core.k8s_info: namespace: "{{ dsc.keycloak.namespace }}" kind: Secret - name: "keycloak" + name: keycloak register: kc_adm_pass_secret - name: Set Keycloak admin password secret @@ -56,7 +57,7 @@ apiVersion: v1 kind: Secret data: - admin-password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits')|b64encode }}" + admin-password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits') | b64encode }}" metadata: name: keycloak namespace: "{{ dsc.keycloak.namespace }}" @@ -73,7 +74,7 @@ - name: Merge with Keycloak user values ansible.builtin.set_fact: - kc_values: "{{ kc_values | 
combine(dsc.keycloak['values'], recursive=True) }}" + kc_values: "{{ kc_values | combine(dsc.keycloak['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: @@ -85,7 +86,7 @@ - name: Wait Keycloak URL ansible.builtin.uri: - url: "https://{{ keycloak_domain }}" + url: https://{{ keycloak_domain }} validate_certs: "{{ dsc.exposedCA.type == 'none' }}" method: GET status_code: [200, 202] @@ -99,13 +100,13 @@ kubernetes.core.k8s_info: namespace: "{{ dsc.keycloak.namespace }}" kind: Secret - name: "keycloak" + name: keycloak register: kc_adm_pass - name: Set Keycloak admin credentials facts ansible.builtin.set_fact: - keycloak_admin_password: "{{ kc_adm_pass.resources[0].data['admin-password']|b64decode }}" - keycloak_admin: "admin" + keycloak_admin_password: "{{ kc_adm_pass.resources[0].data['admin-password'] | b64decode }}" + keycloak_admin: admin - name: Update console inventory kubernetes.core.k8s: @@ -120,12 +121,12 @@ - name: Get Keycloak API token ansible.builtin.uri: - url: "https://{{ keycloak_domain }}/realms/master/protocol/openid-connect/token" + url: https://{{ keycloak_domain }}/realms/master/protocol/openid-connect/token method: POST status_code: [200, 202] validate_certs: "{{ dsc.exposedCA.type == 'none' }}" return_content: true - body: "username={{ keycloak_admin }}&password={{ keycloak_admin_password }}&grant_type=password&client_id=admin-cli" + body: username={{ keycloak_admin }}&password={{ keycloak_admin_password }}&grant_type=password&client_id=admin-cli register: kc_token - name: Set kc_access_token fact @@ -134,14 +135,14 @@ - name: Get keycloak realms from API ansible.builtin.uri: - url: "https://{{ keycloak_domain }}/admin/realms" + url: https://{{ keycloak_domain }}/admin/realms method: GET validate_certs: "{{ dsc.exposedCA.type == 'none' }}" status_code: [200, 202] return_content: true body_format: json headers: - Authorization: "bearer {{ kc_access_token }}" + Authorization: bearer {{ kc_access_token }} 
register: kc_realms - name: Create dso realm @@ -157,19 +158,19 @@ id: dso realm: dso display_name: Dso Realm - password_policy: "length(8) and lowerCase(1) and upperCase(1) and specialChars(1) and digits(1) and passwordHistory(1) and notUsername() and forceExpiredPasswordChange(365)" + password_policy: length(8) and lowerCase(1) and upperCase(1) and specialChars(1) and digits(1) and passwordHistory(1) and notUsername() and forceExpiredPasswordChange(365) enabled: true - name: Get keycloak dso realm users from API ansible.builtin.uri: - url: "https://{{ keycloak_domain }}/admin/realms/dso/users" + url: https://{{ keycloak_domain }}/admin/realms/dso/users method: GET status_code: [200, 202] return_content: true validate_certs: "{{ dsc.exposedCA.type == 'none' }}" body_format: json headers: - Authorization: "bearer {{ kc_access_token }}" + Authorization: bearer {{ kc_access_token }} register: kc_dso_users - name: Find dso admin secret @@ -180,7 +181,8 @@ register: dso_admin_secret - name: Create dso admin secret and user - when: (dso_admin_secret.resources | length == 0) or (kc_dso_users.json | selectattr('username', 'equalto', dso_admin_secret.resources[0].data.ADMIN_USER|b64decode) | length == 0) + when: (dso_admin_secret.resources | length == 0) or (kc_dso_users.json | selectattr('username', 'equalto', dso_admin_secret.resources[0].data.ADMIN_USER|b64decode) + | length == 0) block: - name: Generate admin user password ansible.builtin.set_fact: @@ -194,8 +196,8 @@ namespace: "{{ dsc.keycloak.namespace }}" name: dso-admin-user-secret data: - ADMIN_USER: "{{ 'admin@example.com'|b64encode }}" - ADMIN_USER_PASSWORD: "{{ admin_user_password|b64encode }}" + ADMIN_USER: "{{ 'admin@example.com' | b64encode }}" + ADMIN_USER_PASSWORD: "{{ admin_user_password | b64encode }}" type: Opaque - name: Remove dso admin user @@ -234,14 +236,14 @@ - name: Get dso keycloak client scopes from API ansible.builtin.uri: - url: "https://{{ keycloak_domain }}/admin/realms/dso/client-scopes" 
+ url: https://{{ keycloak_domain }}/admin/realms/dso/client-scopes method: GET status_code: [200, 202] return_content: true body_format: json validate_certs: "{{ dsc.exposedCA.type == 'none' }}" headers: - Authorization: "bearer {{ kc_access_token }}" + Authorization: bearer {{ kc_access_token }} register: kc_client_scopes - name: Create generic keycloak client scope @@ -264,7 +266,8 @@ consent.screen.text: "" protocolMappers: "{{ lookup('ansible.builtin.file', 'generic-client-scope-protocolMappers.yaml') | from_yaml }}" -- ansible.builtin.include_tasks: +- name: Create clients + ansible.builtin.include_tasks: file: client.yaml with_items: "{{ keycloak_clients }}" diff --git a/roles/keycloak/vars/main.yaml b/roles/keycloak/vars/main.yaml index 706da668..85015540 100644 --- a/roles/keycloak/vars/main.yaml +++ b/roles/keycloak/vars/main.yaml @@ -1,14 +1,14 @@ --- keycloak_clients: - - clientId: "gitlab-client" - clientAuthenticatorType: "client-secret" + - clientId: gitlab-client + clientAuthenticatorType: client-secret standardFlowEnabled: true redirectUris: - - "https://{{ gitlab_domain }}/users/auth/openid_connect/callback" + - https://{{ gitlab_domain }}/users/auth/openid_connect/callback webOrigins: - - "https://{{ gitlab_domain }}" + - https://{{ gitlab_domain }} defaultClientScopes: - - "generic" + - generic publicClient: false - clientId: console-frontend diff --git a/roles/nexus/tasks/main.yaml b/roles/nexus/tasks/main.yaml index 40b8dbf5..743aefda 100644 --- a/roles/nexus/tasks/main.yaml +++ b/roles/nexus/tasks/main.yaml @@ -1,10 +1,11 @@ +--- - name: Create nexus Namespace kubernetes.core.k8s: definition: apiVersion: v1 kind: Namespace metadata: - creationTimestamp: null + creationTimestamp: name: "{{ dsc.nexus.namespace }}" - name: Create nexus Service Account @@ -86,7 +87,7 @@ ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain 
}}/service/rest/v1/security/users/admin/change-password" + url: https://{{ nexus_domain }}/service/rest/v1/security/users/admin/change-password method: PUT user: admin password: "{{ nexus_admin_password }}" @@ -126,14 +127,14 @@ ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain }}/service/rest/v1/security/anonymous" + url: https://{{ nexus_domain }}/service/rest/v1/security/anonymous method: PUT user: admin password: "{{ nexus_admin_password }}" body: enabled: true - userId: "anonymous" - realmName: "NexusAuthorizingRealm" + userId: anonymous + realmName: NexusAuthorizingRealm body_format: json status_code: [200] @@ -141,7 +142,7 @@ ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain }}/service/rest/v1/security/roles/new-anon" + url: https://{{ nexus_domain }}/service/rest/v1/security/roles/new-anon method: GET user: admin password: "{{ nexus_admin_password }}" @@ -153,14 +154,14 @@ ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain }}/service/rest/v1/security/roles" + url: https://{{ nexus_domain }}/service/rest/v1/security/roles method: POST user: admin password: "{{ nexus_admin_password }}" body: - id: "new-anon" - name: "new-anon" - description: "new-anon" + id: new-anon + name: new-anon + description: new-anon privileges: - nx-repository-view-docker-*-browse - nx-repository-view-docker-*-read @@ -173,7 +174,7 @@ ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain }}/service/rest/v1/security/users?userId=anonymous&source=default" + url: https://{{ nexus_domain }}/service/rest/v1/security/users?userId=anonymous&source=default method: GET user: admin password: "{{ nexus_admin_password }}" @@ -185,17 +186,17 @@ ansible.builtin.uri: validate_certs: "{{ 
dsc.exposedCA.type == 'none' }}" force_basic_auth: true - url: "https://{{ nexus_domain }}/service/rest/v1/security/users/anonymous" + url: https://{{ nexus_domain }}/service/rest/v1/security/users/anonymous method: PUT user: admin password: "{{ nexus_admin_password }}" body: - userId: "anonymous" - firstName: "Anonymous" - lastName: "User" - emailAddress: "anonymous@example.org" - source: "default" - status: "active" + userId: anonymous + firstName: Anonymous + lastName: User + emailAddress: anonymous@example.org + source: default + status: active readOnly: true roles: - new-anon diff --git a/roles/nexus/tasks/manage-pvc.yaml b/roles/nexus/tasks/manage-pvc.yaml index d4cfeb86..752ef55d 100644 --- a/roles/nexus/tasks/manage-pvc.yaml +++ b/roles/nexus/tasks/manage-pvc.yaml @@ -1,3 +1,4 @@ +--- - name: Get PVC info kubernetes.core.k8s_info: namespace: "{{ dsc.nexus.namespace }}" @@ -5,7 +6,6 @@ kind: PersistentVolumeClaim register: nexus_pvc - - name: Manage incorrect pvc size when: (nexus_pvc.resources | length) and (nexus_pvc.resources[0].status.capacity.storage != dsc.nexus.storageSize) block: diff --git a/roles/socle-config/tasks/main.yaml b/roles/socle-config/tasks/main.yaml index 4836a3d0..6cb1f15f 100644 --- a/roles/socle-config/tasks/main.yaml +++ b/roles/socle-config/tasks/main.yaml @@ -1,8 +1,9 @@ +--- - name: Create or update DsoSocleConfig CRD kubernetes.core.k8s: definition: "{{ lookup('ansible.builtin.file', 'crd-conf-dso.yaml') | from_yaml }}" -- name: "Get socle config from conf-dso dsc (default)" +- name: Get socle config from conf-dso dsc (default) kubernetes.core.k8s_info: kind: dsc name: conf-dso @@ -24,16 +25,16 @@ - name: Warning message ansible.builtin.debug: msg: - - "Attention ! Vous avez lancé le playbook d'installation avec l'option '-e dsc_cr={{ dsc_cr }}'" - - "mais la ressource dsc nommée '{{ dsc_cr }}' est vide ou inexistante côté cluster !" + - Attention ! 
Vous avez lancé le playbook d'installation avec l'option '-e dsc_cr={{ dsc_cr }}' + - mais la ressource dsc nommée '{{ dsc_cr }}' est vide ou inexistante côté cluster ! - "" - - "Si votre intention est bien d'utiliser votre propre configuration plutôt que 'conf-dso', ressource dsc par défaut," - - "alors veuiller déclarer votre resource dsc '{{ dsc_cr }}' dans un fichier YAML" + - Si votre intention est bien d'utiliser votre propre configuration plutôt que 'conf-dso', ressource dsc par défaut, + - alors veuiller déclarer votre resource dsc '{{ dsc_cr }}' dans un fichier YAML - "nommé par exemple 'ma-conf-dso.yaml' et la créer via la commande suivante :" - "" - " kubectl apply -f ma-conf-dso.yaml " - "" - - "Puis relancer l'installation." + - Puis relancer l'installation. - name: Exit playbook ansible.builtin.meta: end_play @@ -57,29 +58,29 @@ - name: Disclaimer ansible.builtin.debug: msg: - - "Il semblerait que vous n'ayez jamais installé le socle sur ce cluster ou que la configuration par défaut ait été supprimée." - - "Veuillez modifier la resource de type dsc par défaut nommée 'conf-dso' et de scope cluster" + - Il semblerait que vous n'ayez jamais installé le socle sur ce cluster ou que la configuration par défaut ait été supprimée. + - Veuillez modifier la resource de type dsc par défaut nommée 'conf-dso' et de scope cluster - "via la commande suivante :" - "" - " kubectl edit dsc conf-dso " - "" - - "Puis relancer l'installation." + - Puis relancer l'installation. - "" - - "Attention ! Si vous relancez l'installation sans modifier la configuration par défaut, celle-ci s'appliquera telle qu'elle." - - "Ce n'est probablement pas ce que vous souhaitez faire." + - Attention ! Si vous relancez l'installation sans modifier la configuration par défaut, celle-ci s'appliquera telle qu'elle. + - Ce n'est probablement pas ce que vous souhaitez faire. 
- "" - - "Alternativement, vous pouvez aussi déclarer la resource dsc de nom conf-dso (ou tout autre nom à votre convenance)" + - Alternativement, vous pouvez aussi déclarer la resource dsc de nom conf-dso (ou tout autre nom à votre convenance) - "dans un fichier YAML nommé par exemple 'ma-conf-dso.yaml', pour ensuite la créer via la commande suivante :" - "" - " kubectl apply -f ma-conf-dso.yaml " - "" - - "Puis relancer l'installation." + - Puis relancer l'installation. - "" - "Si vous utilisez la resource dsc par défaut nommée conf-dso, vous relancerez le playbook d'installation comme ceci :" - "" - " ansible-playbook install.yaml " - "" - - "Si au contraire vous souhaitez utiliser un nom de resource différent, associé à une configuration différente," + - Si au contraire vous souhaitez utiliser un nom de resource différent, associé à une configuration différente, - "alors vous devrez utiliser la commande suivante pour relancer l'installation (exemple avec une dsc nommée conf-perso) :" - "" - " ansible-playbook install.yaml -e dsc_cr=conf-perso " @@ -95,13 +96,16 @@ ansible.builtin.set_fact: dsc: "{{ socle_config.resources[0] }}" -- ansible.builtin.set_fact: +- name: Set some facts + ansible.builtin.set_fact: dsc_default_config: "{{ lookup('ansible.builtin.file', 'config.yaml') | from_yaml }}" dsc_default_releases: "{{ lookup('ansible.builtin.file', 'releases.yaml') | from_yaml }}" -- ansible.builtin.set_fact: - dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True)}}" -- ansible.builtin.set_fact: +- name: Set some facts + ansible.builtin.set_fact: + dsc: "{{ dsc_default_releases | combine(dsc_default_config, recursive=True) | combine(dsc, recursive=True) }}" +- name: Set some facts + ansible.builtin.set_fact: dsc: "{{ dsc.spec }}" - name: Set root_domain fact diff --git a/roles/sonarqube/filter_plugins/settings_filter.py b/roles/sonarqube/filter_plugins/settings_filter.py deleted file mode 100644 index 
4be3220e..00000000 --- a/roles/sonarqube/filter_plugins/settings_filter.py +++ /dev/null @@ -1,37 +0,0 @@ -import urllib.parse - -def settings_filter(to_verify_settings, sonar_settings): - to_update_settings = [] - for setting in sonar_settings: - key=setting['key'] - if (key in to_verify_settings): - if ('value' in setting): - value=setting['value'] - if (setting['value'] != to_verify_settings[key]): - to_update_settings.append({'key': key, 'value': to_verify_settings[key]}) - del to_verify_settings[key] - return to_update_settings - -def to_query_string(queries): - queries_strings = [] - for query in queries: - queries_strings.append('key='+ - urllib.parse.quote_plus(query['key'])+ - '&value='+ - urllib.parse.quote_plus(query['value']) - ) - return queries_strings - -def plugins_includes(sonar_plugins_list, key_to_search): - for plugin in sonar_plugins_list: - if (plugin['key'] == key_to_search): - return 'yes' - return 'no' - -class FilterModule(object): - def filters(self): - return { - 'to_query_string': to_query_string, - 'plugins_includes': plugins_includes, - 'settings_filter': settings_filter, - } \ No newline at end of file diff --git a/roles/sonarqube/tasks/main.yaml b/roles/sonarqube/tasks/main.yaml index 4be72fb6..341aec64 100644 --- a/roles/sonarqube/tasks/main.yaml +++ b/roles/sonarqube/tasks/main.yaml @@ -1,3 +1,4 @@ +--- - name: Create Sonarqube namespace kubernetes.core.k8s: kind: Namespace @@ -49,8 +50,8 @@ apiVersion: v1 kind: Secret data: - password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits')|b64encode }}" - currentPassword: "{{ 'admin'|b64encode }}" + password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits') | b64encode }}" + currentPassword: "{{ 'admin' | b64encode }}" metadata: name: sonarqube namespace: "{{ dsc.sonarqube.namespace }}" @@ -61,7 +62,7 @@ kubernetes.core.k8s_info: namespace: "{{ dsc.sonarqube.namespace }}" kind: Secret - name: "sonar-monitoring-password" + name: 
sonar-monitoring-password register: sonar_monitoring_secret - name: Set SonarQube monitoring password secret @@ -73,7 +74,7 @@ apiVersion: v1 kind: Secret data: - monitoring-password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits')|b64encode }}" + monitoring-password: "{{ lookup('password', '/dev/null length=24 chars=ascii_letters,digits') | b64encode }}" metadata: name: sonar-monitoring-password namespace: "{{ dsc.sonarqube.namespace }}" @@ -88,7 +89,7 @@ failed_when: sonar_secret.resources | length == 0 - name: Set settings fact - set_fact: + ansible.builtin.set_fact: keycloak_secret: "{{ sonar_secret.resources[0].data.CLIENT_SECRET | b64decode }}" keycloak_client: "{{ sonar_secret.resources[0].data.CLIENT_ID | b64decode }}" @@ -96,9 +97,19 @@ ansible.builtin.set_fact: sonar_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" +- name: Merge with proxy settings + when: dsc.proxy.enabled + block: + - name: Generate proxy values + ansible.builtin.set_fact: + sonar_proxy_values: "{{ lookup('template', 'proxy-values.yaml.j2') | from_yaml }}" + - name: Merge with sonar proxy values + ansible.builtin.set_fact: + sonar_values: "{{ sonar_values | combine(sonar_proxy_values, recursive=True, list_merge='append') }}" + - name: Merge with sonarqube user values ansible.builtin.set_fact: - sonar_values: "{{ sonar_values | combine(dsc.sonarqube['values'], recursive=True) }}" + sonar_values: "{{ sonar_values | combine(dsc.sonarqube['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: @@ -129,125 +140,25 @@ - name: Get SonarQube version ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/server/version" + url: https://{{ sonar_domain }}/api/server/version method: GET return_content: true register: sonar_version -- name: Reset admin token +- name: Reset Admin Password procedure when: ansible_inventory.resources[0].data.SONAR_API_TOKEN is undefined - block: - - name: Missing Token disclaimer - 
ansible.builtin.debug: - msg: "Impossible de retrouver le TOKEN du compte admin, initialisation en cours …" - - - name: Get postgres endpoint - kubernetes.core.k8s_info: - kind: Endpoints - namespace: "{{ dsc.sonarqube.namespace }}" - name: pg-cluster-sonar-rw - register: pg_ep - until: pg_ep.resources[0].subsets[0].addresses[0] is defined - retries: 15 - delay: 5 - - - name: Set pg_pod - ansible.builtin.set_fact: - pg_pod: "{{ pg_ep.resources[0].subsets[0].addresses[0].targetRef.name }}" - - - name: Get pg-cluster-sonar-app secret - kubernetes.core.k8s_info: - namespace: "{{ dsc.sonarqube.namespace }}" - kind: Secret - name: pg-cluster-sonar-app - register: pg_admin_secret - - - name: Set pg admin facts - ansible.builtin.set_fact: - pg_admin: "{{ pg_admin_secret.resources[0].data.username|b64decode }}" - pg_admin_pass: "{{ pg_admin_secret.resources[0].data.password|b64decode }}" - no_log: true - - - name: Get admin account - kubernetes.core.k8s_exec: - pod: "{{ pg_pod }}" - namespace: "{{ dsc.sonarqube.namespace }}" - command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t - vars: - query: select row_to_json(row) from (SELECT * FROM users WHERE login = 'admin') row; - register: admin_account - until: "admin_account is not failed" - retries: 10 - delay: 5 - no_log: true - - - name: Generate random token - ansible.builtin.set_fact: - admin: "{{ admin_account.stdout | from_json }}" - token_pass: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}" - no_log: true - - - name: Get sha384sum of token - ansible.builtin.shell: "set -o pipefail && echo -n {{ token_pass }} | sha384sum | awk '{ print $1 }'" - args: - executable: /bin/bash - register: token_sha - changed_when: true - no_log: true - - - name: Set query fact (for 10.x branch and higher) - when: sonar_version.content is version('10.0.0', operator='ge', version_type='loose') - ansible.builtin.set_fact: - query: > - INSERT INTO user_tokens - 
("uuid", "user_uuid", "name", "token_hash", last_connection_date, created_at, "type", expiration_date, "project_uuid") - VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, 'USER_TOKEN', null, '') - ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; - - - name: Set query fact (for 9.x branch and lower) - when: sonar_version.content is version('10.0.0', operator='lt', version_type='loose') - ansible.builtin.set_fact: - query: > - INSERT INTO user_tokens - (uuid, user_uuid, "name", token_hash, last_connection_date, created_at, project_key, "type", expiration_date) - VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, '', 'USER_TOKEN', null) - ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; - - - name: Adding admin token - kubernetes.core.k8s_exec: - pod: "{{ pg_pod }}" - namespace: "{{ dsc.sonarqube.namespace }}" - command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t - register: admin_token_check - until: "admin_token_check is not failed" - retries: 5 - delay: 5 - no_log: true - - - name: Update inventory - kubernetes.core.k8s: - kind: ConfigMap - name: dso-config - namespace: "{{ dsc.console.namespace }}" - state: patched - definition: - data: - SONAR_API_TOKEN: "{{ token_pass }}" - no_log: true - -# - name: Reset password disclaimer -# ansible.builtin.debug: -# msg: /!\ PLEASE ENSURE YOU RESET ADMIN PASSWORD /!\ + ansible.builtin.include_tasks: + file: reset-admin-password.yaml - name: Set fact sonar token when: ansible_inventory.resources[0].data.SONAR_API_TOKEN is defined - ansible.builtin.set_fact: + ansible.builtin.set_fact: token_pass: "{{ ansible_inventory.resources[0].data.SONAR_API_TOKEN }}" no_log: true - name: Remove permissions for sonar-users ansible.builtin.uri: - url: "https://{{ sonar_domain 
}}/api/permissions/remove_group?groupName=sonar-users&permission={{ item }}" + url: https://{{ sonar_domain }}/api/permissions/remove_group?groupName=sonar-users&permission={{ item }} user: "{{ token_pass }}" force_basic_auth: true validate_certs: "{{ dsc.exposedCA.type == 'none' }}" @@ -255,4 +166,4 @@ status_code: 204 with_items: - scan - - provisioning \ No newline at end of file + - provisioning diff --git a/roles/sonarqube/tasks/reset-admin-password.yaml b/roles/sonarqube/tasks/reset-admin-password.yaml new file mode 100644 index 00000000..150b4e6a --- /dev/null +++ b/roles/sonarqube/tasks/reset-admin-password.yaml @@ -0,0 +1,98 @@ +--- +- name: Missing Token disclaimer + ansible.builtin.debug: + msg: Impossible de retrouver le TOKEN du compte admin, initialisation en cours … + +- name: Get postgres endpoint + kubernetes.core.k8s_info: + kind: Endpoints + namespace: "{{ dsc.sonarqube.namespace }}" + name: pg-cluster-sonar-rw + register: pg_ep + until: pg_ep.resources[0].subsets[0].addresses[0] is defined + retries: 15 + delay: 5 + +- name: Set pg_pod + ansible.builtin.set_fact: + pg_pod: "{{ pg_ep.resources[0].subsets[0].addresses[0].targetRef.name }}" + +- name: Get pg-cluster-sonar-app secret + kubernetes.core.k8s_info: + namespace: "{{ dsc.sonarqube.namespace }}" + kind: Secret + name: pg-cluster-sonar-app + register: pg_admin_secret + +- name: Set pg admin facts + ansible.builtin.set_fact: + pg_admin: "{{ pg_admin_secret.resources[0].data.username | b64decode }}" + pg_admin_pass: "{{ pg_admin_secret.resources[0].data.password | b64decode }}" + no_log: true + +- name: Get admin account + kubernetes.core.k8s_exec: + pod: "{{ pg_pod }}" + namespace: "{{ dsc.sonarqube.namespace }}" + command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t + vars: + query: select row_to_json(row) from (SELECT * FROM users WHERE login = 'admin') row; + register: admin_account + until: admin_account is not failed + retries: 10 
+ delay: 5 + no_log: true + +- name: Generate random token + ansible.builtin.set_fact: + admin: "{{ admin_account.stdout | from_json }}" + token_pass: "{{ lookup('password', '/dev/null length=32 chars=ascii_letters') }}" + no_log: true + +- name: Get sha384sum of token + ansible.builtin.shell: set -o pipefail && echo -n {{ token_pass }} | sha384sum | awk '{ print $1 }' + args: + executable: /bin/bash + register: token_sha + changed_when: true + no_log: true + +- name: Set query fact (for 10.x branch and higher) + when: sonar_version.content is version('10.0.0', operator='ge', version_type='loose') + ansible.builtin.set_fact: + query: > + INSERT INTO user_tokens + ("uuid", "user_uuid", "name", "token_hash", last_connection_date, created_at, "type", expiration_date, "project_uuid") + VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, 'USER_TOKEN', null, '') + ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; + +- name: Set query fact (for 9.x branch and lower) + when: sonar_version.content is version('10.0.0', operator='lt', version_type='loose') + ansible.builtin.set_fact: + query: > + INSERT INTO user_tokens + (uuid, user_uuid, "name", token_hash, last_connection_date, created_at, project_key, "type", expiration_date) + VALUES('mysuperuuid', '{{ admin.uuid }}', 'DSO', '{{ token_sha.stdout_lines[0] }}', null, 0, '', 'USER_TOKEN', null) + ON CONFLICT(uuid) DO UPDATE SET token_hash = '{{ token_sha.stdout_lines[0] }}'; + +- name: Adding admin token + kubernetes.core.k8s_exec: + pod: "{{ pg_pod }}" + namespace: "{{ dsc.sonarqube.namespace }}" + command: psql postgresql://"{{ pg_admin }}":"{{ pg_admin_pass }}"@127.0.0.1:5432/sonardb -c "{{ query }}" -t + register: admin_token_check + until: admin_token_check is not failed + retries: 5 + delay: 5 + no_log: true + +- name: Update inventory + kubernetes.core.k8s: + kind: ConfigMap + name: dso-config + namespace: "{{ dsc.console.namespace }}" + state: 
patched + definition: + data: + SONAR_API_TOKEN: "{{ token_pass }}" + no_log: true diff --git a/roles/sonarqube/tasks/setup.yaml b/roles/sonarqube/tasks/setup.yaml deleted file mode 100644 index 0eaec801..00000000 --- a/roles/sonarqube/tasks/setup.yaml +++ /dev/null @@ -1,98 +0,0 @@ -- name: Remove permissions for sonar-users - ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/permissions/remove_group?groupName=sonar-users&permission={{ item }}" - user: "{{ token_pass }}" - force_basic_auth: true - validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - method: post - status_code: 204 - with_items: - - scan - - provisioning - -- name: Get Sonar client secret - kubernetes.core.k8s_info: - kind: Secret - namespace: "{{ dsc.keycloak.namespace }}" - name: keycloak-client-secret-sonar-client - register: sonar_secret - failed_when: sonar_secret.resources | length == 0 - -- name: Set settings fact - set_fact: - keycloak_secret: "{{ sonar_secret.resources[0].data.CLIENT_SECRET | b64decode }}" - keycloak_client: "{{ sonar_secret.resources[0].data.CLIENT_ID | b64decode }}" - sonar_settings: - sonar.core.serverBaseURL: https://{{ sonar_domain }} - sonar.plugins.risk.consent: "ACCEPTED" - -- name: Get settings - ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/settings/values" - return_content: true - user: "{{ token_pass }}" - validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - force_basic_auth: true - register: current_sonar_settings - -- name: Filter need update setting - set_fact: - to_update_settings: "{{ sonar_settings | settings_filter(current_sonar_settings.json.settings) | to_query_string }}" - -- name: Set invalid settings - ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/settings/set?{{ item }}" - user: "{{ token_pass }}" - force_basic_auth: true - method: POST - validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - status_code: 204 - with_items: "{{ to_update_settings }}" - -# now install plugin -- name: Install oidc plugin - 
ansible.builtin.include_tasks: - file: plugin.yaml - -- name: Get settings - ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/settings/values" - return_content: true - user: "{{ token_pass }}" - validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - force_basic_auth: true - register: current_sonar_settings - -- name: Set settings fact - set_fact: - oidc_settings: - sonar.auth.oidc.enabled: "true" - sonar.auth.oidc.allowUsersToSignUp: "true" - sonar.auth.oidc.autoLogin: "false" - # sonar.auth.oidc.loginButtonText: "ConnectwithKeycloak" - sonar.auth.oidc.groupsSync.claimName: "groups" - sonar.auth.oidc.loginStrategy: "Email" - sonar.auth.oidc.groupsSync: "true" - sonar.auth.oidc.scopes: "openid generic" - sonar.auth.oidc.issuerUri: "https://{{ keycloak_domain }}/realms/dso" - # TODO upn par défaut, voir si besoin de changer - sonar.auth.oidc.loginStrategy.customClaim.name: "upn" - oidc_settings_secured: - - key: sonar.auth.oidc.clientId.secured - value: "{{ keycloak_client }}" - - key: sonar.auth.oidc.clientSecret.secured - value: "{{ keycloak_secret }}" - -- name: Filter oidc need update setting - set_fact: - to_update_settings: "{{ (oidc_settings | settings_filter(current_sonar_settings.json.settings) | to_query_string) + (oidc_settings_secured | to_query_string) }}" - -- name: Set invalid oidc settings - ansible.builtin.uri: - url: "https://{{ sonar_domain }}/api/settings/set?{{ item }}" - user: "{{ token_pass }}" - validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - force_basic_auth: true - method: POST - status_code: 204 - with_items: "{{ to_update_settings }}" diff --git a/roles/sonarqube/templates/proxy-values.yaml.j2 b/roles/sonarqube/templates/proxy-values.yaml.j2 new file mode 100644 index 00000000..327d0bfb --- /dev/null +++ b/roles/sonarqube/templates/proxy-values.yaml.j2 @@ -0,0 +1,12 @@ +plugins: + httpProxy: "{{ dsc.proxy.http_proxy }}" + httpsProxy: "{{ dsc.proxy.https_proxy }}" + noProxy: "{{ dsc.proxy.no_proxy }}" + +env: + - name: 
http_proxy + value: "{{ dsc.proxy.http_proxy }}" + - name: https_proxy + value: "{{ dsc.proxy.https_proxy }}" + - name: no_proxy + value: "{{ dsc.proxy.no_proxy }}" diff --git a/roles/sonarqube/templates/values.yaml.j2 b/roles/sonarqube/templates/values.yaml.j2 index a9786e05..64668a1e 100644 --- a/roles/sonarqube/templates/values.yaml.j2 +++ b/roles/sonarqube/templates/values.yaml.j2 @@ -28,11 +28,6 @@ plugins: install: - https://github.com/vaulttec/sonar-auth-oidc/releases/download/v2.1.1/sonar-auth-oidc-plugin-2.1.1.jar - https://github.com/mc1arke/sonarqube-community-branch-plugin/releases/download/1.14.0/sonarqube-community-branch-plugin-1.14.0.jar -{% if dsc.proxy.enabled %} - httpProxy: "{{ dsc.proxy.http_proxy }}" - httpsProxy: "{{ dsc.proxy.https_proxy }}" - noProxy: "{{ dsc.proxy.no_proxy }}" -{% endif %} jvmOpts: "-javaagent:/opt/sonarqube/extensions/plugins/sonarqube-community-branch-plugin-1.14.0.jar=web" jvmCeOpts: "-javaagent:/opt/sonarqube/extensions/plugins/sonarqube-community-branch-plugin-1.14.0.jar=ce" @@ -73,16 +68,6 @@ initFs: monitoringPasscodeSecretName: "sonar-monitoring-password" monitoringPasscodeSecretKey: "monitoring-password" -env: -{% if dsc.proxy.enabled %} - - name: http_proxy - value: "{{ dsc.proxy.http_proxy }}" - - name: https_proxy - value: "{{ dsc.proxy.https_proxy }}" - - name: no_proxy - value: "{{ dsc.proxy.no_proxy }}" -{% endif %} - ## Override JDBC values ## for external Databases jdbcOverwrite: diff --git a/roles/sops/tasks/main.yml b/roles/sops/tasks/main.yml index 1e49aae1..4a6944a2 100644 --- a/roles/sops/tasks/main.yml +++ b/roles/sops/tasks/main.yml @@ -43,7 +43,7 @@ - name: Merge with SOPS user values ansible.builtin.set_fact: - sops_values: "{{ sops_values | combine(dsc.sops['values'], recursive=True) }}" + sops_values: "{{ sops_values | combine(dsc.sops['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: diff --git a/roles/vault/tasks/check.yml 
b/roles/vault/tasks/check.yml index 05e958c3..12af53c8 100644 --- a/roles/vault/tasks/check.yml +++ b/roles/vault/tasks/check.yml @@ -1,7 +1,8 @@ +--- - name: Get vault health ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/health?sealedcode=200&uninitcode=200" + url: https://{{ vault_domain }}/v1/sys/health?sealedcode=200&uninitcode=200 status_code: [200, 503] register: vault_health retries: 12 @@ -20,12 +21,12 @@ ansible.builtin.set_fact: vault_status: sealed when: vault_health.json.sealed - + - name: Set vault_status to "not init" ansible.builtin.set_fact: vault_status: not init when: not vault_health.json.initialized - + - name: Set vault_status to OK ansible.builtin.set_fact: vault_status: OK diff --git a/roles/vault/tasks/main.yml b/roles/vault/tasks/main.yml index deda7e71..0fde5575 100644 --- a/roles/vault/tasks/main.yml +++ b/roles/vault/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: Create Vault namespace kubernetes.core.k8s: state: present @@ -10,55 +11,23 @@ name: hashicorp repo_url: https://helm.releases.hashicorp.com -- name: Set values +- name: Set Vault helm values ansible.builtin.set_fact: - values: - global: - openshift: true - injector: - image: - repository: docker.io/hashicorp/vault-k8s - agentImage: - repository: docker.io/hashicorp/vault - server: - route: - enabled: false - ha: - enabled: false - standalone: - enabled: true - auditStorage: - enable: true - dataStorage: - enable: true - size: 23Gi - image: - repository: docker.io/hashicorp/vault - csi: - image: - repository: docker.io/hashicorp/vault-csi-provider - agent: - image: - repository: docker.io/hashicorp/vault + vault_values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}" -- name: Set proxy_fact - ansible.builtin.set_fact: - proxy_fact: - server: - extraEnvironmentVars: - HTTP_PROXY: "{{ dsc.proxy.http_proxy }}" - HTTPS_PROXY: "{{ dsc.proxy.https_proxy }}" - NO_PROXY: "{{ dsc.proxy.no_proxy }}" - when: 
dsc.proxy.enabled - -- name: Set extraEnv +- name: Merge with proxy settings when: dsc.proxy.enabled - ansible.builtin.set_fact: - values: "{{ values | combine(proxy_fact, recursive=True) }}" + block: + - name: Generate proxy values + ansible.builtin.set_fact: + vault_proxy_values: "{{ lookup('template', 'values-proxy.yaml.j2') | from_yaml }}" + - name: Merge with vault proxy values + ansible.builtin.set_fact: + vault_values: "{{ vault_values | combine(vault_proxy_values, recursive=True, list_merge='append') }}" -- name: Set Vault values +- name: Merge with Vault user values ansible.builtin.set_fact: - values: "{{ values | combine(dsc.vault['values'], recursive=True) }}" + vault_values: "{{ vault_values | combine(dsc.vault['values'], recursive=True, list_merge='append') }}" - name: Deploy helm kubernetes.core.helm: @@ -67,7 +36,7 @@ chart_version: "{{ dsc.vault.chartVersion }}" release_namespace: "{{ dsc.vault.namespace }}" create_namespace: true - values: "{{ values }}" + values: "{{ vault_values }}" - name: Create route and certs kubernetes.core.k8s: @@ -97,7 +66,7 @@ - name: Wait Vault URL ansible.builtin.uri: - url: "https://{{ vault_domain }}" + url: https://{{ vault_domain }} method: GET validate_certs: "{{ dsc.exposedCA.type == 'none' }}" status_code: [200, 202] @@ -116,9 +85,9 @@ that: - ((vault_status in ['sealed', 'OK']) and (vault_keys.resources | length > 0)) or ((vault_status == 'not init') and (vault_keys.resources | length == 0)) fail_msg: - - "Attention ! 
Soit le vault n'est pas initialisé mais vous avez un secret vault-keys dans {{ dsc.vault.namespace }} + - Veuillez le supprimer et relancer si vous souhaitez lancer une initialisation + - Soit le vault est initialisé mais vous n'avez pas de secret vault-keys dans {{ dsc.vault.namespace }}, et c'est inquiétant ! - name: Init vault kubernetes.core.k8s_exec: @@ -128,7 +97,7 @@ command: vault operator init -key-shares=3 -key-threshold=2 when: vault_status == 'not init' register: init - until: "init is not failed" + until: init is not failed retries: 1 delay: 10 @@ -186,17 +155,18 @@ kind: Pod name: "{{ dsc_name }}-vault-0" register: vault_pod - until: (vault_pod.resources[0].status.conditions is defined) and (vault_pod.resources[0].status.conditions | selectattr('status', 'equalto', 'True') | selectattr('type', 'equalto', 'Ready')) + until: (vault_pod.resources[0].status.conditions is defined) and (vault_pod.resources[0].status.conditions | selectattr('status', 'equalto', 'True') | selectattr('type', + 'equalto', 'Ready')) retries: 15 delay: 5 - name: "Config: get auth method" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/auth/jwt" + url: https://{{ vault_domain }}/v1/sys/auth/jwt status_code: [200, 400] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" register: jwt_state retries: 5 @@ -204,11 +174,11 @@ when: jwt_state.status == 400 ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/auth/jwt" + url: https://{{ vault_domain }}/v1/sys/auth/jwt method: POST status_code: [204] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" body: type: jwt description: Login for Gitlab-ci @@ -217,11 +187,11 @@ - name: "Config: add role default-ci" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" + 
url: https://{{ vault_domain }}/v1/auth/jwt/role/default-ci method: POST status_code: [204] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" Content-Type: application/json body: role_type: jwt @@ -242,13 +212,13 @@ - name: "Config: set jwt config" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/auth/jwt/config" + url: https://{{ vault_domain }}/v1/auth/jwt/config method: POST status_code: [204] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" body: - oidc_discovery_url: "https://{{ gitlab_domain }}" + oidc_discovery_url: https://{{ gitlab_domain }} oidc_discovery_ca_pem: "{{ exposed_ca_pem }}" default_role: default-ci namespace_in_state: false @@ -257,20 +227,20 @@ - name: "Config: get accessor jwt config" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/auth/jwt" + url: https://{{ vault_domain }}/v1/sys/auth/jwt status_code: [200] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" register: get_accessor - name: "Config: create policy" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/policy/default-ci" + url: https://{{ vault_domain }}/v1/sys/policy/default-ci method: POST status_code: [204] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" body: policy: | path "forge-dso/+/{{ '{{' }}identity.entity.aliases.{{ get_accessor.json.accessor }}.metadata.namespace_path{{ '}}' }}/*" { @@ -284,21 +254,21 @@ - name: "Config: get kv engines" ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/mounts/forge-dso" + url: https://{{ vault_domain }}/v1/sys/mounts/forge-dso status_code: [200, 400] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" register: 
get_engines - name: "Config: create kv engine" when: get_engines.status == 400 ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/mounts/forge-dso" + url: https://{{ vault_domain }}/v1/sys/mounts/forge-dso method: POST status_code: [204] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" body: type: kv options: diff --git a/roles/vault/tasks/unseal.yml b/roles/vault/tasks/unseal.yml index 36c4697c..ad9e0844 100644 --- a/roles/vault/tasks/unseal.yml +++ b/roles/vault/tasks/unseal.yml @@ -1,3 +1,4 @@ +--- - name: Set seal count ansible.builtin.set_fact: num: "{{ num | int + 1 }}" @@ -5,11 +6,11 @@ - name: Unseal Vault ansible.builtin.uri: validate_certs: "{{ dsc.exposedCA.type == 'none' }}" - url: "https://{{ vault_domain }}/v1/sys/unseal" + url: https://{{ vault_domain }}/v1/sys/unseal method: POST status_code: [200] headers: - "X-Vault-Token": "{{ root_token }}" + X-Vault-Token: "{{ root_token }}" body: key: "{{ vault_keys.resources[0].data['key' + (num)] | b64decode }}" body_format: json diff --git a/roles/vault/templates/values-proxy.yaml.j2 b/roles/vault/templates/values-proxy.yaml.j2 new file mode 100644 index 00000000..435fd740 --- /dev/null +++ b/roles/vault/templates/values-proxy.yaml.j2 @@ -0,0 +1,5 @@ +server: + extraEnvironmentVars: + HTTP_PROXY: "{{ dsc.proxy.http_proxy }}" + HTTPS_PROXY: "{{ dsc.proxy.https_proxy }}" + NO_PROXY: "{{ dsc.proxy.no_proxy }}" \ No newline at end of file diff --git a/roles/vault/templates/values.yaml.j2 b/roles/vault/templates/values.yaml.j2 new file mode 100644 index 00000000..863ecfc4 --- /dev/null +++ b/roles/vault/templates/values.yaml.j2 @@ -0,0 +1,27 @@ +global: + openshift: true +injector: + image: + repository: docker.io/hashicorp/vault-k8s + agentImage: + repository: docker.io/hashicorp/vault +server: + route: + enabled: false + ha: + enabled: false + standalone: + enabled: true + auditStorage: + enable: true + 
dataStorage: + enabled: true + size: 23Gi + image: + repository: docker.io/hashicorp/vault +csi: + image: + repository: docker.io/hashicorp/vault-csi-provider + agent: + image: + repository: docker.io/hashicorp/vault