diff --git a/kubernetes/apps/demo/nginx-deployment.yml b/kubernetes/apps/demo/nginx-deployment.yml index b3ba99ab..bb111af9 100644 --- a/kubernetes/apps/demo/nginx-deployment.yml +++ b/kubernetes/apps/demo/nginx-deployment.yml @@ -29,7 +29,7 @@ spec: --- apiVersion: v1 -kind: Service +kind: Service metadata: name: nginx namespace: nginx @@ -48,7 +48,7 @@ metadata: name: nginx-ingress namespace: nginx annotations: - cert-manager.io/cluster-issuer: letsencrypt-prod + cert-manager.io/cluster-issuer: letsencrypt-http ingress.kubernetes.io/force-ssl-redirect: "true" kubernetes.io/tls-acme: "true" spec: @@ -56,7 +56,7 @@ spec: tls: - hosts: - nginx.ninebasetwo.xyz - secretName: nginx-tls + secretName: aciacfia-tls rules: - host: nginx.ninebasetwo.xyz http: @@ -67,4 +67,22 @@ spec: service: name: nginx port: - number: 80 \ No newline at end of file + number: 80 + +# --- +# apiVersion: gateway.networking.k8s.io/v1beta1 +# kind: HTTPRoute +# metadata: +# name: nginx-http-route +# namespace: nginx +# spec: +# parentRefs: +# - name: gateway-gke-l7-rilb +# rules: +# - matches: +# - path: +# type: PathPrefix +# value: "/" +# backendRefs: +# - name: nginx +# port: 80 \ No newline at end of file diff --git a/kubernetes/apps/nachet/nachet-deployment.yml b/kubernetes/apps/nachet/nachet-deployment.yml index e69de29b..30ad32f2 100644 --- a/kubernetes/apps/nachet/nachet-deployment.yml +++ b/kubernetes/apps/nachet/nachet-deployment.yml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: nachet + labels: + name: nachet + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nachet-deployment + namespace: nachet +spec: + replicas: 2 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.14.2 + ports: + - containerPort: 80 + +--- +apiVersion: v1 +kind: Service +metadata: + name: nachet + namespace: nachet +spec: + clusterIP: None + selector: + app: nginx + ports: + - protocol: 
TCP + port: 80 + +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: nachet-ingress + namespace: nachet + annotations: + cert-manager.io/cluster-issuer: letsencrypt-http + ingress.kubernetes.io/force-ssl-redirect: "true" + kubernetes.io/tls-acme: "true" +spec: + ingressClassName: nginx + tls: + - hosts: + - nachet.ninebasetwo.xyz + secretName: aciacfia-tls + rules: + - host: nachet.ninebasetwo.xyz + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: nachet + port: + number: 80 diff --git a/kubernetes/system/cert-manager/issuer.yml b/kubernetes/system/cert-manager/issuer.yml index 48025307..7154c1e7 100644 --- a/kubernetes/system/cert-manager/issuer.yml +++ b/kubernetes/system/cert-manager/issuer.yml @@ -1,11 +1,11 @@ apiVersion: cert-manager.io/v1 kind: ClusterIssuer metadata: - name: letsencrypt-prod + name: letsencrypt-http spec: acme: server: https://acme-v02.api.letsencrypt.org/directory - email: thomas.cardin@inspection.gc.ca + email: tomcardin@outlook.com privateKeySecretRef: name: letsencrypt-private-key solvers: diff --git a/kubernetes/system/vault/namespace.yml b/kubernetes/system/vault/namespace.yml new file mode 100644 index 00000000..e5d2f58d --- /dev/null +++ b/kubernetes/system/vault/namespace.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: vault + labels: + name: vault + \ No newline at end of file diff --git a/kubernetes/system/vault/vault.yml b/kubernetes/system/vault/vault.yml new file mode 100644 index 00000000..6f53a230 --- /dev/null +++ b/kubernetes/system/vault/vault.yml @@ -0,0 +1,1204 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# Available parameters and their default values for the Vault chart. + +global: + # enabled is the master enabled switch. Setting this to true or false + # will enable or disable all the components within this chart by default. + enabled: true + + # The namespace to deploy to. 
Defaults to the `helm` installation namespace. + namespace: "vault" + + # Image pull secret to use for registry authentication. + # Alternatively, the value may be specified as an array of strings. + imagePullSecrets: [] + # imagePullSecrets: + # - name: image-pull-secret + + # TLS for end-to-end encrypted transport + tlsDisable: false + + # External vault server address for the injector and CSI provider to use. + # Setting this will disable deployment of a vault server. + externalVaultAddr: "" + + # If deploying to OpenShift + openshift: false + + # Create PodSecurityPolicy for pods + psp: + enable: false + # Annotation for PodSecurityPolicy. + # This is a multi-line templated string map, and can also be set as YAML. + annotations: | + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default,runtime/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + + serverTelemetry: + # Enable integration with the Prometheus Operator + # See the top level serverTelemetry section below before enabling this feature. + prometheusOperator: false + +injector: + # True if you want to enable vault agent injection. + # @default: global.enabled + enabled: true + + replicas: 1 + + # Configures the port the injector should listen on + port: 8080 + + # If multiple replicas are specified, by default a leader will be determined + # so that only one injector attempts to create TLS certificates. + leaderElector: + enabled: true + + # If true, will enable a node exporter metrics endpoint at /metrics. + metrics: + enabled: true + + # Deprecated: Please use global.externalVaultAddr instead. + externalVaultAddr: "" + + # image sets the repo and tag of the vault-k8s image to use for the injector. 
+ image: + repository: "hashicorp/vault-k8s" + tag: "1.3.1" + pullPolicy: IfNotPresent + + # agentImage sets the repo and tag of the Vault image to use for the Vault Agent + # containers. This should be set to the official Vault image. Vault 1.3.1+ is + # required. + agentImage: + repository: "hashicorp/vault" + tag: "1.15.2" + + # The default values for the injected Vault Agent containers. + agentDefaults: + # For more information on configuring resources, see the K8s documentation: + # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + cpuLimit: "500m" + cpuRequest: "250m" + memLimit: "128Mi" + memRequest: "64Mi" + # ephemeralLimit: "128Mi" + # ephemeralRequest: "64Mi" + + # Default template type for secrets when no custom template is specified. + # Possible values include: "json" and "map". + template: "map" + + # Default values within Agent's template_config stanza. + templateConfig: + exitOnRetryFailure: true + staticSecretRenderInterval: "" + + # Used to define custom livenessProbe settings + livenessProbe: + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + + # Used to define custom readinessProbe settings + readinessProbe: + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 2 + successThreshold: 1 + timeoutSeconds: 5 + + # Used to define custom startupProbe settings + startupProbe: + failureThreshold: 12 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 5 + + # Mount Path of the Vault Kubernetes Auth Method. + authPath: "auth/kubernetes" + + # Configures the log verbosity of the injector. + # Supported log levels include: trace, debug, info, warn, error + logLevel: "info" + + # Configures the log format of the injector. Supported log formats: "standard", "json". 
+ logFormat: "standard" + + # Configures all Vault Agent sidecars to revoke their token when shutting down + revokeOnShutdown: false + + webhook: + # Configures failurePolicy of the webhook. The "unspecified" default behaviour depends on the + # API Version of the WebHook. + # To block pod creation while the webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + # matchPolicy specifies the approach to accepting changes based on the rules of + # the MutatingWebhookConfiguration. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy + # for more details. + # + matchPolicy: Exact + + # timeoutSeconds is the amount of seconds before the webhook request will be ignored + # or fails. + # If it is ignored or fails depends on the failurePolicy + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts + # for more details. + # + timeoutSeconds: 30 + + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: | + matchExpressions: + - key: app.kubernetes.io/name + operator: NotIn + values: + - {{ template "vault.name" . 
}}-agent-injector + + # Extra annotations to attach to the webhook + annotations: {} + + # Deprecated: please use 'webhook.failurePolicy' instead + # Configures failurePolicy of the webhook. The "unspecified" default behaviour depends on the + # API Version of the WebHook. + # To block pod creation while webhook is unavailable, set the policy to `Fail` below. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy + # + failurePolicy: Ignore + + # Deprecated: please use 'webhook.namespaceSelector' instead + # namespaceSelector is the selector for restricting the webhook to only + # specific namespaces. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector + # for more details. + # Example: + # namespaceSelector: + # matchLabels: + # sidecar-injector: enabled + namespaceSelector: {} + + # Deprecated: please use 'webhook.objectSelector' instead + # objectSelector is the selector for restricting the webhook to only + # specific labels. + # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + # for more details. + # Example: + # objectSelector: + # matchLabels: + # vault-sidecar-injector: enabled + objectSelector: {} + + # Deprecated: please use 'webhook.annotations' instead + # Extra annotations to attach to the webhook + webhookAnnotations: {} + + certs: + # secretName is the name of the secret that has the TLS certificate and + # private key to serve the injector webhook. If this is null, then the + # injector will default to its automatic management mode that will assign + # a service account to the injector to generate its own certificates. + secretName: null + + # caBundle is a base64-encoded PEM-encoded certificate bundle for the CA + # that signed the TLS certificate that the webhook serves. 
This must be set + # if secretName is non-null unless an external service like cert-manager is + # keeping the caBundle updated. + caBundle: "" + + # certName and keyName are the names of the files within the secret for + # the TLS cert and private key, respectively. These have reasonable + # defaults but can be customized if necessary. + certName: tls.crt + keyName: tls.key + + # Security context for the pod template and the injector container + # The default pod securityContext is: + # runAsNonRoot: true + # runAsGroup: {{ .Values.injector.gid | default 1000 }} + # runAsUser: {{ .Values.injector.uid | default 100 }} + # fsGroup: {{ .Values.injector.gid | default 1000 }} + # and for container is + # allowPrivilegeEscalation: false + # capabilities: + # drop: + # - ALL + securityContext: + pod: {} + container: {} + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # extraEnvironmentVars is a list of extra environment variables to set in the + # injector deployment. + extraEnvironmentVars: + VAULT_ADDR: https://vault.vault:8200 + + # Affinity Settings for injector pods + # This can either be a multi-line string or YAML matching the PodSpec's affinity field. + # Commenting out or setting as empty the affinity variable, will allow + # deployment of multiple replicas to single node services such as Minikube. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: webhook + topologyKey: kubernetes.io/hostname + + # Topology settings for injector pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. 
+ topologySpreadConstraints: [] + + # Toleration Settings for injector pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Priority class for injector pods + priorityClassName: "" + + # Extra annotations to attach to the injector pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the injector pods + annotations: {} + + # Extra labels to attach to the agent-injector + # This should be a YAML map of the labels to apply to the injector + extraLabels: {} + + # Should the injector pods run on the host network (useful when using + # an alternate CNI in EKS) + hostNetwork: false + + # Injector service specific config + service: + # Extra annotations to attach to the injector service + annotations: {} + + # Injector serviceAccount specific config + serviceAccount: + # Extra annotations to attach to the injector serviceAccount + annotations: {} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + podDisruptionBudget: {} + # podDisruptionBudget: + # maxUnavailable: 1 + + # strategy for updating the deployment. This can be a multi-line string or a + # YAML map. + strategy: {} + # strategy: | + # rollingUpdate: + # maxSurge: 25% + # maxUnavailable: 25% + # type: RollingUpdate + +server: + # If true, or "-" with global.enabled true, Vault server will be installed. + # See vault.mode in _helpers.tpl for implementation details. + enabled: "-" + + # [Enterprise Only] This value refers to a Kubernetes secret that you have + # created that contains your enterprise license. 
If you are not using an + # enterprise image or if you plan to introduce the license key via another + # route, then leave secretName blank ("") or set it to null. + # Requires Vault Enterprise 1.8 or later. + enterpriseLicense: + # The name of the Kubernetes secret that holds the enterprise license. The + # secret must be in the same namespace that Vault is installed into. + secretName: "" + # The key within the Kubernetes secret that holds the enterprise license. + secretKey: "license" + + # Resource requests, limits, etc. for the server cluster placement. This + # should map directly to the value of the resources field for a PodSpec. + # By default no direct resource request is made. + + image: + repository: "hashicorp/vault" + tag: "1.15.2" + # Overrides the default Image Pull Policy + pullPolicy: IfNotPresent + + # Configure the Update Strategy Type for the StatefulSet + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + updateStrategyType: "OnDelete" + + # Configure the logging verbosity for the Vault server. + # Supported log levels include: trace, debug, info, warn, error + logLevel: "" + + # Configure the logging format for the Vault server. + # Supported log formats include: standard, json + logFormat: "" + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Ingress allows ingress services to be created to allow external access + # from Kubernetes to access Vault pods. + # If deployment is on OpenShift, the following block is ignored. + # In order to expose the service, use the route section below + ingress: + enabled: false + labels: {} + # traffic: external + annotations: {} + # | + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # or + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + # Optionally use ingressClassName instead of deprecated annotation. 
+ # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#deprecated-annotation + ingressClassName: "" + + # As of Kubernetes 1.19, all Ingress Paths must have a pathType configured. The default value below should be sufficient in most cases. + # See: https://kubernetes.io/docs/concepts/services-networking/ingress/#path-types for other possible values. + pathType: Prefix + + # When HA mode is enabled and K8s service registration is being used, + # configure the ingress to point to the Vault active service. + activeService: true + hosts: + - host: chart-example.local + paths: [] + ## Extra paths to prepend to the host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # service: + # name: ssl-redirect + # port: + # number: use-annotation + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + + # hostAliases is a list of aliases to be added to /etc/hosts. Specified as a YAML list. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - chart-example.local + + # OpenShift only - create a route to expose the service + # By default the created route will be of type passthrough + route: + enabled: false + + # When HA mode is enabled and K8s service registration is being used, + # configure the route to point to the Vault active service. + activeService: true + + labels: {} + annotations: {} + host: chart-example.local + # tls will be passed directly to the route's TLS config, which + # can be used to configure other termination methods that terminate + # TLS at the router + tls: + termination: passthrough + + # authDelegator enables a cluster role binding to be attached to the service + # account. This cluster role binding can be used to setup Kubernetes auth + # method. See https://developer.hashicorp.com/vault/docs/auth/kubernetes + authDelegator: + enabled: true + + # extraInitContainers is a list of init containers. Specified as a YAML list. 
+ # This is useful if you need to run a script to provision TLS certificates or + # write out configuration files in a dynamic way. + extraInitContainers: null + # # This example installs a plugin pulled from github into the /usr/local/libexec/vault/oauthapp folder, + # # which is defined in the volumes value. + # - name: oauthapp + # image: "alpine" + # command: [sh, -c] + # args: + # - cd /tmp && + # wget https://github.com/puppetlabs/vault-plugin-secrets-oauthapp/releases/download/v1.2.0/vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64.tar.xz -O oauthapp.xz && + # tar -xf oauthapp.xz && + # mv vault-plugin-secrets-oauthapp-v1.2.0-linux-amd64 /usr/local/libexec/vault/oauthapp && + # chmod +x /usr/local/libexec/vault/oauthapp + # volumeMounts: + # - name: plugins + # mountPath: /usr/local/libexec/vault + + # extraContainers is a list of sidecar containers. Specified as a YAML list. + extraContainers: null + + # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers + # This is useful if Vault must be signaled, e.g. to send a SIGHUP for a log rotation + shareProcessNamespace: false + + # extraArgs is a string containing additional Vault server arguments. + extraArgs: "" + + # extraPorts is a list of extra ports. Specified as a YAML list. + # This is useful if you need to add additional ports to the statefulset in dynamic way. 
+ extraPorts: null + # - containerPort: 8300 + # name: http-monitoring + + # Used to define custom readinessProbe settings + readinessProbe: + enabled: true + path: /v1/sys/health?standbyok=true&sealedcode=204&uninitcode=204 + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + # Used to enable a livenessProbe for the pods + livenessProbe: + enabled: true + execCommand: [] + path: "/v1/sys/health?standbyok=true" + port: 8200 + failureThreshold: 2 + initialDelaySeconds: 60 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + # Optional duration in seconds the pod needs to terminate gracefully. + # See: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ + terminationGracePeriodSeconds: 10 + + # Used to set the sleep time during the preStop step + preStopSleepSeconds: 5 + + # Used to define commands to run after the pod is ready. + # This can be used to automate processes such as initialization + # or boostrapping auth methods. + postStart: [] + # - /bin/sh + # - -c + # - /vault/userconfig/myscript/run.sh + + # extraEnvironmentVars is a list of extra environment variables to set with the stateful set. These could be + # used to include variables required for auto-unseal. + extraEnvironmentVars: + GOOGLE_REGION: northamerica-northeast1 + GOOGLE_PROJECT: spartan-rhino-408115 + GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/kms-creds/credentials.json + VAULT_CACERT: /vault/userconfig/vault-ha-tls/vault.ca + VAULT_TLSCERT: /vault/userconfig/vault-ha-tls/vault.crt + VAULT_TLSKEY: /vault/userconfig/vault-ha-tls/vault.key + + # extraSecretEnvironmentVars is a list of extra environment variables to set with the stateful set. + # These variables take value from existing Secret objects. + extraSecretEnvironmentVars: [] + # - envName: AWS_SECRET_ACCESS_KEY + # secretName: vault + # secretKey: AWS_SECRET_ACCESS_KEY + + # Deprecated: please use 'volumes' instead. 
+ # extraVolumes is a list of extra volumes to mount. These will be exposed + # to Vault in the path `/vault/userconfig//`. The value below is + # an array of objects, examples are shown below. + extraVolumes: [] + # - type: secret (or "configMap") + # name: my-secret + # path: null # default is `/vault/userconfig` + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: + - name: vault-ha-tls + secret: + secretName: vault-ha-tls + - name: kms-creds + secret: + secretName: kms-creds + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumeMounts: + - name: vault-ha-tls + mountPath: /vault/userconfig/vault-ha-tls + - name: kms-creds + mountPath: /vault/userconfig/kms-creds + + # Affinity Settings + # Commenting out or setting as empty the affinity variable, will allow + # deployment to single node services such as Minikube + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/name: {{ template "vault.name" . }} + app.kubernetes.io/instance: "{{ .Release.Name }}" + component: server + topologyKey: kubernetes.io/hostname + + # Topology settings for server pods + # ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + # This should be either a multi-line string or YAML matching the topologySpreadConstraints array + # in a PodSpec. 
+ topologySpreadConstraints: [] + + # Toleration Settings for server pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for server pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Enables network policy for server pods + networkPolicy: + enabled: false + egress: [] + # egress: + # - to: + # - ipBlock: + # cidr: 10.0.0.0/24 + # ports: + # - protocol: TCP + # port: 443 + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8200 + protocol: TCP + - port: 8201 + protocol: TCP + + # Priority class for server pods + priorityClassName: "" + + # Extra labels to attach to the server pods + # This should be a YAML map of the labels to apply to the server pods + extraLabels: {} + + # Extra annotations to attach to the server pods + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the server pods + annotations: {} + + # Enables a headless service to be used by the Vault Statefulset + service: + enabled: true + # Enable or disable the vault-active service, which selects Vault pods that + # have labeled themselves as the cluster leader with `vault-active: "true"`. + active: + enabled: true + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the active service. + annotations: {} + # Enable or disable the vault-standby service, which selects Vault pods that + # have labeled themselves as a cluster follower with `vault-active: "false"`. + standby: + enabled: true + # Extra annotations for the service definition. 
This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the standby service. + annotations: {} + # If enabled, the service selectors will include `app.kubernetes.io/instance: {{ .Release.Name }}` + # When disabled, services may select Vault pods not deployed from the chart. + # Does not affect the headless vault-internal service with `ClusterIP: None` + instanceSelector: + enabled: true + # clusterIP controls whether a Cluster IP address is attached to the + # Vault service within Kubernetes. By default, the Vault service will + # be given a Cluster IP address, set to None to disable. When disabled + # Kubernetes will create a "headless" service. Headless services can be + # used to communicate with pods directly through DNS instead of a round-robin + # load balancer. + # clusterIP: None + + # Configures the service type for the main Vault service. Can be ClusterIP + # or NodePort. + type: ClusterIP + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family policy, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + ipFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well. + # Can be IPv4 and/or IPv6. + ipFamilies: [] + + # Do not wait for pods to be ready before including them in the services' + # targets. 
Does not apply to the headless service, which is used for + # cluster-internal communication. + publishNotReadyAddresses: true + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #nodePort: 30000 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #activeNodePort: 30001 + + # When HA mode is enabled + # If type is set to "NodePort", a specific nodePort value can be configured, + # will be random if left blank. + #standbyNodePort: 30002 + + # Port on which Vault server is listening + port: 8200 + # Target port to which the service should be mapped to + targetPort: 8200 + # Extra annotations for the service definition. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the service. + annotations: {} + + # This configures the Vault Statefulset to create a PVC for data + # storage when using the file or raft backend storage engines. + # See https://developer.hashicorp.com/vault/docs/configuration/storage to know more + dataStorage: + enabled: true + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/data" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. 
+ storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + # Labels to apply to the PVC + labels: {} + + # Persistent Volume Claim (PVC) retention policy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + # Example: + # persistentVolumeClaimRetentionPolicy: + # whenDeleted: Retain + # whenScaled: Retain + persistentVolumeClaimRetentionPolicy: {} + + # This configures the Vault Statefulset to create a PVC for audit + # logs. Once Vault is deployed, initialized, and unsealed, Vault must + # be configured to use this for audit logs. This will be mounted to + # /vault/audit + # See https://developer.hashicorp.com/vault/docs/audit to know more + auditStorage: + enabled: true + # Size of the PVC created + size: 10Gi + # Location where the PVC will be mounted. + mountPath: "/vault/audit" + # Name of the storage class to use. If null it will use the + # configured default Storage Class. + storageClass: null + # Access Mode of the storage device being used for the PVC + accessMode: ReadWriteOnce + # Annotations to apply to the PVC + annotations: {} + # Labels to apply to the PVC + labels: {} + + # Run Vault in "HA" mode. There are no storage requirements unless the audit log + # persistence is required. In HA mode Vault will configure itself to use Consul + # for its storage backend. The default configuration provided will work the Consul + # Helm project by default. It is possible to manually configure Vault to use a + # different HA backend. 
+ ha: + enabled: true + replicas: 2 + + # Set the api_addr configuration for Vault HA + # See https://developer.hashicorp.com/vault/docs/configuration#api_addr + # If set to null, this will be set to the Pod IP Address + apiAddr: null + + # Set the cluster_addr confuguration for Vault HA + # See https://developer.hashicorp.com/vault/docs/configuration#cluster_addr + # If set to null, this will be set to https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201 + clusterAddr: null + + # Enables Vault's integrated Raft storage. Unlike the typical HA modes where + # Vault's persistence is external (such as Consul), enabling Raft mode will create + # persistent volumes for Vault to store data according to the configuration under server.dataStorage. + # The Vault cluster will coordinate leader elections and failovers internally. + raft: + enabled: true + # Set the Node Raft ID to the name of the pod + setNodeId: true + + # Note: Configuration files are stored in ConfigMaps so sensitive data + # such as passwords should be either mounted through extraSecretEnvironmentVars + # or through a Kube secret. 
For more information see: + # https://developer.hashicorp.com/vault/docs/platform/k8s/helm/run#protecting-sensitive-vault-configurations + # https://developer.hashicorp.com/vault/docs/configuration/storage/raft + config: | + ui = true + plugin_directory = "/vault/plugins" + + listener "tcp" { + tls_disable = 0 + address = "[::]:8200" + cluster_address = "[::]:8201" + tls_cert_file = "/vault/userconfig/vault-ha-tls/vault.crt" + tls_key_file = "/vault/userconfig/vault-ha-tls/vault.key" + tls_client_ca_file = "/vault/userconfig/vault-ha-tls/vault.ca" + } + + storage "raft" { + path = "/vault/data" + retry_join { + leader_api_addr = "https://vault-0.vault-internal:8200" + leader_ca_cert_file = "/vault/userconfig/vault-ha-tls/vault.ca" + leader_client_cert_file = "/vault/userconfig/vault-ha-tls/vault.crt" + leader_client_key_file = "/vault/userconfig/vault-ha-tls/vault.key" + } + retry_join { + leader_api_addr = "https://vault-1.vault-internal:8200" + leader_ca_cert_file = "/vault/userconfig/vault-ha-tls/vault.ca" + leader_client_cert_file = "/vault/userconfig/vault-ha-tls/vault.crt" + leader_client_key_file = "/vault/userconfig/vault-ha-tls/vault.key" + } + retry_join { + leader_api_addr = "https://vault-2.vault-internal:8200" + leader_ca_cert_file = "/vault/userconfig/vault-ha-tls/vault.ca" + leader_client_cert_file = "/vault/userconfig/vault-ha-tls/vault.crt" + leader_client_key_file = "/vault/userconfig/vault-ha-tls/vault.key" + } + } + + seal "gcpckms" { + project = "spartan-rhino-408115" + region = "northamerica-northeast1" + key_ring = "vault-keyring-gcp" + crypto_key = "vault-cryptokey-gcp" + } + + service_registration "kubernetes" {} + + # A disruption budget limits the number of pods of a replicated application + # that are down simultaneously from voluntary disruptions + disruptionBudget: + enabled: true + + # maxUnavailable will default to (n/2)-1 where n is the number of + # replicas. If you'd like a custom value, you can specify an override here. 
+ maxUnavailable: null + + # Definition of the serviceAccount used to run Vault. + # These options are also used when using an external Vault server to validate + # Kubernetes tokens. + serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # Create a Secret API object to store a non-expiring token for the service account. + # Prior to v1.24.0, Kubernetes used to generate this secret for each service account by default. + # Kubernetes now recommends using short-lived tokens from the TokenRequest API or projected volumes instead if possible. + # For more details, see https://kubernetes.io/docs/concepts/configuration/secret/#service-account-token-secrets + # serviceAccount.create must be equal to 'true' in order to use this feature. + createSecret: false + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + # Extra labels to attach to the serviceAccount + # This should be a YAML map of the labels to apply to the serviceAccount + extraLabels: {} + # Enable or disable a service account role binding with the permissions required for + # Vault's Kubernetes service_registration config option. + # See https://developer.hashicorp.com/vault/docs/configuration/service-registration/kubernetes + serviceDiscovery: + enabled: true + + # Settings for the statefulSet used to run Vault. + statefulSet: + # Extra annotations for the statefulSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the statefulSet. + annotations: {} + + # Set the pod and container security contexts. 
+ # If not set, these will default to, and for *not* OpenShift: + # pod: + # runAsNonRoot: true + # runAsGroup: {{ .Values.server.gid | default 1000 }} + # runAsUser: {{ .Values.server.uid | default 100 }} + # fsGroup: {{ .Values.server.gid | default 1000 }} + # container: + # allowPrivilegeEscalation: false + # + # If not set, these will default to, and for OpenShift: + # pod: {} + # container: {} + securityContext: + pod: {} + container: {} + + # Should the server pods run on the host network + hostNetwork: false + +# Vault UI +ui: + # True if you want to create a Service entry for the Vault UI. + # + # serviceType can be used to control the type of service created. For + # example, setting this to "LoadBalancer" will create an external load + # balancer (for supported K8S installations) to access the UI. + enabled: true + publishNotReadyAddresses: true + # The service should only contain selectors for active Vault pod + activeVaultPodOnly: false + serviceType: "ClusterIP" + serviceNodePort: null + externalPort: 8200 + targetPort: 8200 + + # The IP family and IP families options are to set the behaviour in a dual-stack environment. + # Omitting these values will let the service fall back to whatever the CNI dictates the defaults + # should be. + # These are only supported for kubernetes versions >=1.23.0 + # + # Configures the service's supported IP family, can be either: + # SingleStack: Single-stack service. The control plane allocates a cluster IP for the Service, using the first configured service cluster IP range. + # PreferDualStack: Allocates IPv4 and IPv6 cluster IPs for the Service. + # RequireDualStack: Allocates Service .spec.ClusterIPs from both IPv4 and IPv6 address ranges. + serviceIPFamilyPolicy: "" + + # Sets the families that should be supported and the order in which they should be applied to ClusterIP as well + # Can be IPv4 and/or IPv6. 
+ serviceIPFamilies: [] + + # The externalTrafficPolicy can be set to either Cluster or Local + # and is only valid for LoadBalancer and NodePort service types. + # The default value is Cluster. + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-traffic-policy + externalTrafficPolicy: Cluster + + #loadBalancerSourceRanges: + # - 10.0.0.0/16 + # - 1.78.23.3/32 + + # loadBalancerIP: + + # Extra annotations to attach to the ui service + # This can either be YAML or a YAML-formatted multi-line templated string map + # of the annotations to apply to the ui service + annotations: {} + +# secrets-store-csi-driver-provider-vault +csi: + # True if you want to install a secrets-store-csi-driver-provider-vault daemonset. + # + # Requires installing the secrets-store-csi-driver separately, see: + # https://github.com/kubernetes-sigs/secrets-store-csi-driver#install-the-secrets-store-csi-driver + # + # With the driver and provider installed, you can mount Vault secrets into volumes + # similar to the Vault Agent injector, and you can also sync those secrets into + # Kubernetes secrets. + enabled: true + + image: + repository: "hashicorp/vault-csi-provider" + tag: "1.4.1" + pullPolicy: IfNotPresent + + # volumes is a list of volumes made available to all containers. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. + volumes: + - name: vault-ha-tls + secret: + secretName: vault-ha-tls + + # volumeMounts is a list of volumeMounts for the main server container. These are rendered + # via toYaml rather than pre-processed like the extraVolumes value. + # The purpose is to make it easy to share volumes between containers. 
+ volumeMounts: + - name: vault-ha-tls + mountPath: "/vault/userconfig/vault-ha-tls" + readOnly: true + + resources: {} + # resources: + # requests: + # cpu: 50m + # memory: 128Mi + # limits: + # cpu: 50m + # memory: 128Mi + + # Override the default secret name for the CSI Provider's HMAC key used for + # generating secret versions. + hmacSecretName: "" + + # Settings for the daemonSet used to run the provider. + daemonSet: + updateStrategy: + type: RollingUpdate + maxUnavailable: "" + # Extra annotations for the daemonSet. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the daemonSet. + annotations: {} + # Provider host path (must match the CSI provider's path) + providersDir: "/etc/kubernetes/secrets-store-csi-providers" + # Kubelet host path + kubeletRootDir: "/var/lib/kubelet" + # Extra labels to attach to the vault-csi-provider daemonSet + # This should be a YAML map of the labels to apply to the csi provider daemonSet + extraLabels: {} + # security context for the pod template and container in the csi provider daemonSet + securityContext: + pod: {} + container: {} + + pod: + # Extra annotations for the provider pods. This can either be YAML or a + # YAML-formatted multi-line templated string map of the annotations to apply + # to the pod. + annotations: {} + + # Toleration Settings for provider pods + # This should be either a multi-line string or YAML matching the Toleration array + # in a PodSpec. + tolerations: [] + + # nodeSelector labels for csi pod assignment, formatted as a multi-line string or YAML map. + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + # Example: + # nodeSelector: + # beta.kubernetes.io/arch: amd64 + nodeSelector: {} + + # Affinity Settings + # This should be either a multi-line string or YAML matching the PodSpec's affinity field. 
+ affinity: {} + + # Extra labels to attach to the vault-csi-provider pod + # This should be a YAML map of the labels to apply to the csi provider pod + extraLabels: {} + + agent: + enabled: true + extraArgs: [] + + image: + repository: "hashicorp/vault" + tag: "1.15.2" + pullPolicy: IfNotPresent + + logFormat: standard + logLevel: info + + resources: {} + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 256Mi + # cpu: 250m + + # Priority class for csi pods + priorityClassName: "" + + serviceAccount: + # Extra annotations for the serviceAccount definition. This can either be + # YAML or a YAML-formatted multi-line templated string map of the + # annotations to apply to the serviceAccount. + annotations: {} + + # Extra labels to attach to the vault-csi-provider serviceAccount + # This should be a YAML map of the labels to apply to the csi provider serviceAccount + extraLabels: {} + + # Used to configure readinessProbe for the pods. + readinessProbe: + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + # Used to configure livenessProbe for the pods. + livenessProbe: + failureThreshold: 2 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 3 + + # Enables debug logging. + debug: false + + # Pass arbitrary additional arguments to vault-csi-provider. + # See https://developer.hashicorp.com/vault/docs/platform/k8s/csi/configurations#command-line-arguments + # for the available command line flags. + extraArgs: + - "-vault-tls-ca-cert=/vault/userconfig/vault-ha-tls/vault.ca" + - "-vault-addr=https://vault.vault:8200" + +# Vault is able to collect and publish various runtime metrics. +# Enabling this feature requires setting adding `telemetry{}` stanza to +# the Vault configuration. There are a few examples included in the `config` sections above. 
+# +# For more information see: +# https://developer.hashicorp.com/vault/docs/configuration/telemetry +# https://developer.hashicorp.com/vault/docs/internals/telemetry +serverTelemetry: + # Enable support for the Prometheus Operator. Currently, this chart does not support + # authenticating to Vault's metrics endpoint, so the following `telemetry{}` must be included + # in the `listener "tcp"{}` stanza + # telemetry { + # unauthenticated_metrics_access = "true" + # } + # + # See the `standalone.config` for a more complete example of this. + # + # In addition, a top level `telemetry{}` stanza must also be included in the Vault configuration: + # + # example: + # telemetry { + # prometheus_retention_time = "30s" + # disable_hostname = true + # } + # + # Configuration for monitoring the Vault server. + serviceMonitor: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + # + # Instructions on how to install the Helm chart can be found here: + # https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack + # More information can be found here: + # https://github.com/prometheus-operator/prometheus-operator + # https://github.com/prometheus-operator/kube-prometheus + + # Enable deployment of the Vault Server ServiceMonitor CustomResource. + enabled: false + + # Selector labels to add to the ServiceMonitor. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Interval at which Prometheus scrapes metrics + interval: 30s + + # Timeout for Prometheus scrapes + scrapeTimeout: 10s + + prometheusRules: + # The Prometheus operator *must* be installed before enabling this feature, + # if not the chart will fail to install due to missing CustomResourceDefinitions + # provided by the operator. + + # Deploy the PrometheusRule custom resource for AlertManager based alerts. 
+ # Requires that AlertManager is properly deployed. + enabled: false + + # Selector labels to add to the PrometheusRules. + # When empty, defaults to: + # release: prometheus + selectors: {} + + # Some example rules. + rules: [] + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 500ms on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 500 + # for: 5m + # labels: + # severity: warning + # - alert: vault-HighResponseTime + # annotations: + # message: The response time of Vault is over 1s on average over the last 5 minutes. + # expr: vault_core_handle_request{quantile="0.5", namespace="mynamespace"} > 1000 + # for: 5m + # labels: + # severity: critical + diff --git a/main.tf b/main.tf index ef45b321..b7e37371 100644 --- a/main.tf +++ b/main.tf @@ -1,5 +1,5 @@ module "gcp-kubernetes-cluster-0" { - source = "./modules/gcp-kubernetes-cluster" + source = "./terraform/gcp-kubernetes-cluster" cluster_name = "acia-cfia" project_id = "spartan-rhino-408115" @@ -8,3 +8,14 @@ module "gcp-kubernetes-cluster-0" { location_1 = "northamerica-northeast1-a" location_2 = "northamerica-northeast1-b" } + +# module "namecheap" { +# source = "./terraform/dns" + +# cluster_name = "acia-cfia" +# project_id = "spartan-rhino-408115" + +# region = "northamerica-northeast1" +# location_1 = "northamerica-northeast1-a" +# location_2 = "northamerica-northeast1-b" +# } diff --git a/modules/gcp-kubernetes-cluster/provider.tf b/modules/gcp-kubernetes-cluster/provider.tf deleted file mode 100644 index 6afd5751..00000000 --- a/modules/gcp-kubernetes-cluster/provider.tf +++ /dev/null @@ -1,4 +0,0 @@ -provider "google" { - project = var.project_id - region = var.region -} \ No newline at end of file diff --git a/providers.tf b/providers.tf new file mode 100644 index 00000000..3fc4729f --- /dev/null +++ b/providers.tf @@ -0,0 +1,35 @@ +terraform { + backend "gcs" { + bucket = 
"terraform-tfstate-gcp-storage" + prefix = "terraform/state" + } + + required_providers { + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "2.24.0" + } + # aws = { + # source = "hashicorp/aws" + # version = "~> 3.0" + # } + # azurerm = { + # source = "hashicorp/azurerm" + # version = "~> 2.0" + # } + } +} + +provider "google" { + project = "spartan-rhino-408115" + region = "northamerica-northeast1" +} + +provider "kubernetes" { + config_path = "~/.kube/config" + config_context = var.kube_ctx +} \ No newline at end of file diff --git a/kubernetes/apps/finesse/finesse-deployment.yml b/terraform/aws-kubernetes-cluster/provider.tf similarity index 100% rename from kubernetes/apps/finesse/finesse-deployment.yml rename to terraform/aws-kubernetes-cluster/provider.tf diff --git a/terraform/azure-kubernetes-cluster/provider.tf b/terraform/azure-kubernetes-cluster/provider.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/dns/provider.tf b/terraform/dns/provider.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/dns/variables.tf b/terraform/dns/variables.tf new file mode 100644 index 00000000..e69de29b diff --git a/modules/gcp-kubernetes-cluster/accounts.tf b/terraform/gcp-kubernetes-cluster/accounts.tf similarity index 100% rename from modules/gcp-kubernetes-cluster/accounts.tf rename to terraform/gcp-kubernetes-cluster/accounts.tf diff --git a/modules/gcp-kubernetes-cluster/cluster.tf b/terraform/gcp-kubernetes-cluster/cluster.tf similarity index 98% rename from modules/gcp-kubernetes-cluster/cluster.tf rename to terraform/gcp-kubernetes-cluster/cluster.tf index 19710bc5..1dc3ed78 100644 --- a/modules/gcp-kubernetes-cluster/cluster.tf +++ b/terraform/gcp-kubernetes-cluster/cluster.tf @@ -1,6 +1,6 @@ resource "google_container_cluster" "cluster" { name = "${var.cluster_name}-cluster" - location = "${var.location_1}" + location = var.location_1 
remove_default_node_pool = true initial_node_count = 1 network = google_compute_network.network.self_link diff --git a/modules/gcp-kubernetes-cluster/networking.tf b/terraform/gcp-kubernetes-cluster/networking.tf similarity index 95% rename from modules/gcp-kubernetes-cluster/networking.tf rename to terraform/gcp-kubernetes-cluster/networking.tf index ef3ed3ab..3847ca59 100644 --- a/modules/gcp-kubernetes-cluster/networking.tf +++ b/terraform/gcp-kubernetes-cluster/networking.tf @@ -14,7 +14,7 @@ resource "google_compute_network" "network" { resource "google_compute_subnetwork" "subnetwork" { name = "${var.cluster_name}-private-subnet" ip_cidr_range = "10.0.0.0/18" - region = "${var.region}" + region = var.region network = google_compute_network.network.id private_ip_google_access = true @@ -31,14 +31,14 @@ resource "google_compute_subnetwork" "subnetwork" { resource "google_compute_router" "router" { name = "${var.cluster_name}-router" - region = "${var.region}" + region = var.region network = google_compute_network.network.id } resource "google_compute_router_nat" "router-nat" { name = "${var.cluster_name}-nat" router = google_compute_router.router.name - region = "${var.region}" + region = var.region source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" nat_ip_allocate_option = "MANUAL_ONLY" diff --git a/terraform/gcp-kubernetes-cluster/provider.tf b/terraform/gcp-kubernetes-cluster/provider.tf new file mode 100644 index 00000000..ce9153b5 --- /dev/null +++ b/terraform/gcp-kubernetes-cluster/provider.tf @@ -0,0 +1,29 @@ +terraform { + required_providers { + local = { + source = "hashicorp/local" + version = "2.4.0" + } + tls = { + source = "hashicorp/tls" + version = "4.0.4" + } + external = { + source = "hashicorp/external" + version = "2.3.1" + } + google = { + source = "hashicorp/google" + version = "~> 5.0" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "2.24.0" + } + } +} + +provider "google" { + project = var.project_id + region = 
var.region +} \ No newline at end of file diff --git a/modules/gcp-kubernetes-cluster/variables.tf b/terraform/gcp-kubernetes-cluster/variables.tf similarity index 100% rename from modules/gcp-kubernetes-cluster/variables.tf rename to terraform/gcp-kubernetes-cluster/variables.tf diff --git a/terraform/gcp-kubernetes-cluster/vault-csr.conf b/terraform/gcp-kubernetes-cluster/vault-csr.conf new file mode 100644 index 00000000..0e0f3cef --- /dev/null +++ b/terraform/gcp-kubernetes-cluster/vault-csr.conf @@ -0,0 +1,22 @@ +[req] +default_bits = 2048 +prompt = no +encrypt_key = yes +default_md = sha256 +distinguished_name = kubelet_serving +req_extensions = v3_req +[ kubelet_serving ] +O = system:nodes +CN = system:node:*.vault.svc.cluster.local +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment, dataEncipherment +extendedKeyUsage = serverAuth, clientAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = *.vault-internal +DNS.2 = *.vault-internal.vault.svc.cluster.local +DNS.3 = *.vault +DNS.4 = vault.vault.svc.cluster.local +DNS.5 = vault.vault.svc +IP.1 = 127.0.0.1 \ No newline at end of file diff --git a/terraform/gcp-kubernetes-cluster/vault.tf b/terraform/gcp-kubernetes-cluster/vault.tf new file mode 100644 index 00000000..210b5af9 --- /dev/null +++ b/terraform/gcp-kubernetes-cluster/vault.tf @@ -0,0 +1,114 @@ +# Create a service account for the Vault KMS +resource "google_service_account" "vault_kms_service_account" { + account_id = "${var.cluster_name}-vault-gcpkms" + display_name = "Vault KMS for auto-unseal" +} + +# This will be used to create a credentials.json inside a k8s secret obj +resource "google_service_account_key" "vault_kms_service_account_key" { + service_account_id = google_service_account.vault_kms_service_account.name +} + +# Create a KMS Key +resource "google_kms_key_ring" "key_ring" { + name = "vault-keyring-gcp" + location = var.region +} + +# Create a crypto key for the key ring. 
This key will be used to automatically unseal Vault +resource "google_kms_crypto_key" "crypto_key" { + name = "vault-cryptokey-gcp" + key_ring = google_kms_key_ring.key_ring.id + rotation_period = "100000s" +} + +# We give vault service account access to that key ring. Making im owner of the key. +resource "google_kms_key_ring_iam_binding" "vault_iam_kms_binding" { + key_ring_id = google_kms_key_ring.key_ring.id + role = "roles/owner" + + members = [ + "serviceAccount:${google_service_account.vault_kms_service_account.email}", + ] +} + +# Vault certificates +resource "tls_private_key" "vault_key" { + algorithm = "RSA" + rsa_bits = 2048 +} + +# This requires that jq and openssl are installed in the runtime environment +# It creates a certificate signing request (CSR) based on the vault-csr.conf file +# The 2 jq at the begining and end of the pipes are used to read the input and wrap the result in json +# since this is how terraform "external" passes data. +data "external" "k8s_cert_request" { + program = [ + "bash", "-c", + "curl -o jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && chmod +x jq && jq -rc '.key' | openssl req -new -noenc -config ${path.module}/vault-csr.conf -key /dev/stdin | jq -rRncs '{\"request\": inputs}'" + ] + query = { + "key" = tls_private_key.vault_key.private_key_pem + } +} + +# We make ask Kubernetes to sign the certificate +# https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/ +resource "kubernetes_certificate_signing_request_v1" "vault_kube_cert_req" { + metadata { + name = "vault.svc" + } + spec { + request = data.external.k8s_cert_request.result["request"] + signer_name = "kubernetes.io/kube-apiserver-client" + usages = ["digital signature", "key encipherment", "server auth"] + } + auto_approve = true + lifecycle { + ignore_changes = [spec[0].request] + replace_triggered_by = [tls_private_key.vault_key] + } +} + +# Makes sure the vault namespace is created before adding secrets 
+resource "kubernetes_namespace" "vault_ns" { + metadata { + name = "vault" + } +} + +# Get the GKE cluster data to fetch the CA certificate +data "google_container_cluster" "gke_cluster" { + name = "${var.cluster_name}-cluster" + location = "${var.region}-a" +} + +# This secret contains the certificates used +resource "kubernetes_secret" "vault_ha_tls" { + metadata { + name = "vault-ha-tls" + namespace = kubernetes_namespace.vault_ns.metadata[0].name + } + + data = { + "vault.key" = tls_private_key.vault_key.private_key_pem + "vault.crt" = kubernetes_certificate_signing_request_v1.vault_kube_cert_req.certificate + "vault.ca" = base64decode(data.google_container_cluster.gke_cluster.master_auth[0].cluster_ca_certificate) + } + + type = "kubernetes.io/generic" +} + +# This secret contains the GCP KMS service account that is used to unseal +resource "kubernetes_secret" "kms_creds" { + metadata { + name = "kms-creds" + namespace = kubernetes_namespace.vault_ns.metadata[0].name + } + + data = { + "credentials.json" = base64decode(google_service_account_key.vault_kms_service_account_key.private_key) + } + + type = "kubernetes.io/generic" +} diff --git a/variables.tf b/variables.tf new file mode 100644 index 00000000..b9f833d5 --- /dev/null +++ b/variables.tf @@ -0,0 +1,4 @@ +variable "kube_ctx" { + description = "The kubernetes cluster context." + type = string +}