From 56479f978b251847c59195c6cb1bb162f4b86dfe Mon Sep 17 00:00:00 2001 From: AntiD2ta Date: Mon, 5 Feb 2024 13:23:05 +0100 Subject: [PATCH] Add READMEs to charts --- charts/cert-manager/README.md | 455 ++-- charts/common/README.md | 21 + charts/dirk/README.md | 92 + charts/dirk/values.yaml | 108 +- charts/ethereum-node/README.md | 90 + .../ethereum-node/charts/nethermind/README.md | 7 +- charts/ethereum-node/charts/prysm/README.md | 12 +- charts/execution-beacon/README.md | 204 ++ charts/execution-beacon/values.yaml | 404 ++-- charts/external-dns/README.md | 222 +- charts/external-secrets/README.md | 4 +- charts/juno-node/README.md | 134 ++ charts/kong/README.md | 1462 +++---------- charts/kube-prometheus-stack/README.md | 1854 +++++++++-------- charts/lodestar/README.md | 62 + charts/loki/README.md | 222 +- charts/mev-boost/README.md | 60 + charts/mev-boost/values.yaml | 38 +- charts/mysql/README.md | 25 + charts/posmoni/README.md | 60 + charts/posmoni/values.yaml | 78 +- charts/promtail/README.md | 2 +- charts/rpc-saas-secretStore/README.md | 26 + charts/validator-ejector/README.md | 101 + charts/validator-ejector/values.yaml | 122 +- charts/validator-kapi/README.md | 89 + charts/validator-kapi/values.yaml | 106 +- charts/validators/README.md | 125 ++ charts/validators/values.yaml | 328 +-- charts/vouch/README.md | 108 + charts/vouch/values.yaml | 106 +- charts/web3signer/README.md | 77 + charts/web3signer/values.yaml | 140 +- 33 files changed, 3687 insertions(+), 3257 deletions(-) create mode 100644 charts/common/README.md create mode 100644 charts/dirk/README.md create mode 100644 charts/ethereum-node/README.md create mode 100644 charts/execution-beacon/README.md create mode 100644 charts/juno-node/README.md create mode 100644 charts/lodestar/README.md create mode 100644 charts/mev-boost/README.md create mode 100644 charts/mysql/README.md create mode 100644 charts/posmoni/README.md create mode 100644 charts/rpc-saas-secretStore/README.md create mode 100644 charts/validator-ejector/README.md create mode 100644 charts/validator-kapi/README.md create mode 100644 charts/validators/README.md create mode 100644 charts/vouch/README.md create mode 100644 charts/web3signer/README.md diff --git a/charts/cert-manager/README.md b/charts/cert-manager/README.md index d613a03e0..5442ad605 100644 --- a/charts/cert-manager/README.md +++ b/charts/cert-manager/README.md @@ -1,271 +1,188 @@ # cert-manager -cert-manager is a Kubernetes addon to automate the management and issuance of -TLS certificates from various issuing sources. - -It will ensure certificates are valid and up to date periodically, and attempt -to renew certificates at an appropriate time before expiry. - -## Prerequisites - -- Kubernetes 1.20+ - -## Installing the Chart - -Full installation instructions, including details on how to configure extra -functionality in cert-manager can be found in the [installation docs](https://cert-manager.io/docs/installation/kubernetes/). - -Before installing the chart, you must first install the cert-manager CustomResourceDefinition resources. -This is performed in a separate step to allow you to easily uninstall and reinstall cert-manager without deleting your installed custom resources. 
- -```bash -$ kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.3/cert-manager.crds.yaml -``` - -To install the chart with the release name `my-release`: - -```console -## Add the Jetstack Helm repository -$ helm repo add jetstack https://charts.jetstack.io - -## Install the cert-manager helm chart -$ helm install my-release --namespace cert-manager --version v1.12.3 jetstack/cert-manager -``` - -In order to begin issuing certificates, you will need to set up a ClusterIssuer -or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). - -More information on the different types of issuers and how to configure them -can be found in [our documentation](https://cert-manager.io/docs/configuration/). - -For information on how to configure cert-manager to automatically provision -Certificates for Ingress resources, take a look at the -[Securing Ingresses documentation](https://cert-manager.io/docs/usage/ingress/). - -> **Tip**: List all releases using `helm list` - -## Upgrading the Chart - -Special considerations may be required when upgrading the Helm chart, and these -are documented in our full [upgrading guide](https://cert-manager.io/docs/installation/upgrading/). - -**Please check here before performing upgrades!** - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -If you want to completely uninstall cert-manager from your cluster, you will also need to -delete the previously installed CustomResourceDefinition resources: - -```console -$ kubectl delete -f https://github.com/cert-manager/cert-manager/releases/download/v1.12.3/cert-manager.crds.yaml -``` - -## Configuration - -The following table lists the configurable parameters of the cert-manager chart and their default values. - -| Parameter | Description | Default | -| --------- | ----------- | ------- | -| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | `[]` | -| `global.commonLabels` | Labels to apply to all resources | `{}` | -| `global.rbac.create` | If `true`, create and use RBAC resources (includes sub-charts) | `true` | -| `global.priorityClassName`| Priority class name for cert-manager and webhook pods | `""` | -| `global.podSecurityPolicy.enabled` | If `true`, create and use PodSecurityPolicy (includes sub-charts) | `false` | -| `global.podSecurityPolicy.useAppArmor` | If `true`, use Apparmor seccomp profile in PSP | `true` | -| `global.leaderElection.namespace` | Override the namespace used to store the ConfigMap for leader election | `kube-system` | -| `global.leaderElection.leaseDuration` | The duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate | | -| `global.leaderElection.renewDeadline` | The interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration | | -| `global.leaderElection.retryPeriod` | The duration the clients should wait between attempting acquisition and renewal of a leadership | | -| `installCRDs` | If true, CRD resources will be installed as part of the Helm chart. 
If enabled, when uninstalling CRD resources will be deleted causing all installed custom resources to be DELETED | `false` | -| `image.repository` | Image repository | `quay.io/jetstack/cert-manager-controller` | -| `image.tag` | Image tag | `v1.12.3` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `replicaCount` | Number of cert-manager replicas | `1` | -| `clusterResourceNamespace` | Override the namespace used to store DNS provider credentials etc. for ClusterIssuer resources | Same namespace as cert-manager pod | -| `featureGates` | Set of comma-separated key=value pairs that describe feature gates on the controller. Some feature gates may also have to be enabled on other components, and can be set supplying the `feature-gate` flag to `.extraArgs` | `` | -| `extraArgs` | Optional flags for cert-manager | `[]` | -| `extraEnv` | Optional environment variables for cert-manager | `[]` | -| `serviceAccount.create` | If `true`, create a new service account | `true` | -| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the fullname template | | -| `serviceAccount.annotations` | Annotations to add to the service account | | -| `serviceAccount.automountServiceAccountToken` | Automount API credentials for the Service Account | `true` | -| `volumes` | Optional volumes for cert-manager | `[]` | -| `volumeMounts` | Optional volume mounts for cert-manager | `[]` | -| `resources` | CPU/memory resource requests/limits | `{}` | -| `securityContext` | Security context for the controller pod assignment | refer to [Default Security Contexts](#default-security-contexts) | -| `containerSecurityContext` | Security context to be set on the controller component container | refer to [Default Security Contexts](#default-security-contexts) | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `affinity` | Node affinity for pod assignment | `{}` | -| `tolerations` | Node tolerations for pod assignment | `[]` | -| `topologySpreadConstraints` | Topology spread constraints for pod assignment | `[]` | -| `livenessProbe.enabled` | Enable or disable the liveness probe for the controller container in the controller Pod. See https://cert-manager.io/docs/installation/best-practice/ to learn about when you might want to enable this livenss probe. 
| `false` | -| `livenessProbe.initialDelaySeconds` | The liveness probe initial delay (in seconds) | `10` | -| `livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` | -| `livenessProbe.timeoutSeconds` | The liveness probe timeout (in seconds) | `10` | -| `livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` | -| `livenessProbe.successThreshold` | The liveness probe success threshold | `1` | -| `livenessProbe.failureThreshold` | The liveness probe failure threshold | `8` | -| `ingressShim.defaultIssuerName` | Optional default issuer to use for ingress resources | | -| `ingressShim.defaultIssuerKind` | Optional default issuer kind to use for ingress resources | | -| `ingressShim.defaultIssuerGroup` | Optional default issuer group to use for ingress resources | | -| `prometheus.enabled` | Enable Prometheus monitoring | `true` | -| `prometheus.servicemonitor.enabled` | Enable Prometheus Operator ServiceMonitor monitoring | `false` | -| `prometheus.servicemonitor.namespace` | Define namespace where to deploy the ServiceMonitor resource | (namespace where you are deploying) | -| `prometheus.servicemonitor.prometheusInstance` | Prometheus Instance definition | `default` | -| `prometheus.servicemonitor.targetPort` | Prometheus scrape port | `9402` | -| `prometheus.servicemonitor.path` | Prometheus scrape path | `/metrics` | -| `prometheus.servicemonitor.interval` | Prometheus scrape interval | `60s` | -| `prometheus.servicemonitor.labels` | Add custom labels to ServiceMonitor | | -| `prometheus.servicemonitor.scrapeTimeout` | Prometheus scrape timeout | `30s` | -| `prometheus.servicemonitor.honorLabels` | Enable label honoring for metrics scraped by Prometheus (see [Prometheus scrape config docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for details). By setting `honorLabels` to `true`, Prometheus will prefer label contents given by cert-manager on conflicts. Can be used to remove the "exported_namespace" label for example. | `false` | -| `podAnnotations` | Annotations to add to the cert-manager pod | `{}` | -| `deploymentAnnotations` | Annotations to add to the cert-manager deployment | `{}` | -| `podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the cert-manager deployment | `false` | -| `podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot used if `maxUnavailable` is set. | `1` | -| `podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot used if `minAvailable` is set. 
| | -| `podDnsPolicy` | Optional cert-manager pod [DNS policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-policy) | | -| `podDnsConfig` | Optional cert-manager pod [DNS configurations](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pods-dns-config) | | -| `podLabels` | Labels to add to the cert-manager pod | `{}` | -| `serviceLabels` | Labels to add to the cert-manager controller service | `{}` | -| `serviceAnnotations` | Annotations to add to the cert-manager service | `{}` | -| `http_proxy` | Value of the `HTTP_PROXY` environment variable in the cert-manager pod | | -| `https_proxy` | Value of the `HTTPS_PROXY` environment variable in the cert-manager pod | | -| `no_proxy` | Value of the `NO_PROXY` environment variable in the cert-manager pod | | -| `dns01RecursiveNameservers` | Comma separated string with host and port of the recursive nameservers cert-manager should query | `` | -| `dns01RecursiveNameserversOnly` | Forces cert-manager to only use the recursive nameservers for verification. | `false` | -| `enableCertificateOwnerRef` | When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted | `false` | -| `webhook.replicaCount` | Number of cert-manager webhook replicas | `1` | -| `webhook.timeoutSeconds` | Seconds the API server should wait the webhook to respond before treating the call as a failure. | `10` | -| `webhook.podAnnotations` | Annotations to add to the webhook pods | `{}` | -| `webhook.podLabels` | Labels to add to the cert-manager webhook pod | `{}` | -| `webhook.serviceLabels` | Labels to add to the cert-manager webhook service | `{}` | -| `webhook.deploymentAnnotations` | Annotations to add to the webhook deployment | `{}` | -| `webhook.podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the cert-manager deployment | `false` | -| `webhook.podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot used if `maxUnavailable` is set. | `1` | -| `webhook.podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot used if `minAvailable` is set. | | -| `webhook.mutatingWebhookConfigurationAnnotations` | Annotations to add to the mutating webhook configuration | `{}` | -| `webhook.validatingWebhookConfigurationAnnotations` | Annotations to add to the validating webhook configuration | `{}` | -| `webhook.serviceAnnotations` | Annotations to add to the webhook service | `{}` | -| `webhook.config` | WebhookConfiguration YAML used to configure flags for the webhook. Generates a ConfigMap containing contents of the field. See `values.yaml` for example. | `{}` | -| `webhook.extraArgs` | Optional flags for cert-manager webhook component | `[]` | -| `webhook.serviceAccount.create` | If `true`, create a new service account for the webhook component | `true` | -| `webhook.serviceAccount.name` | Service account for the webhook component to be used. 
If not set and `webhook.serviceAccount.create` is `true`, a name is generated using the fullname template | | -| `webhook.serviceAccount.annotations` | Annotations to add to the service account for the webhook component | | -| `webhook.serviceAccount.automountServiceAccountToken` | Automount API credentials for the webhook Service Account | | -| `webhook.resources` | CPU/memory resource requests/limits for the webhook pods | `{}` | -| `webhook.nodeSelector` | Node labels for webhook pod assignment | `{}` | -| `webhook.networkPolicy.enabled` | Enable default network policies for webhooks egress and ingress traffic | `false` | -| `webhook.networkPolicy.ingress` | Sets ingress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` | -| `webhook.networkPolicy.egress` | Sets ingress policy block. See NetworkPolicy documentation. See `values.yaml` for example. | `{}` | -| `webhook.affinity` | Node affinity for webhook pod assignment | `{}` | -| `webhook.tolerations` | Node tolerations for webhook pod assignment | `[]` | -| `webhook.topologySpreadConstraints` | Topology spread constraints for webhook pod assignment | `[]` | -| `webhook.image.repository` | Webhook image repository | `quay.io/jetstack/cert-manager-webhook` | -| `webhook.image.tag` | Webhook image tag | `v1.12.3` | -| `webhook.image.pullPolicy` | Webhook image pull policy | `IfNotPresent` | -| `webhook.securePort` | The port that the webhook should listen on for requests. | `10250` | -| `webhook.securityContext` | Security context for webhook pod assignment | refer to [Default Security Contexts](#default-security-contexts) | -| `webhook.containerSecurityContext` | Security context to be set on the webhook component container | refer to [Default Security Contexts](#default-security-contexts) | -| `webhook.hostNetwork` | If `true`, run the Webhook on the host network. | `false` | -| `webhook.serviceType` | The type of the `Service`. | `ClusterIP` | -| `webhook.loadBalancerIP` | The specific load balancer IP to use (when `serviceType` is `LoadBalancer`). | | -| `webhook.url.host` | The host to use to reach the webhook, instead of using internal cluster DNS for the service. 
| | -| `webhook.livenessProbe.failureThreshold` | The liveness probe failure threshold | `3` | -| `webhook.livenessProbe.initialDelaySeconds` | The liveness probe initial delay (in seconds) | `60` | -| `webhook.livenessProbe.periodSeconds` | The liveness probe period (in seconds) | `10` | -| `webhook.livenessProbe.successThreshold` | The liveness probe success threshold | `1` | -| `webhook.livenessProbe.timeoutSeconds` | The liveness probe timeout (in seconds) | `1` | -| `webhook.readinessProbe.failureThreshold` | The readiness probe failure threshold | `3` | -| `webhook.readinessProbe.initialDelaySeconds` | The readiness probe initial delay (in seconds) | `5` | -| `webhook.readinessProbe.periodSeconds` | The readiness probe period (in seconds) | `5` | -| `webhook.readinessProbe.successThreshold` | The readiness probe success threshold | `1` | -| `webhook.readinessProbe.timeoutSeconds` | The readiness probe timeout (in seconds) | `1` | -| `cainjector.enabled` | Toggles whether the cainjector component should be installed (required for the webhook component to work) | `true` | -| `cainjector.replicaCount` | Number of cert-manager cainjector replicas | `1` | -| `cainjector.podAnnotations` | Annotations to add to the cainjector pods | `{}` | -| `cainjector.podLabels` | Labels to add to the cert-manager cainjector pod | `{}` | -| `cainjector.deploymentAnnotations` | Annotations to add to the cainjector deployment | `{}` | -| `cainjector.podDisruptionBudget.enabled` | Adds a PodDisruptionBudget for the cert-manager deployment | `false` | -| `cainjector.podDisruptionBudget.minAvailable` | Configures the minimum available pods for voluntary disruptions. Cannot used if `maxUnavailable` is set. | `1` | -| `cainjector.podDisruptionBudget.maxUnavailable` | Configures the maximum unavailable pods for voluntary disruptions. Cannot used if `minAvailable` is set. | | -| `cainjector.extraArgs` | Optional flags for cert-manager cainjector component | `[]` | -| `cainjector.serviceAccount.create` | If `true`, create a new service account for the cainjector component | `true` | -| `cainjector.serviceAccount.name` | Service account for the cainjector component to be used. 
If not set and `cainjector.serviceAccount.create` is `true`, a name is generated using the fullname template | | -| `cainjector.serviceAccount.annotations` | Annotations to add to the service account for the cainjector component | | -| `cainjector.serviceAccount.automountServiceAccountToken` | Automount API credentials for the cainjector Service Account | `true` | -| `cainjector.resources` | CPU/memory resource requests/limits for the cainjector pods | `{}` | -| `cainjector.nodeSelector` | Node labels for cainjector pod assignment | `{}` | -| `cainjector.affinity` | Node affinity for cainjector pod assignment | `{}` | -| `cainjector.tolerations` | Node tolerations for cainjector pod assignment | `[]` | -| `cainjector.topologySpreadConstraints` | Topology spread constraints for cainjector pod assignment | `[]` | -| `cainjector.image.repository` | cainjector image repository | `quay.io/jetstack/cert-manager-cainjector` | -| `cainjector.image.tag` | cainjector image tag | `v1.12.3` | -| `cainjector.image.pullPolicy` | cainjector image pull policy | `IfNotPresent` | -| `cainjector.securityContext` | Security context for cainjector pod assignment | refer to [Default Security Contexts](#default-security-contexts) | -| `cainjector.containerSecurityContext` | Security context to be set on cainjector component container | refer to [Default Security Contexts](#default-security-contexts) | -| `acmesolver.image.repository` | acmesolver image repository | `quay.io/jetstack/cert-manager-acmesolver` | -| `acmesolver.image.tag` | acmesolver image tag | `v1.12.3` | -| `acmesolver.image.pullPolicy` | acmesolver image pull policy | `IfNotPresent` | -| `startupapicheck.enabled` | Toggles whether the startupapicheck Job should be installed | `true` | -| `startupapicheck.securityContext` | Security context for startupapicheck pod assignment | refer to [Default Security Contexts](#default-security-contexts) | -| `startupapicheck.containerSecurityContext` | Security context to be set on startupapicheck component container | refer to [Default Security Contexts](#default-security-contexts) | -| `startupapicheck.timeout` | Timeout for 'kubectl check api' command | `1m` | -| `startupapicheck.backoffLimit` | Job backoffLimit | `4` | -| `startupapicheck.jobAnnotations` | Optional additional annotations to add to the startupapicheck Job | `{}` | -| `startupapicheck.podAnnotations` | Optional additional annotations to add to the startupapicheck Pods | `{}` | -| `startupapicheck.extraArgs` | Optional additional arguments for startupapicheck | `[]` | -| `startupapicheck.resources` | CPU/memory resource requests/limits for the startupapicheck pod | `{}` | -| `startupapicheck.nodeSelector` | Node labels for startupapicheck pod assignment | `{}` | -| `startupapicheck.affinity` | Node affinity for startupapicheck pod assignment | `{}` | -| `startupapicheck.tolerations` | Node tolerations for startupapicheck pod assignment | `[]` | -| `startupapicheck.podLabels` | Optional additional labels to add to the startupapicheck Pods | `{}` | -| `startupapicheck.image.repository` | startupapicheck image repository | `quay.io/jetstack/cert-manager-ctl` | -| `startupapicheck.image.tag` | startupapicheck image tag | `v1.12.3` | -| `startupapicheck.image.pullPolicy` | startupapicheck image pull policy | `IfNotPresent` | -| `startupapicheck.serviceAccount.create` | If `true`, create a new service account for the startupapicheck component | `true` | -| `startupapicheck.serviceAccount.name` | Service account for the startupapicheck component 
to be used. If not set and `startupapicheck.serviceAccount.create` is `true`, a name is generated using the fullname template | | -| `startupapicheck.serviceAccount.annotations` | Annotations to add to the service account for the startupapicheck component | | -| `startupapicheck.serviceAccount.automountServiceAccountToken` | Automount API credentials for the startupapicheck Service Account | `true` | -| `maxConcurrentChallenges` | The maximum number of challenges that can be scheduled as 'processing' at once | `60` | - -### Default Security Contexts - -The default pod-level and container-level security contexts, below, adhere to the [restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) Pod Security Standards policies. - -Default pod-level securityContext: -```yaml -runAsNonRoot: true -seccompProfile: - type: RuntimeDefault -``` - -Default containerSecurityContext: -```yaml -allowPrivilegeEscalation: false -capabilities: - drop: - - ALL -``` - -### Assigning Values - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -$ helm install my-release -f values.yaml . -``` -> **Tip**: You can use the default [values.yaml](https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml) - -## Contributing - -This chart is maintained at [github.com/cert-manager/cert-manager](https://github.com/cert-manager/cert-manager/tree/master/deploy/charts/cert-manager). +![Version: v1.12.4](https://img.shields.io/badge/Version-v1.12.4-informational?style=flat-square) ![AppVersion: v1.12.4](https://img.shields.io/badge/AppVersion-v1.12.4-informational?style=flat-square) + +A Helm chart for cert-manager + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Source Code + +* + +## Requirements + +Kubernetes: `>= 1.22.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| acmesolver.image.repository | string | `"quay.io/jetstack/cert-manager-acmesolver"` | | +| affinity | object | `{}` | | +| cainjector.affinity | object | `{}` | | +| cainjector.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| cainjector.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| cainjector.enabled | bool | `true` | | +| cainjector.extraArgs | list | `[]` | | +| cainjector.image.pullPolicy | string | `"IfNotPresent"` | | +| cainjector.image.repository | string | `"quay.io/jetstack/cert-manager-cainjector"` | | +| cainjector.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| cainjector.podDisruptionBudget.enabled | bool | `false` | | +| cainjector.podDisruptionBudget.minAvailable | int | `1` | | +| cainjector.podLabels | object | `{}` | | +| cainjector.replicaCount | int | `1` | | +| cainjector.resources | object | `{}` | | +| cainjector.securityContext.runAsNonRoot | bool | `true` | | +| cainjector.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| cainjector.serviceAccount.automountServiceAccountToken | bool | `true` | | +| cainjector.serviceAccount.create | bool | `true` | | +| cainjector.strategy | object | `{}` | | +| cainjector.tolerations | list | `[]` | | +| cainjector.topologySpreadConstraints | list | `[]` | | +| cainjector.volumeMounts | list | `[]` | | +| cainjector.volumes | 
list | `[]` | | +| clusterResourceNamespace | string | `""` | | +| containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| dns01RecursiveNameservers | string | `""` | | +| dns01RecursiveNameserversOnly | bool | `false` | | +| enableCertificateOwnerRef | bool | `false` | | +| extraArgs | list | `[]` | | +| extraEnv | list | `[]` | | +| featureGates | string | `""` | | +| global.commonLabels | object | `{}` | | +| global.imagePullSecrets | list | `[]` | | +| global.leaderElection.namespace | string | `"kube-system"` | | +| global.logLevel | int | `2` | | +| global.podSecurityPolicy.enabled | bool | `false` | | +| global.podSecurityPolicy.useAppArmor | bool | `true` | | +| global.priorityClassName | string | `""` | | +| global.rbac.aggregateClusterRoles | bool | `true` | | +| global.rbac.create | bool | `true` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"quay.io/jetstack/cert-manager-controller"` | | +| ingressShim | object | `{}` | | +| installCRDs | bool | `true` | | +| livenessProbe.enabled | bool | `false` | | +| livenessProbe.failureThreshold | int | `8` | | +| livenessProbe.initialDelaySeconds | int | `10` | | +| livenessProbe.periodSeconds | int | `10` | | +| livenessProbe.successThreshold | int | `1` | | +| livenessProbe.timeoutSeconds | int | `15` | | +| maxConcurrentChallenges | int | `60` | | +| namespace | string | `""` | | +| nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| podDisruptionBudget.enabled | bool | `false` | | +| podDisruptionBudget.minAvailable | int | `1` | | +| podLabels | object | `{}` | | +| prometheus.enabled | bool | `true` | | +| prometheus.servicemonitor.annotations | object | `{}` | | +| prometheus.servicemonitor.enabled | bool | `false` | | +| prometheus.servicemonitor.honorLabels | bool | `false` | | +| prometheus.servicemonitor.interval | string | `"60s"` | | +| prometheus.servicemonitor.labels | object | `{}` | | +| prometheus.servicemonitor.path | string | `"/metrics"` | | +| prometheus.servicemonitor.prometheusInstance | string | `"default"` | | +| prometheus.servicemonitor.scrapeTimeout | string | `"30s"` | | +| prometheus.servicemonitor.targetPort | int | `9402` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| startupapicheck.affinity | object | `{}` | | +| startupapicheck.backoffLimit | int | `4` | | +| startupapicheck.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| startupapicheck.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| startupapicheck.enabled | bool | `true` | | +| startupapicheck.extraArgs | list | `[]` | | +| startupapicheck.image.pullPolicy | string | `"IfNotPresent"` | | +| startupapicheck.image.repository | string | `"quay.io/jetstack/cert-manager-ctl"` | | +| startupapicheck.jobAnnotations."helm.sh/hook" | string | `"post-install"` | | +| startupapicheck.jobAnnotations."helm.sh/hook-delete-policy" | string | `"before-hook-creation,hook-succeeded"` | | +| startupapicheck.jobAnnotations."helm.sh/hook-weight" | string | `"1"` | | +| startupapicheck.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| startupapicheck.podLabels | object | `{}` | | +| 
startupapicheck.rbac.annotations."helm.sh/hook" | string | `"post-install"` | | +| startupapicheck.rbac.annotations."helm.sh/hook-delete-policy" | string | `"before-hook-creation,hook-succeeded"` | | +| startupapicheck.rbac.annotations."helm.sh/hook-weight" | string | `"-5"` | | +| startupapicheck.resources | object | `{}` | | +| startupapicheck.securityContext.runAsNonRoot | bool | `true` | | +| startupapicheck.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| startupapicheck.serviceAccount.annotations."helm.sh/hook" | string | `"post-install"` | | +| startupapicheck.serviceAccount.annotations."helm.sh/hook-delete-policy" | string | `"before-hook-creation,hook-succeeded"` | | +| startupapicheck.serviceAccount.annotations."helm.sh/hook-weight" | string | `"-5"` | | +| startupapicheck.serviceAccount.automountServiceAccountToken | bool | `true` | | +| startupapicheck.serviceAccount.create | bool | `true` | | +| startupapicheck.timeout | string | `"1m"` | | +| startupapicheck.tolerations | list | `[]` | | +| startupapicheck.volumeMounts | list | `[]` | | +| startupapicheck.volumes | list | `[]` | | +| strategy | object | `{}` | | +| tolerations | list | `[]` | | +| topologySpreadConstraints | list | `[]` | | +| volumeMounts | list | `[]` | | +| volumes | list | `[]` | | +| webhook.affinity | object | `{}` | | +| webhook.config | string | `nil` | | +| webhook.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| webhook.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| webhook.extraArgs | list | `[]` | | +| webhook.hostNetwork | bool | `false` | | +| webhook.image.pullPolicy | string | `"IfNotPresent"` | | +| webhook.image.repository | string | `"quay.io/jetstack/cert-manager-webhook"` | | +| webhook.livenessProbe.failureThreshold | int | `3` | | +| webhook.livenessProbe.initialDelaySeconds | int | `60` | | +| webhook.livenessProbe.periodSeconds | int | `10` | | +| webhook.livenessProbe.successThreshold | int | `1` | | +| webhook.livenessProbe.timeoutSeconds | int | `1` | | +| webhook.networkPolicy.egress[0].ports[0].port | int | `80` | | +| webhook.networkPolicy.egress[0].ports[0].protocol | string | `"TCP"` | | +| webhook.networkPolicy.egress[0].ports[1].port | int | `443` | | +| webhook.networkPolicy.egress[0].ports[1].protocol | string | `"TCP"` | | +| webhook.networkPolicy.egress[0].ports[2].port | int | `53` | | +| webhook.networkPolicy.egress[0].ports[2].protocol | string | `"TCP"` | | +| webhook.networkPolicy.egress[0].ports[3].port | int | `53` | | +| webhook.networkPolicy.egress[0].ports[3].protocol | string | `"UDP"` | | +| webhook.networkPolicy.egress[0].ports[4].port | int | `6443` | | +| webhook.networkPolicy.egress[0].ports[4].protocol | string | `"TCP"` | | +| webhook.networkPolicy.egress[0].to[0].ipBlock.cidr | string | `"0.0.0.0/0"` | | +| webhook.networkPolicy.enabled | bool | `false` | | +| webhook.networkPolicy.ingress[0].from[0].ipBlock.cidr | string | `"0.0.0.0/0"` | | +| webhook.nodeSelector."kubernetes.io/os" | string | `"linux"` | | +| webhook.podDisruptionBudget.enabled | bool | `false` | | +| webhook.podDisruptionBudget.minAvailable | int | `1` | | +| webhook.podLabels | object | `{}` | | +| webhook.readinessProbe.failureThreshold | int | `3` | | +| webhook.readinessProbe.initialDelaySeconds | int | `5` | | +| webhook.readinessProbe.periodSeconds | int | `5` | | +| webhook.readinessProbe.successThreshold | int | `1` | | +| webhook.readinessProbe.timeoutSeconds | int | `1` | | +| webhook.replicaCount | 
int | `1` | | +| webhook.resources | object | `{}` | | +| webhook.securePort | int | `10250` | | +| webhook.securityContext.runAsNonRoot | bool | `true` | | +| webhook.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| webhook.serviceAccount.automountServiceAccountToken | bool | `true` | | +| webhook.serviceAccount.create | bool | `true` | | +| webhook.serviceLabels | object | `{}` | | +| webhook.serviceType | string | `"ClusterIP"` | | +| webhook.strategy | object | `{}` | | +| webhook.timeoutSeconds | int | `10` | | +| webhook.tolerations | list | `[]` | | +| webhook.topologySpreadConstraints | list | `[]` | | +| webhook.url | object | `{}` | | +| webhook.volumeMounts | list | `[]` | | +| webhook.volumes | list | `[]` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/common/README.md b/charts/common/README.md new file mode 100644 index 000000000..7327fd7c9 --- /dev/null +++ b/charts/common/README.md @@ -0,0 +1,21 @@ +# common + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: library](https://img.shields.io/badge/Type-library-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) + +A Library Helm Chart for grouping common logic between stakewise charts. This chart is not deployable by itself. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| exampleValue | string | `"common-chart"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/dirk/README.md b/charts/dirk/README.md new file mode 100644 index 000000000..8f8d69e31 --- /dev/null +++ b/charts/dirk/README.md @@ -0,0 +1,92 @@ +# dirk + +![Version: 1.1.1](https://img.shields.io/badge/Version-1.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v22.10.0](https://img.shields.io/badge/AppVersion-v22.10.0-informational?style=flat-square) + +A Helm chart for installing and configuring large scale ETH staking infrastructure on top of the Kubernetes + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| cliImage | object | `{"pullPolicy":"IfNotPresent","repository":"nethermindeth/keystores-cli","tag":"v1.0.0"}` | CLI image is used to fetch private keys. 
| +| dirk.clientName | string | `"client1"` | | +| dirk.fetchKeysExtraFlags | list | `[]` | | +| dirk.loglevel | string | `"Debug"` | | +| dirk.tracing | string | `nil` | | +| externalSecrets.dataFrom.key | string | `"dirk"` | | +| externalSecrets.enabled | bool | `false` | | +| externalSecrets.secretStoreRef.kind | string | `"SecretStore"` | | +| externalSecrets.secretStoreRef.name | string | `"secretStoreRef"` | | +| fullnameOverride | string | `""` | Provide a name to substitute for the full names of resources | +| global.podSecurityContext | object | `{"fsGroup":10000,"runAsNonRoot":true,"runAsUser":10000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| global.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| global.securityContext.fsGroup | int | `10000` | | +| global.securityContext.readOnlyRootFilesystem | bool | `true` | | +| global.securityContext.runAsNonRoot | bool | `true` | | +| global.securityContext.runAsUser | int | `10000` | | +| global.serviceAccount.create | bool | `true` | | +| httpPort | int | `8881` | Port on which dirk HTTP listens. | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"attestant/dirk"` | | +| image.tag | string | `"1.2.0"` | Overrides the image tag whose default is the chart appVersion. | +| imagePullSecrets | list | `[]` | | +| initImage | object | `{"pullPolicy":"IfNotPresent","repository":"bash","tag":"5.2"}` | Init image is used to chown data volume, etc. | +| livenessProbe.exec.command[0] | string | `"/bin/bash"` | | +| livenessProbe.exec.command[1] | string | `"/scripts/liveness_probe.sh"` | | +| livenessProbe.failureThreshold | int | `1` | | +| livenessProbe.periodSeconds | int | `90` | | +| loggingLevel | string | `"INFO"` | Sets logging verbosity. Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. | +| metricsPort | int | `9000` | The port (TCP) on which Prometheus accesses metrics | +| minReadySeconds | int | `10` | | +| nameOverride | string | `""` | Provide a name in place of operator for `app:` labels | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| persistence.accessModes[0] | string | `"ReadWriteOnce"` | | +| persistence.annotations | object | `{}` | | +| persistence.size | string | `"1Gi"` | | +| persistence.storageClassName | string | `""` | | +| podAnnotations | object | `{}` | | +| readinessProbe.enabled | bool | `true` | | +| readinessProbe.failureThreshold | int | `3` | | +| readinessProbe.httpGet.path | string | `"/metrics"` | | +| readinessProbe.httpGet.port | string | `"metrics"` | | +| readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| readinessProbe.initialDelaySeconds | int | `10` | | +| readinessProbe.periodSeconds | int | `1` | | +| readinessProbe.successThreshold | int | `3` | | +| readinessProbe.timeoutSeconds | int | `3` | | +| replicaCount | int | `3` | | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| service.httpPort | int | `8881` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | +| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | RelabelConfigs to apply to samples before scraping. | +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/dirk/values.yaml b/charts/dirk/values.yaml index 06da1b53a..a67b17348 100644 --- a/charts/dirk/values.yaml +++ b/charts/dirk/values.yaml @@ -5,9 +5,9 @@ global: serviceAccount: create: true - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## + # -- Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # podSecurityContext: runAsNonRoot: true runAsUser: 10000 @@ -27,18 +27,18 @@ replicaCount: 3 image: repository: attestant/dirk pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. + # -- Overrides the image tag whose default is the chart appVersion. tag: "1.2.0" -## Init image is used to chown data volume, etc. -## +# -- Init image is used to chown data volume, etc. +# initImage: repository: bash tag: "5.2" pullPolicy: IfNotPresent -## CLI image is used to fetch private keys. -## +# -- CLI image is used to fetch private keys. +# cliImage: repository: nethermindeth/keystores-cli tag: "v1.0.0" @@ -61,24 +61,24 @@ dirk: imagePullSecrets: [] -## Provide a name in place of operator for `app:` labels -## +# -- Provide a name in place of operator for `app:` labels +# nameOverride: "" -## Provide a name to substitute for the full names of resources -## +# -- Provide a name to substitute for the full names of resources +# fullnameOverride: "" -## Sets logging verbosity. -## Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. +# -- Sets logging verbosity. +# Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. loggingLevel: "INFO" -## Port on which dirk HTTP listens. -## +# -- Port on which dirk HTTP listens. 
+# httpPort: 8881 -## The port (TCP) on which Prometheus accesses metrics -## +# -- The port (TCP) on which Prometheus accesses metrics +# metricsPort: 9000 readinessProbe: @@ -111,12 +111,12 @@ livenessProbe: failureThreshold: 1 -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" @@ -126,9 +126,9 @@ service: type: ClusterIP httpPort: 8881 -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -141,47 +141,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- RelabelConfigs to apply to samples before scraping. + # relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
- ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. + # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {} diff --git a/charts/ethereum-node/README.md b/charts/ethereum-node/README.md new file mode 100644 index 000000000..12031f604 --- /dev/null +++ b/charts/ethereum-node/README.md @@ -0,0 +1,90 @@ +# ethereum-node + +![Version: 0.0.20](https://img.shields.io/badge/Version-0.0.20-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +This chart acts as an umbrella chart and allows to run a ethereum execution and consensus layer client. It's also able to deploy optional monitoring applications. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://ethpandaops.github.io/ethereum-helm-charts | nethermind | 1.0.9 | +| https://ethpandaops.github.io/ethereum-helm-charts | prysm | 1.1.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| ethereum-metrics-exporter.config.consensus.enabled | bool | `true` | | +| ethereum-metrics-exporter.config.consensus.name | string | `"consensus-client"` | | +| ethereum-metrics-exporter.config.consensus.url | string | `"{{ tpl .Values.global.main.beaconEndpoint . }}"` | | +| ethereum-metrics-exporter.config.execution.enabled | bool | `true` | | +| ethereum-metrics-exporter.config.execution.modules[0] | string | `"eth"` | | +| ethereum-metrics-exporter.config.execution.modules[1] | string | `"net"` | | +| ethereum-metrics-exporter.config.execution.modules[2] | string | `"web3"` | | +| ethereum-metrics-exporter.config.execution.name | string | `"execution-client"` | | +| ethereum-metrics-exporter.config.execution.url | string | `"{{ tpl .Values.global.main.rpcEndpoint . 
}}"` | | +| ethereum-metrics-exporter.enabled | bool | `false` | | +| ethereum-metrics-exporter.nameOverride | string | `"metrics-exporter"` | | +| global.checkpointSync.addresses.goerli | string | `"https://checkpoint-sync.goerli.ethpandaops.io"` | | +| global.checkpointSync.addresses.mainnet | string | `"https://mainnet-checkpoint-sync.attestant.io"` | | +| global.checkpointSync.addresses.sepolia | string | `"https://checkpoint-sync.sepolia.ethpandaops.io"` | | +| global.checkpointSync.enabled | bool | `true` | | +| global.clientArgs.networks.goerli.consensus.prysm[0] | string | `"--goerli"` | | +| global.clientArgs.networks.goerli.execution.nethermind[0] | string | `"--config=goerli"` | | +| global.clientArgs.networks.mainnet.consensus.prysm | list | `[]` | | +| global.clientArgs.networks.mainnet.execution.nethermind[0] | string | `"--Pruning.Mode=Hybrid"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[1] | string | `"--Pruning.FullPruningTrigger=VolumeFreeSpace"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[2] | string | `"--Pruning.FullPruningThresholdMb=256000"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[3] | string | `"--Pruning.AvailableSpaceCheckEnabled=false"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[4] | string | `"--Sync.NonValidatorNode=true"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[5] | string | `"--Sync.DownloadBodiesInFastSync=false"` | | +| global.clientArgs.networks.mainnet.execution.nethermind[6] | string | `"--Sync.DownloadReceiptsInFastSync=false"` | | +| global.clientArgs.networks.sepolia.consensus.prysm[0] | string | `"--sepolia"` | | +| global.clientArgs.networks.sepolia.execution.nethermind[0] | string | `"--config=sepolia"` | | +| global.main.beaconEndpoint | string | `"http://{{ .Release.Name }}-beacon:5052"` | | +| global.main.engineEndpoint | string | `"http://{{ .Release.Name }}-execution:8551"` | | +| global.main.env | string | `"staging"` | | +| global.main.network | string | `"mainnet"` | | +| global.main.rpcEndpoint | string | `"http://{{ .Release.Name }}-execution:8545"` | | +| global.secretStore.gcp.clusterLocation | string | `"dummy-cluster-location"` | | +| global.secretStore.gcp.clusterName | string | `"dummy-cluster-name"` | | +| global.secretStore.gcp.projectID | string | `"dummy-project-id"` | | +| global.secretStore.gcp.serviceAccountRef.name | string | `"dummy-service-account"` | | +| global.secretStore.gcp.serviceAccountRef.namespace | string | `"dummy-namespace"` | | +| global.secretStore.refreshInterval | string | `"10m"` | | +| global.secretStore.remoteRef.key | string | `"dummy-key"` | | +| global.secretStore.remoteRef.property | string | `"dummy-property"` | | +| global.secretStore.remoteRef.version | string | `"1"` | | +| nethermind.enabled | bool | `true` | | +| nethermind.extraArgs[0] | string | `"{{- with( index .Values.global.clientArgs.networks .Values.global.main.network ) }}\n {{- range $i, $v := .execution.nethermind }}\n {{- if (eq $i 0) }}\n {{- $v }}\n {{- else }}\n {{ $v }}\n {{- end }}\n {{- end -}}\n{{- end }}"` | | +| nethermind.httpPort | int | `8545` | | +| nethermind.nameOverride | string | `"execution"` | | +| nethermind.p2pPort | int | `30303` | | +| prysm.checkpointSync.enabled | string | `"{{ default .Values.global.checkpointSync.enabled false }}"` | | +| prysm.checkpointSync.url | string | `"{{ index .Values.global.checkpointSync.addresses .Values.global.main.network }}"` | | +| prysm.enabled | bool | `true` | | +| 
prysm.extraArgs[0] | string | `"--execution-endpoint={{ tpl .Values.global.main.engineEndpoint . }} {{- with( index .Values.global.clientArgs.networks .Values.global.main.network ) }}\n {{- range .consensus.prysm }}\n {{ . }}\n {{- end -}}\n{{- end -}}"` | | +| prysm.httpPort | int | `5052` | | +| prysm.nameOverride | string | `"beacon"` | | +| prysm.p2pPort | int | `9000` | | +| prysm.persistence.accessModes | list | `["ReadWriteOnce"]` | Use an existing PVC when persistence.enabled | +| prysm.persistence.annotations | object | `{}` | Annotations for volume claim template | +| prysm.persistence.enabled | bool | `true` | Uses an EmptyDir when not enabled | +| prysm.persistence.selector | object | `{}` | Selector for volume claim template | +| prysm.persistence.size | string | `"100Gi"` | Requested size for volume claim template | +| prysm.persistence.storageClassName | string | `"standard-rwo"` | Use a specific storage class E.g 'local-path' for local storage to achieve best performance Read more (https://github.com/rancher/local-path-provisioner) | +| prysm.resources.limits.cpu | int | `4` | | +| prysm.resources.limits.memory | string | `"4Gi"` | | +| prysm.resources.requests.cpu | int | `2` | | +| prysm.resources.requests.memory | string | `"2Gi"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/ethereum-node/charts/nethermind/README.md b/charts/ethereum-node/charts/nethermind/README.md index ea38a27af..c4f3ad69e 100644 --- a/charts/ethereum-node/charts/nethermind/README.md +++ b/charts/ethereum-node/charts/nethermind/README.md @@ -35,8 +35,7 @@ Nethermind is an Ethereum execution layer implementation created with the C# .NE | imagePullSecrets | list | `[]` | Image pull secrets for Docker images | | ingress.annotations | object | `{}` | Annotations for Ingress | | ingress.enabled | bool | `false` | Ingress resource for the HTTP API | -| ingress.hosts[0].host | string | `"chart-example.local"` | | -| ingress.hosts[0].paths | list | `[]` | | +| ingress.hosts | list | `[{"host":"chart-example.local","paths":[]}]` | Ingress host | | ingress.tls | list | `[]` | Ingress TLS | | initChownData.enabled | bool | `true` | Init container to set the correct permissions to access data directories | | initChownData.image.pullPolicy | string | `"IfNotPresent"` | Container pull policy | @@ -92,8 +91,8 @@ Nethermind is an Ethereum execution layer implementation created with the C# .NE | serviceMonitor.scrapeTimeout | string | `"30s"` | ServiceMonitor scrape timeout | | serviceMonitor.tlsConfig | object | `{}` | ServiceMonitor TLS configuration | | terminationGracePeriodSeconds | int | `300` | How long to wait until the pod is forcefully terminated | -| tolerations | list | `[]` | Tolerations for pods | -| topologySpreadConstraints | list | `[]` | Topology Spread Constraints for pods | +| tolerations | list | `[]` | Tolerations for pods # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| topologySpreadConstraints | list | `[]` | Topology Spread Constraints for pods # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | updateStrategy | object | `{"type":"RollingUpdate"}` | Update stategy for the Statefulset | | updateStrategy.type | string | `"RollingUpdate"` | Update stategy type | | wsPort | int | `8545` | WS Port | diff --git a/charts/ethereum-node/charts/prysm/README.md 
b/charts/ethereum-node/charts/prysm/README.md index 25f48b20d..6d54451a6 100644 --- a/charts/ethereum-node/charts/prysm/README.md +++ b/charts/ethereum-node/charts/prysm/README.md @@ -31,13 +31,12 @@ An open-source Ethereum 2.0 client, written in Go | fullnameOverride | string | `""` | Overrides the chart's computed fullname | | httpPort | int | `3500` | HTTP Port | | image.pullPolicy | string | `"IfNotPresent"` | Prysm container pull policy | -| image.repository | string | `"ethpandaops/prysm"` | Prysm container image repository | -| image.tag | string | `"master"` | | +| image.repository | string | `"gcr.io/prylabs-dev/prysm/beacon-chain"` | Prysm container image repository repository: gcr.io/prysmaticlabs/prysm/beacon-chain | +| image.tag | string | `"v4.1.1"` | Prysm container image tag | | imagePullSecrets | list | `[]` | Image pull secrets for Docker images | | ingress.annotations | object | `{}` | Annotations for Ingress | | ingress.enabled | bool | `false` | Ingress resource for the HTTP API | -| ingress.hosts[0].host | string | `"chart-example.local"` | | -| ingress.hosts[0].paths | list | `[]` | | +| ingress.hosts | list | `[{"host":"chart-example.local","paths":[]}]` | Ingress host | | ingress.tls | list | `[]` | Ingress TLS | | initChownData.enabled | bool | `true` | Init container to set the correct permissions to access data directories | | initChownData.image.pullPolicy | string | `"IfNotPresent"` | Container pull policy | @@ -45,7 +44,6 @@ An open-source Ethereum 2.0 client, written in Go | initChownData.image.tag | string | `"1.34.0"` | Container tag | | initChownData.resources | object | `{}` | Resource requests and limits | | initContainers | list | `[]` | Additional init containers | -| jwt | string | `"ecb22bc24e7d4061f7ed690ccd5846d7d73f5d2b9733267e12f56790398d908a"` | JWT secret used by client as a secret. Change this value. 
| | livenessProbe | object | See `values.yaml` | Liveness probe | | metricsPort | int | `8080` | Metrics Port | | mode | string | `"beacon"` | Mode can be 'beacon' or 'validator' | @@ -92,8 +90,8 @@ An open-source Ethereum 2.0 client, written in Go | serviceMonitor.scrapeTimeout | string | `"30s"` | ServiceMonitor scrape timeout | | serviceMonitor.tlsConfig | object | `{}` | ServiceMonitor TLS configuration | | terminationGracePeriodSeconds | int | `300` | How long to wait until the pod is forcefully terminated | -| tolerations | list | `[]` | Tolerations for pods | -| topologySpreadConstraints | list | `[]` | Topology Spread Constraints for pods | +| tolerations | list | `[]` | Tolerations for pods # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| topologySpreadConstraints | list | `[]` | Topology Spread Constraints for pods # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ | | updateStrategy | object | `{"type":"RollingUpdate"}` | Update stategy for the Statefulset | | updateStrategy.type | string | `"RollingUpdate"` | Update stategy type | diff --git a/charts/execution-beacon/README.md b/charts/execution-beacon/README.md new file mode 100644 index 000000000..859233aef --- /dev/null +++ b/charts/execution-beacon/README.md @@ -0,0 +1,204 @@ +# execution-beacon + +![Version: 1.1.0](https://img.shields.io/badge/Version-1.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.0.1](https://img.shields.io/badge/AppVersion-0.0.1-informational?style=flat-square) + +A Helm chart that combines Kubernetes manifests and scripts to deploy Ethereum full-nodes without validators with top-notch performance as the goal. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | +| AntiD2ta | | | + +## Requirements + +Kubernetes: `^1.23.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| beacon.builderEndpoint | string | `""` | MEV Boost endpoint | +| beacon.checkPointSync | object | `{"enabled":true,"trustedSourceUrl":"","url":"https://mainnet-checkpoint-sync.attestant.io"}` | To get Beacon node up and running in only a few minutes from a recent finalized checkpoint state rather than syncing from genesis. | +| beacon.client | string | `"nimbus"` | | +| beacon.extraFlags | list | `[]` | Extra flags to pass to the node | +| beacon.grpc.enabled | bool | `true` | | +| beacon.grpc.host | string | `"0.0.0.0"` | | +| beacon.grpc.port | int | `4000` | | +| beacon.grpc.portName | string | `"rpc"` | | +| beacon.initChownData | bool | `true` | If false, data ownership will not be reset at startup This allows the beacon node to be run with an arbitrary user | +| beacon.javaOpts | object | `{"enabled":true,"maxHeapSize":"-Xmx3g"}` | Teku specific setting | +| beacon.javaOpts.maxHeapSize | string | `"-Xmx3g"` | This option is used to set java specific values for heap size and should be used if you experience out of memory errors. The Xmx option stands for the maximum memory allocation pool for a Java Virtual Machine ( JVM ). 
https://besu.hyperledger.org/en/stable/public-networks/how-to/configure-jvm/manage-memory/ | +| beacon.metrics.annotations | object | `{}` | | +| beacon.metrics.categories | list | `["JVM","PROCESS","BEACON","DISCOVERY","EVENTBUS","EXECUTOR","NETWORK","STORAGE","STORAGE_HOT_DB","STORAGE_FINALIZED_DB","REMOTE_VALIDATOR","VALIDATOR","VALIDATOR_PERFORMANCE"]` | Monitoring Teku Metric categories to enable | +| beacon.metrics.enabled | bool | `true` | Whether to enable metrics collection or not | +| beacon.metrics.host | string | `"0.0.0.0"` | | +| beacon.metrics.hostAllowList | list | `["*"]` | List of hostnames to allow, or * to allow any host | +| beacon.metrics.port | int | `9090` | | +| beacon.metrics.prometheusRule | object | `{"enabled":true}` | Custom PrometheusRule to be defined ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions | +| beacon.metrics.prometheusRule.enabled | bool | `true` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | +| beacon.metrics.serviceMonitor | object | `{"enabled":true}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| beacon.metrics.serviceMonitor.enabled | bool | `true` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| beacon.persistence | object | `{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":true,"size":"100Gi","storageClassName":""}` | Whether or not to allocate persistent volume disk for the data directory. In case of node failure, the node data directory will still persist. | +| beacon.proposerOnly | bool | `false` | Lighthouse specific setting | +| beacon.resources | object | `{}` | | +| beacon.restApi | object | `{"corsOrigins":["*"],"enabled":true,"host":"0.0.0.0","hostAllowList":["*"],"portMap":{"lighthouse":5052,"lodestar":9596,"nimbus":5052,"prysm":8080,"teku":5051}}` | Rest API Settings | +| beacon.restApi.enabled | bool | `true` | Enables Beacon Rest API | +| beacon.restApi.hostAllowList | list | `["*"]` | Comma-separated list of hostnames to allow, or * to allow any host | +| beacon.restApi.portMap | object | `{"lighthouse":5052,"lodestar":9596,"nimbus":5052,"prysm":8080,"teku":5051}` | Port number of Beacon Rest API | +| beacon.suggestedFeeRecipient | string | `""` | Post bellatrix, this address will receive the transaction fees produced by any blocks from this node. Default to junk whilst bellatrix is in development state. Validator client can override this value through the preparebeaconproposer api. | +| beacon.targetPeers | int | `50` | | +| beacon.targetPeersMin | int | `40` | | +| beacon.totalDifficultyOverride | string | `""` | Manually sets the total difficulty, overriding the default TERMINAL_TOTAL_DIFFICULTY value. WARNING: This flag should be used only if you have a clear understanding that the community has decided to override the terminal difficulty. Incorrect usage will result in your node experiencing consensus failure. &#13;
| +| execution.client | string | `"nethermind"` | | +| execution.extraFlags | list | `[]` | Extra flags to pass to the node | +| execution.healthchecks | object | `{"enabled":true,"lowStorageSpaceShutdownThreshold":0,"lowStorageSpaceWarningThreshold":5,"pollingInterval":5,"slug":"/health"}` | Nethermind HealthChecks module | +| execution.initChownData | bool | `true` | If false, data ownership will not be reset at startup This allows the execution node to be run with an arbitrary user | +| execution.javaOpts | object | `{"enabled":false,"maxHeapSize":""}` | Besu specific setting | +| execution.javaOpts.maxHeapSize | string | `""` | This option is used to set java specific values for heap size and should be used if you experience out of memory errors. The Xmx option stands for the maximum memory allocation pool for a Java Virtual Machine ( JVM ). https://besu.hyperledger.org/en/stable/public-networks/how-to/configure-jvm/manage-memory/ | +| execution.jsonrpc.enabled | bool | `true` | | +| execution.jsonrpc.engine.corsOrigins[0] | string | `"*"` | | +| execution.jsonrpc.engine.hostAllowList[0] | string | `"*"` | | +| execution.jsonrpc.engine.port | int | `8551` | | +| execution.jsonrpc.grpc.port | int | `9090` | | +| execution.jsonrpc.host | string | `"0.0.0.0"` | | +| execution.jsonrpc.http.corsOrigins[0] | string | `"*"` | | +| execution.jsonrpc.http.hostAllowList[0] | string | `"*"` | | +| execution.jsonrpc.http.port | int | `8545` | | +| execution.jsonrpc.namespaces.erigon[0] | string | `"eth"` | | +| execution.jsonrpc.namespaces.erigon[1] | string | `"erigon"` | | +| execution.jsonrpc.namespaces.erigon[2] | string | `"web3"` | | +| execution.jsonrpc.namespaces.erigon[3] | string | `"net"` | | +| execution.jsonrpc.namespaces.erigon[4] | string | `"engine"` | | +| execution.jsonrpc.namespaces.geth[0] | string | `"web3"` | | +| execution.jsonrpc.namespaces.geth[1] | string | `"eth"` | | +| execution.jsonrpc.namespaces.geth[2] | string | `"net"` | | +| execution.jsonrpc.namespaces.geth[3] | string | `"engine"` | | +| execution.jsonrpc.namespaces.nethermind[0] | string | `"Web3"` | | +| execution.jsonrpc.namespaces.nethermind[1] | string | `"Eth"` | | +| execution.jsonrpc.namespaces.nethermind[2] | string | `"Net"` | | +| execution.jsonrpc.namespaces.nethermind[3] | string | `"Subscribe"` | | +| execution.jsonrpc.namespaces.nethermind[4] | string | `"Health"` | | +| execution.jsonrpc.websocket.enabled | bool | `true` | | +| execution.jsonrpc.websocket.origins | string | `"*"` | | +| execution.jsonrpc.websocket.port | int | `8546` | | +| execution.metrics | object | `{"enabled":true,"host":"0.0.0.0","port":8008,"prometheusRule":{"enabled":true},"serviceMonitor":{"enabled":true}}` | Monitoring Additional settings could be made in non-global section. 
| +| execution.metrics.enabled | bool | `true` | Whether to enable metrics collection or not | +| execution.metrics.prometheusRule | object | `{"enabled":true}` | Custom PrometheusRule to be defined ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions | +| execution.metrics.prometheusRule.enabled | bool | `true` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | +| execution.metrics.serviceMonitor | object | `{"enabled":true}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| execution.metrics.serviceMonitor.enabled | bool | `true` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| execution.persistence | object | `{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":true,"size":"100Gi","storageClassName":""}` | Whether or not to allocate persistent volume disk for the data directory. In case of node failure, the node data directory will still persist. | +| execution.privateApiAddr | string | `"127.0.0.1:9090"` | Private api network address, for example: 127.0.0.1:9090, empty string means not to start the listener. Do not expose to public network. Serves remote database interface (default: "127.0.0.1:9090") | +| execution.resources | object | `{}` | | +| execution.targetPeers | int | `50` | | +| execution.terminalTotalDifficulty | string | `""` | Manually specify TerminalTotalDifficulty, overriding the bundled setting | +| fullnameOverride | string | `""` | Provide a name to substitute for the full names of resources | +| global.JWTSecret | string | `""` | JSON Web Token (JWT) authentication is used to secure the communication between the beacon node and execution client. You can generate a JWT using a command line tool, for example: openssl rand -hex 32 > token.txt | +| global.affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| global.ethsider.bindAddr | int | `3000` | | +| global.ethsider.enabled | bool | `true` | | +| global.ethsider.livenessProbe | object | `{"enabled":false,"failureThreshold":3,"httpGet":{"path":"/liveness","port":"sidecar","scheme":"HTTP"},"initialDelaySeconds":10,"periodSeconds":1,"successThreshold":3,"timeoutSeconds":3}` | Configure liveness and readiness probes ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ NB! 
readinessProbe and livenessProbe must be disabled before genesis | +| global.ethsider.pullPolicy | string | `"IfNotPresent"` | | +| global.ethsider.readinessProbe.enabled | bool | `true` | | +| global.ethsider.readinessProbe.failureThreshold | int | `3` | | +| global.ethsider.readinessProbe.httpGet.path | string | `"/readiness"` | | +| global.ethsider.readinessProbe.httpGet.port | string | `"sidecar"` | | +| global.ethsider.readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| global.ethsider.readinessProbe.initialDelaySeconds | int | `10` | | +| global.ethsider.readinessProbe.periodSeconds | int | `1` | | +| global.ethsider.readinessProbe.successThreshold | int | `3` | | +| global.ethsider.readinessProbe.timeoutSeconds | int | `3` | | +| global.ethsider.repository | string | `"nethermindeth/ethsider"` | | +| global.ethsider.tag | string | `"v1.0.0"` | | +| global.externalSecrets.data | list | `[]` | | +| global.externalSecrets.enabled | bool | `false` | | +| global.externalSecrets.secretStoreRef.kind | string | `"SecretStore"` | | +| global.externalSecrets.secretStoreRef.name | string | `"secretStoreRef"` | | +| global.image.beacon.lighthouse.repository | string | `"sigp/lighthouse"` | | +| global.image.beacon.lighthouse.tag | string | `"v4.5.0"` | | +| global.image.beacon.lodestar.repository | string | `"chainsafe/lodestar"` | | +| global.image.beacon.lodestar.tag | string | `"v1.13.0"` | | +| global.image.beacon.nimbus.repository | string | `"statusim/nimbus-eth2"` | | +| global.image.beacon.nimbus.tag | string | `"multiarch-v24.1.0"` | | +| global.image.beacon.prysm.repository | string | `"gcr.io/prylabs-dev/prysm/beacon-chain"` | | +| global.image.beacon.prysm.tag | string | `"v4.1.1"` | | +| global.image.beacon.teku.repository | string | `"consensys/teku"` | | +| global.image.beacon.teku.tag | string | `"23.12.1"` | | +| global.image.execution.bseu.repository | string | `"hyperledger/besu"` | | +| global.image.execution.bseu.tag | string | `"23.10.2"` | | +| global.image.execution.erigon.repository | string | `"thorax/erigon"` | | +| global.image.execution.erigon.tag | string | `"v2.55.1"` | | +| global.image.execution.geth.repository | string | `"ethereum/client-go"` | | +| global.image.execution.geth.tag | string | `"v1.13.8"` | | +| global.image.execution.nethermind.repository | string | `"nethermind/nethermind"` | | +| global.image.execution.nethermind.tag | string | `"1.24.0"` | | +| global.image.imagePullPolicy | string | `"IfNotPresent"` | | +| global.imagePullSecrets | list | `[]` | Credentials to fetch images from private registry ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| global.ingress.annotations | object | `{}` | | +| global.ingress.enabled | bool | `false` | | +| global.ingress.hosts | list | `[]` | Hostnames. Must be provided if Ingress is enabled. | +| global.ingress.ingressClassName | string | `"nginx"` | | +| global.ingress.labels | object | `{}` | | +| global.ingress.paths | list | `[]` | Paths to use for ingress rules By default, the Service created by this chart is used as the target Service for the Ingress. If not defined the following default object will be used: - path: "/" port: 8545 pathType: "ImplementationSpecific" serviceName: "" | +| global.ingress.routePrefix | string | `"/"` | Route Prefix. Can skip it if any item of path has the path defined. 
| +| global.ingress.tls | list | `[]` | TLS configuration for Ingress Secret must be manually created in the namespace | +| global.initImage | object | `{"pullPolicy":"IfNotPresent","repository":"bitnami/kubectl","tag":"1.28"}` | Init image is used to chown data volume, initialise genesis, etc. | +| global.metrics | object | `{"annotations":{},"enabled":true,"prometheusRule":{"additionalLabels":{},"default":true,"namespace":"","rules":[]},"serviceMonitor":{"additionalLabels":{},"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}}` | Monitoring | +| global.metrics.prometheusRule | object | `{"additionalLabels":{},"default":true,"namespace":"","rules":[]}` | Custom PrometheusRule to be defined ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions | +| global.metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels for the prometheusRule | +| global.metrics.prometheusRule.default | bool | `true` | Create a default set of Alerts | +| global.metrics.prometheusRule.namespace | string | `""` | The namespace in which the prometheusRule will be created | +| global.metrics.prometheusRule.rules | list | `[]` | Custom Prometheus rules | +| global.metrics.serviceMonitor | object | `{"additionalLabels":{},"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| global.metrics.serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| global.metrics.serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| global.metrics.serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| global.metrics.serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| global.metrics.serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| global.metrics.serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| global.metrics.serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| global.network | string | `"mainnet"` | Ethereum network | +| global.nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| global.p2pNodePort | object | `{"annotations":{},"enabled":false,"replicaToNodePort":{},"startAtBeacon":31200,"startAtExecution":31100,"type":"NodePort"}` | When p2pNodePort is enabled, your P2P port will be exposed via service type NodePort. This will generate a service for each replica, with a port binding via NodePort. This is useful if you want to expose and announce your node to the Internet. 
| +| global.p2pNodePort.enabled | bool | `false` | Expose P2P port via NodePort | +| global.p2pNodePort.replicaToNodePort | object | See `values.yaml` for example | Overwrite a port for specific replicas | +| global.p2pNodePort.startAtExecution | int | `31100` | The ports allocation will start from this value | +| global.p2pNodePort.type | string | `"NodePort"` | Options: NodePort, LoadBalancer | +| global.podSecurityContext | object | `{"fsGroup":10000,"runAsNonRoot":true,"runAsUser":10000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| global.priorityClassName | string | `""` | Used to assign priority to pods ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ | +| global.rbac | object | `{"create":true}` | RBAC configuration. ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ | +| global.replicaCount | int | `1` | | +| global.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| global.securityContext.readOnlyRootFilesystem | bool | `true` | | +| global.securityContext.runAsNonRoot | bool | `true` | | +| global.securityContext.runAsUser | int | `10000` | | +| global.service.svcHeadless | bool | `true` | | +| global.service.type | string | `"ClusterIP"` | | +| global.serviceAccount | object | `{"create":true}` | Service account ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| global.sessionAffinity.enabled | bool | `false` | Whether to enable session affinity or not | +| global.sessionAffinity.timeoutSeconds | int | `86400` | The session duration in seconds | +| global.sharedPersistence | object | `{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":false,"size":"300Gi","storageClassName":""}` | Whether or not to allocate persistent volume disk for the data directory. In case of node failure, the node data directory will still persist. | +| global.terminationGracePeriodSeconds | int | `120` | Termination Grace Period ref: https://kubernetes.io/docs/tasks/run-application/force-delete-stateful-set-pod/#delete-pods | +| global.tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| nameOverride | string | `""` | Provide a name in place of geth for `app:` labels | +| rbac | object | `{"clusterRules":[{"apiGroups":[""],"resources":["nodes"],"verbs":["get","list","watch"]}],"name":"","rules":[{"apiGroups":[""],"resources":["services"],"verbs":["get","list","watch"]}]}` | RBAC configuration. ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ | +| rbac.clusterRules | list | `[{"apiGroups":[""],"resources":["nodes"],"verbs":["get","list","watch"]}]` | Required ClusterRole rules | +| rbac.clusterRules[0] | object | `{"apiGroups":[""],"resources":["nodes"],"verbs":["get","list","watch"]}` | Required to obtain the nodes external IP | +| rbac.name | string | `""` | The name of the cluster role to use. If not set and create is true, a name is generated using the fullname template | +| rbac.rules | list | `[{"apiGroups":[""],"resources":["services"],"verbs":["get","list","watch"]}]` | Required Role rules | +| rbac.rules[0] | object | `{"apiGroups":[""],"resources":["services"],"verbs":["get","list","watch"]}` | Required to get information about the services nodePort. 
| +| serviceAccount | object | `{"annotations":{},"name":""}` | Service account ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/execution-beacon/values.yaml b/charts/execution-beacon/values.yaml index 8d5ea4607..beace8318 100644 --- a/charts/execution-beacon/values.yaml +++ b/charts/execution-beacon/values.yaml @@ -3,8 +3,7 @@ global: replicaCount: 1 - ## Ethereum network - ## + # -- Ethereum network network: mainnet image: @@ -39,30 +38,30 @@ global: repository: "chainsafe/lodestar" tag: "v1.13.0" - ## JSON Web Token (JWT) authentication is used to secure the communication - ## between the beacon node and execution client. You can generate a JWT using - ## a command line tool, for example: - ## openssl rand -hex 32 > token.txt - ## + # -- JSON Web Token (JWT) authentication is used to secure the communication + # between the beacon node and execution client. You can generate a JWT using + # a command line tool, for example: + # openssl rand -hex 32 > token.txt JWTSecret: "" - ## Credentials to fetch images from private registry - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## + # -- Credentials to fetch images from private registry + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # imagePullSecrets: [] - # Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/user-guide/node-selection/ + # nodeSelector: {} - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## + # -- Tolerations for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + # tolerations: {} - ## Affinity for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## + # -- Affinity for pod assignment + # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + # + affinity: {} ## Example: ## affinity: ## podAntiAffinity: @@ -74,17 +73,15 @@ global: ## values: ## - prysm ## topologyKey: kubernetes.io/hostname - ## - affinity: {} - ## Used to assign priority to pods - ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ - ## + # -- Used to assign priority to pods + # ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + # priorityClassName: "" - ## Whether or not to allocate persistent volume disk for the data directory. - ## In case of node failure, the node data directory will still persist. - ## + # -- Whether or not to allocate persistent volume disk for the data directory. + # In case of node failure, the node data directory will still persist. 
+ # sharedPersistence: enabled: false storageClassName: "" @@ -106,10 +103,10 @@ global: tag: "v1.0.0" pullPolicy: IfNotPresent bindAddr: 3000 - ## Configure liveness and readiness probes - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## NB! readinessProbe and livenessProbe must be disabled before genesis - ## + # -- Configure liveness and readiness probes + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + # NB! readinessProbe and livenessProbe must be disabled before genesis + # livenessProbe: enabled: false initialDelaySeconds: 10 @@ -139,110 +136,92 @@ global: type: ClusterIP sessionAffinity: - # Whether to enable session affinity or not + # -- Whether to enable session affinity or not enabled: false - # The session duration in seconds + # -- The session duration in seconds timeoutSeconds: 86400 - ## Service account - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - ## + # -- Service account + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + # serviceAccount: create: true - ## RBAC configuration. - ## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ - ## + # -- RBAC configuration. + # ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ + # rbac: create: true - ## Termination Grace Period - ## ref: https://kubernetes.io/docs/tasks/run-application/force-delete-stateful-set-pod/#delete-pods - ## + # -- Termination Grace Period + # ref: https://kubernetes.io/docs/tasks/run-application/force-delete-stateful-set-pod/#delete-pods + # terminationGracePeriodSeconds: 120 - ## Init image is used to chown data volume, initialise genesis, etc. - ## + # -- Init image is used to chown data volume, initialise genesis, etc. initImage: repository: "bitnami/kubectl" tag: "1.28" pullPolicy: IfNotPresent - ## Monitoring - ## + # -- Monitoring metrics: enabled: true annotations: {} - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## + # -- Prometheus Service Monitor + # ref: https://github.com/coreos/prometheus-operator + # https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # serviceMonitor: - ## The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created namespace: "" - ## The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped interval: 30s - ## The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended scrapeTimeout: "" - ## Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- Metrics RelabelConfigs to apply to samples before scraping. relabellings: [] - ## Metrics RelabelConfigs to apply to samples before ingestion. - ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. 
metricRelabelings: [] - ## Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint honorLabels: false - ## Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus additionalLabels: {} - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## + # -- Custom PrometheusRule to be defined + # ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + # prometheusRule: - ## Create a default set of Alerts - ## + # -- Create a default set of Alerts default: true - ## The namespace in which the prometheusRule will be created - ## + # -- The namespace in which the prometheusRule will be created namespace: "" - ## Additional labels for the prometheusRule - ## + # -- Additional labels for the prometheusRule additionalLabels: {} - ## Custom Prometheus rules - ## + # -- Custom Prometheus rules rules: [] - ## When p2pNodePort is enabled, your P2P port will be exposed via service type NodePort. - ## This will generate a service for each replica, with a port binding via NodePort. - ## This is useful if you want to expose and announce your node to the Internet. - ## + # -- When p2pNodePort is enabled, your P2P port will be exposed via service type NodePort. + # This will generate a service for each replica, with a port binding via NodePort. + # This is useful if you want to expose and announce your node to the Internet. + # p2pNodePort: - ## @param p2pNodePort.enabled Expose P2P port via NodePort - ## + # -- Expose P2P port via NodePort enabled: false - ## @param p2pNodePort.annotations - ## annotations: {} - ## @param p2pNodePort.type - ## Options: NodePort, LoadBalancer + # -- Options: NodePort, LoadBalancer type: NodePort - ## @param p2pNodePort.startAt The ports allocation will start from this value - ## + # -- The ports allocation will start from this value startAtExecution: 31100 startAtBeacon: 31200 - ## @param p2pNodePort.replicaToNodePort Overwrite a port for specific replicas - ## @default -- See `values.yaml` for example + # -- Overwrite a port for specific replicas + # @default -- See `values.yaml` for example replicaToNodePort: {} # "0": 32345 # "3": 32348 - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## + # -- Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # podSecurityContext: runAsNonRoot: true runAsUser: 10000 @@ -260,44 +239,44 @@ global: enabled: false ingressClassName: nginx - ## Route Prefix. Can skip it if any item of path has the path defined. - ## + # -- Route Prefix. Can skip it if any item of path has the path defined. routePrefix: / annotations: {} labels: {} - ## Hostnames. - ## Must be provided if Ingress is enabled. - ## + # -- Hostnames. + # Must be provided if Ingress is enabled. + # + hosts: [] # hosts: # - prometheus.domain.com - hosts: [] - ## Paths to use for ingress rules - ## By default, the Service created by this chart is used as the target - ## Service for the Ingress. 
- ## If not defined the following default object will be used: - ## - path: "/" - ## port: 8545 - ## pathType: "ImplementationSpecific" - ## serviceName: "" + # -- Paths to use for ingress rules + # By default, the Service created by this chart is used as the target + # Service for the Ingress. + # If not defined the following default object will be used: + # - path: "/" + # port: 8545 + # pathType: "ImplementationSpecific" + # serviceName: "" + # paths: [] - ## paths: - ## - path: "/execution" - ## port: 8545 - ## pathType: "Prefix" - ## - path: "/beacon" - ## port: 8080 - ## pathType: "Exact" - ## - path: "/execution" - ## port: 8545 - ## pathType: "Prefix" - ## serviceName: "alternativeServiceName" - - ## TLS configuration for Ingress - ## Secret must be manually created in the namespace - ## + # paths: + # - path: "/execution" + # port: 8545 + # pathType: "Prefix" + # - path: "/beacon" + # port: 8080 + # pathType: "Exact" + # - path: "/execution" + # port: 8545 + # pathType: "Prefix" + # serviceName: "alternativeServiceName" + + # -- TLS configuration for Ingress + # Secret must be manually created in the namespace + # tls: [] # - secretName: execution-beacon-general-tls # hosts: @@ -306,9 +285,9 @@ global: execution: client: nethermind - ## Whether or not to allocate persistent volume disk for the data directory. - ## In case of node failure, the node data directory will still persist. - ## + # -- Whether or not to allocate persistent volume disk for the data directory. + # In case of node failure, the node data directory will still persist. + # persistence: enabled: true storageClassName: "" @@ -317,47 +296,44 @@ execution: size: 100Gi annotations: {} - ## If false, data ownership will not be reset at startup - ## This allows the execution node to be run with an arbitrary user - ## + # -- If false, data ownership will not be reset at startup + # This allows the execution node to be run with an arbitrary user + # initChownData: true - # private api network address, for example: 127.0.0.1:9090, + # -- Private api network address, for example: 127.0.0.1:9090, # empty string means not to start the listener. # Do not expose to public network. # Serves remote database interface (default: "127.0.0.1:9090") + # privateApiAddr: "127.0.0.1:9090" - ## Monitoring - ## Additional settings could be made in non-global section. - ## + # -- Monitoring + # Additional settings could be made in non-global section. 
+ # metrics: - ## Whether to enable metrics collection or not - ## + # -- Whether to enable metrics collection or not enabled: true - port: 8008 host: "0.0.0.0" - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## + # -- Prometheus Service Monitor + # ref: https://github.com/coreos/prometheus-operator + # https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # serviceMonitor: - ## Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator enabled: true - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## + # -- Custom PrometheusRule to be defined + # ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + # prometheusRule: - ## Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator - ## + # -- Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator enabled: true ## -------------------------- Execution node specific settings ----------------------------------------- - # Manually specify TerminalTotalDifficulty, overriding the bundled setting + # -- Manually specify TerminalTotalDifficulty, overriding the bundled setting terminalTotalDifficulty: "" jsonrpc: @@ -400,17 +376,17 @@ execution: corsOrigins: - "*" - ## Besu specific setting + # -- Besu specific setting javaOpts: enabled: false - # This option is used to set java specific values for heap size and should be used if you experience out of memory errors. + # -- This option is used to set java specific values for heap size and should be used if you experience out of memory errors. # The Xmx option stands for the maximum memory allocation pool for a Java Virtual Machine ( JVM ). # https://besu.hyperledger.org/en/stable/public-networks/how-to/configure-jvm/manage-memory/ + maxHeapSize: "" # Example for kubernetes resources at 8Gi memory: # maxHeapSize: "-Xmx3g" - maxHeapSize: "" - # Nethermind HealthChecks module + # -- Nethermind HealthChecks module healthchecks: enabled: true slug: "/health" @@ -419,17 +395,16 @@ execution: lowStorageSpaceWarningThreshold: 5 targetPeers: 50 - ## Extra flags to pass to the node - ## + # -- Extra flags to pass to the node extraFlags: [] resources: {} beacon: client: nimbus - ## Whether or not to allocate persistent volume disk for the data directory. - ## In case of node failure, the node data directory will still persist. - ## + # -- Whether or not to allocate persistent volume disk for the data directory. + # In case of node failure, the node data directory will still persist. 
+ # persistence: enabled: true storageClassName: "" @@ -438,22 +413,21 @@ beacon: size: 100Gi annotations: {} - ## If false, data ownership will not be reset at startup - ## This allows the beacon node to be run with an arbitrary user - ## + # -- If false, data ownership will not be reset at startup + # This allows the beacon node to be run with an arbitrary user + # initChownData: true metrics: - ## Whether to enable metrics collection or not - ## + # -- Whether to enable metrics collection or not enabled: true annotations: {} port: 9090 host: "0.0.0.0" - ## Monitoring - ## Teku Metric categories to enable + # -- Monitoring + # Teku Metric categories to enable categories: - JVM - PROCESS @@ -469,69 +443,66 @@ beacon: - VALIDATOR - VALIDATOR_PERFORMANCE - # List of hostnames to allow, or * to allow any host + # -- List of hostnames to allow, or * to allow any host hostAllowList: - "*" - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## + # -- Prometheus Service Monitor + # ref: https://github.com/coreos/prometheus-operator + # https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # serviceMonitor: - ## Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator enabled: true - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## + # -- Custom PrometheusRule to be defined + # ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + # prometheusRule: - ## Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator - ## + # -- Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator enabled: true - ## To get Beacon node up and running in only a few minutes - ## from a recent finalized checkpoint state rather than syncing from genesis. - ## + # -- To get Beacon node up and running in only a few minutes + # from a recent finalized checkpoint state rather than syncing from genesis. + # checkPointSync: enabled: true url: "https://mainnet-checkpoint-sync.attestant.io" trustedSourceUrl: "" - ## Post bellatrix, this address will receive the transaction fees produced - ## by any blocks from this node. Default to junk whilst bellatrix is in development state. - ## Validator client can override this value through the preparebeaconproposer api. - ## + # -- Post bellatrix, this address will receive the transaction fees produced + # by any blocks from this node. Default to junk whilst bellatrix is in development state. + # Validator client can override this value through the preparebeaconproposer api. + # suggestedFeeRecipient: "" - # Lighthouse specific setting + # -- Lighthouse specific setting proposerOnly: false - ## Teku specific setting + # -- Teku specific setting javaOpts: enabled: true - # This option is used to set java specific values for heap size and should be used if you experience out of memory errors. + # -- This option is used to set java specific values for heap size and should be used if you experience out of memory errors. # The Xmx option stands for the maximum memory allocation pool for a Java Virtual Machine ( JVM ). 
# https://besu.hyperledger.org/en/stable/public-networks/how-to/configure-jvm/manage-memory/ + maxHeapSize: "-Xmx3g" # Example for kubernetes resources at 8Gi memory: # maxHeapSize: "-Xmx3g" - maxHeapSize: "-Xmx3g" - ## MEV Boost endpoint - ## + # -- MEV Boost endpoint builderEndpoint: "" - # Rest API Settings + # -- Rest API Settings restApi: - # Enables Beacon Rest API + # -- Enables Beacon Rest API enabled: true host: "0.0.0.0" - # Comma-separated list of hostnames to allow, or * + # -- Comma-separated list of hostnames to allow, or * # to allow any host hostAllowList: - "*" corsOrigins: - "*" - # Port number of Beacon Rest API + # -- Port number of Beacon Rest API portMap: teku: 5051 prysm: 8080 @@ -548,46 +519,44 @@ beacon: targetPeers: 50 targetPeersMin: 40 - ## Sets the total difficulty to manual overrides the default - ## TERMINAL_TOTAL_DIFFICULTY value. WARNING: This flag should be used only if you - ## have a clear understanding that community has decided to override the terminal difficulty. - ## Incorrect usage will result in your node experience consensus failure. + # -- Manually sets the total difficulty, overriding the default + # TERMINAL_TOTAL_DIFFICULTY value. WARNING: This flag should be used only if you + # have a clear understanding that the community has decided to override the terminal difficulty. + # Incorrect usage will result in your node experiencing consensus failure. + # totalDifficultyOverride: "" - ## Extra flags to pass to the node - ## + # -- Extra flags to pass to the node extraFlags: [] resources: {} -## Service account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- Service account +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template + name: "" -## Provide a name in place of geth for `app:` labels -## +# -- Provide a name in place of geth for `app:` labels nameOverride: "" -## Provide a name to substitute for the full names of resources -## +# -- Provide a name to substitute for the full names of resources fullnameOverride: "" -# RBAC configuration. -## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -## +# -- RBAC configuration. +# ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +# rbac: - # The name of the cluster role to use. + # -- The name of the cluster role to use. # If not set and create is true, a name is generated using the fullname template + name: "" - ## Required ClusterRole rules - ## + # -- Required ClusterRole rules clusterRules: - ## Required to obtain the nodes external IP - ## + # -- Required to obtain the nodes external IP - apiGroups: [""] resources: - "nodes" @@ -595,11 +564,10 @@ rbac: - "get" - "list" - "watch" - ## Required Role rules - ## + # -- Required Role rules rules: - ## Required to get information about the services nodePort. - ## + # -- Required to get information about the services nodePort. &#13;
+ # - apiGroups: [""] resources: - "services" diff --git a/charts/external-dns/README.md b/charts/external-dns/README.md index 4ea6f4fe7..080b57fad 100644 --- a/charts/external-dns/README.md +++ b/charts/external-dns/README.md @@ -1,118 +1,104 @@ -# ExternalDNS - -[ExternalDNS](https://github.com/kubernetes-sigs/external-dns/) synchronizes exposed Kubernetes Services and Ingresses with DNS providers. - -## Installing the Chart - -Before you can install the chart you will need to add the `external-dns` repo to [Helm](https://helm.sh/). - -```shell -helm repo add external-dns https://kubernetes-sigs.github.io/external-dns/ -``` - -After you've installed the repo you can install the chart. - -```shell -helm upgrade --install external-dns external-dns/external-dns -``` - -## Configuration - -The following table lists the configurable parameters of the _ExternalDNS_ chart and their default values. - -| Parameter | Description | Default | -|------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------| -| `image.repository` | Image repository. | `registry.k8s.io/external-dns/external-dns` | -| `image.tag` | Image tag, will override the default tag derived from the chart app version. | `""` | -| `image.pullPolicy` | Image pull policy. | `IfNotPresent` | -| `imagePullSecrets` | Image pull secrets. | `[]` | -| `nameOverride` | Override the `name` of the chart. | `""` | -| `fullnameOverride` | Override the `fullname` of the chart. | `""` | -| `serviceAccount.create` | If `true`, create a new `serviceaccount`. | `true` | -| `serviceAccount.annotations` | Annotations to add to the service account. | `{}` | -| `serviceAccount.labels` | Labels to add to the service account. | `{}` | -| `serviceAccount.name` | Service account to be used. If not set and `serviceAccount.create` is `true`, a name is generated using the full name template. | `""` | -| `rbac.create` | If `true`, create the RBAC resources. | `true` | -| `rbac.additionalPermissions` | Additional permissions to be added to the cluster role. | `{}` | -| `initContainers` | Add init containers to the pod. | `[]` | -| `deploymentAnnotations` | Annotations to add to the Deployment. | `{}` | -| `podLabels` | Labels to add to the pod. | `{}` | -| `podAnnotations` | Annotations to add to the pod. | `{}` | -| `podSecurityContext` | Security context for the pod, this supports the full [PodSecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#podsecuritycontext-v1-core) API. | _see values.yaml_ | -| `shareProcessNamespace` | If `true` enable [Process Namespace Sharing](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) | `false` | -| `securityContext` | Security context for the _external-dns_ container, this supports the full [SecurityContext](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#securitycontext-v1-core) API. | _see values.yaml_ | -| `priorityClassName` | Priority class name to use for the pod. | `""` | -| `terminationGracePeriodSeconds` | Termination grace period for the pod. | `null` | -| `serviceMonitor.enabled` | If `true`, create a _Prometheus_ service monitor. 
| `false` | -| `serviceMonitor.namespace` | Forced namespace for ServiceMonitor. | `null` | -| `serviceMonitor.annotations` | Annotations to be set on the ServiceMonitor. | `{}` | -| `serviceMonitor.additionalLabels` | Additional labels to be set on the ServiceMonitor. | `{}` | -| `serviceMonitor.interval` | _Prometheus_ scrape frequency. | `null` | -| `serviceMonitor.scrapeTimeout` | _Prometheus_ scrape timeout. | `null` | -| `serviceMonitor.scheme` | _Prometheus_ scrape scheme. | `null` | -| `serviceMonitor.tlsConfig` | _Prometheus_ scrape tlsConfig. | `{}` | -| `serviceMonitor.metricRelabelings` | _Prometheus_ scrape metricRelabelings. | `[]` | -| `serviceMonitor.relabelings` | _Prometheus_ scrape relabelings. | `[]` | -| `serviceMonitor.targetLabels` | _Prometheus_ scrape targetLabels. | `[]` | -| `env` | [Environment variables](https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) for the _external-dns_ container, this supports the full [EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) API including secrets and configmaps. | `[]` | -| `livenessProbe` | [Liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for the _external-dns_ container, this supports the full [Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) API. | See _values.yaml_ | -| `readinessProbe` | [Readiness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for the _external-dns_ container, this supports the full [Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) API. | See _values.yaml_ | -| `service.annotations` | Annotations to add to the service. | `{}` | -| `service.port` | Port to expose via the service. | `7979` | -| `extraVolumes` | Additional volumes for the pod, this supports the full [VolumeDevice](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumedevice-v1-core) API. | `[]` | -| `extraVolumeMounts` | Additional volume mounts for the _external-dns_ container, this supports the full [VolumeMount](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#volumemount-v1-core) API. | `[]` | -| `resources` | Resource requests and limits for the _external-dns_ container, this supports the full [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#resourcerequirements-v1-core) API. | `{}` | -| `nodeSelector` | Node labels for pod assignment. | `{}` | -| `tolerations` | Tolerations for pod assignment, this supports the full [Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#toleration-v1-core) API. | `[]` | -| `affinity` | Affinity settings for pod assignment, this supports the full [Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#affinity-v1-core) API. | `{}` | -| `topologySpreadConstraints` | TopologySpreadConstraint settings for pod assignment, this supports the full [TopologySpreadConstraints](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#topologyspreadconstraint-v1-core) API. | `[]` | -| `logLevel` | Verbosity of the logs, available values are: `panic`, `debug`, `info`, `warning`, `error`, `fatal`. | `info` | -| `logFormat` | Formats of the logs, available values are: `text`, `json`. | `text` | -| `interval` | The interval for DNS updates. 
| `1m` | -| `triggerLoopOnEvent` | When enabled, triggers run loop on create/update/delete events in addition of regular interval. | `false` | -| `namespaced` | When enabled, external-dns runs on namespace scope. Additionally, Role and Rolebinding will be namespaced, too. | `false` | -| `sources` | K8s resources type to be observed for new DNS entries. | See _values.yaml_ | -| `policy` | How DNS records are synchronized between sources and providers, available values are: `sync`, `upsert-only`. | `upsert-only` | -| `registry` | Registry Type, available types are: `txt`, `noop`. | `txt` | -| `txtOwnerId` | TXT registry identifier. | `""` | -| `txtPrefix` | Prefix to create a TXT record with a name following the pattern `prefix.`. | `""` | -| `domainFilters` | Limit possible target zones by domain suffixes. | `[]` | -| `provider` | DNS provider where the DNS records will be created, for the available providers and how to configure them see the [README](https://github.com/kubernetes-sigs/external-dns#deploying-to-a-cluster) (this can be templated). | `aws` | -| `extraArgs` | Extra arguments to pass to the _external-dns_ container, these are needed for provider specific arguments (these can be templated). | `[]` | -| `deploymentStrategy` | .spec.strategy of the external-dns Deployment. Defaults to 'Recreate' since multiple external-dns pods may conflict with each other. | `{type: Recreate}` | -| `secretConfiguration.enabled` | Enable additional secret configuration. | `false` | -| `secretConfiguration.mountPath` | Mount path of secret configuration secret (this can be templated). | `""` | -| `secretConfiguration.data` | Secret configuration secret data. Could be used to store DNS provider credentials. | `{}` | -| `secretConfiguration.subPath` | Sub-path of secret configuration secret (this can be templated). | `""` | - -## Namespaced scoped installation - -external-dns supports running on a namespaced only scope, too. -If `namespaced=true` is defined, the helm chart will setup `Roles` and `RoleBindings` instead `ClusterRoles` and `ClusterRoleBindings`. - -### Limited supported -Not all sources are supported in namespaced scope, since some sources depends on cluster-wide resources. -For example: Source `node` isn't supported, since `kind: Node` has scope `Cluster`. -Sources like `istio-virtualservice` only work, if all resources like `Gateway` and `VirtualService` are present in the same -namespaces as `external-dns`. - -The annotation `external-dns.alpha.kubernetes.io/endpoints-type: NodeExternalIP` is not supported. - -If `namespaced` is set to `true`, please ensure that `sources` my only contains supported sources (Default: `service,ingress`. - -### Support matrix - -| Source | Supported | Infos | -|------------------------|-----------|------------------------| -| `ingress` | ✅ | | -| `istio-gateway` | ✅ | | -| `istio-virtualservice` | ✅ | | -| `crd` | ✅ | | -| `kong-tcpingress` | ✅ | | -| `openshift-route` | ✅ | | -| `skipper-routegroup` | ✅ | | -| `gloo-proxy` | ✅ | | -| `contour-httpproxy` | ✅ | | -| `service` | ⚠️️ | NodePort not supported | -| `node` | ❌ | | -| `pod` | ❌ | | +# external-dns + +![Version: 1.13.1](https://img.shields.io/badge/Version-1.13.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.13.5](https://img.shields.io/badge/AppVersion-0.13.5-informational?style=flat-square) + +ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers. 
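+ +## Installing the Chart + +A minimal installation sketch, assuming the chart is consumed from this repository's local `charts/external-dns` directory (the release name and namespace below are examples; adjust them as needed): + +```shell +# Install or upgrade a release named "external-dns" from the local chart directory +helm upgrade --install external-dns ./charts/external-dns --namespace external-dns --create-namespace +```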
+ +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Source Code + +* + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| commonLabels | object | `{}` | | +| deploymentAnnotations | object | `{}` | | +| deploymentStrategy.type | string | `"Recreate"` | | +| dnsPolicy | string | `nil` | | +| domainFilters | list | `[]` | | +| env | list | `[]` | | +| extraArgs | list | `[]` | | +| extraVolumeMounts | list | `[]` | | +| extraVolumes | list | `[]` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"registry.k8s.io/external-dns/external-dns"` | | +| image.tag | string | `""` | | +| imagePullSecrets | list | `[]` | | +| initContainers | list | `[]` | | +| interval | string | `"1m"` | | +| livenessProbe.failureThreshold | int | `2` | | +| livenessProbe.httpGet.path | string | `"/healthz"` | | +| livenessProbe.httpGet.port | string | `"http"` | | +| livenessProbe.initialDelaySeconds | int | `10` | | +| livenessProbe.periodSeconds | int | `10` | | +| livenessProbe.successThreshold | int | `1` | | +| livenessProbe.timeoutSeconds | int | `5` | | +| logFormat | string | `"text"` | | +| logLevel | string | `"info"` | | +| nameOverride | string | `""` | | +| namespaced | bool | `false` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| podLabels | object | `{}` | | +| podSecurityContext.fsGroup | int | `65534` | | +| podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| policy | string | `"upsert-only"` | | +| priorityClassName | string | `""` | | +| provider | string | `"google"` | | +| rbac.additionalPermissions | list | `[]` | | +| rbac.create | bool | `true` | | +| readinessProbe.failureThreshold | int | `6` | | +| readinessProbe.httpGet.path | string | `"/healthz"` | | +| readinessProbe.httpGet.port | string | `"http"` | | +| readinessProbe.initialDelaySeconds | int | `5` | | +| readinessProbe.periodSeconds | int | `10` | | +| readinessProbe.successThreshold | int | `1` | | +| readinessProbe.timeoutSeconds | int | `5` | | +| registry | string | `"txt"` | | +| resources | object | `{}` | | +| secretConfiguration.data | object | `{}` | | +| secretConfiguration.enabled | bool | `false` | | +| secretConfiguration.mountPath | string | `""` | | +| secretConfiguration.subPath | string | `""` | | +| securityContext.allowPrivilegeEscalation | bool | `false` | | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.readOnlyRootFilesystem | bool | `true` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.runAsUser | int | `65534` | | +| service.annotations | object | `{}` | | +| service.port | int | `7979` | | +| serviceAccount.annotations."iam.gke.io/gcp-service-account" | string | `"external-dns@juno-dev-nth.iam.gserviceaccount.com"` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.labels | object | `{}` | | +| serviceAccount.name | string | `""` | | +| serviceMonitor.additionalLabels | object | `{}` | | +| serviceMonitor.annotations | object | `{}` | | +| serviceMonitor.enabled | bool | `false` | | +| serviceMonitor.metricRelabelings | list | `[]` | | +| serviceMonitor.relabelings | list | `[]` | | +| serviceMonitor.targetLabels | list | `[]` | | +| shareProcessNamespace | bool | `false` | | +| sources[0] | string | `"service"` | | +| sources[1] | string | 
`"ingress"` | | +| terminationGracePeriodSeconds | string | `nil` | | +| tolerations | list | `[]` | | +| topologySpreadConstraints | list | `[]` | | +| triggerLoopOnEvent | bool | `false` | | +| txtOwnerId | string | `""` | | +| txtPrefix | string | `""` | | +| txtSuffix | string | `""` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/external-secrets/README.md b/charts/external-secrets/README.md index dd7e3ae26..a1a27cb2c 100644 --- a/charts/external-secrets/README.md +++ b/charts/external-secrets/README.md @@ -4,7 +4,7 @@ [//]: # (README.md generated by gotmpl. DO NOT EDIT.) -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.9](https://img.shields.io/badge/Version-0.9.9-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.9.10](https://img.shields.io/badge/Version-0.9.10-informational?style=flat-square) External secret management for Kubernetes @@ -133,7 +133,7 @@ The command removes all the Kubernetes components associated with the chart and | securityContext.runAsNonRoot | bool | `true` | | | securityContext.runAsUser | int | `1000` | | | securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | -| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. | +| serviceAccount.annotations | object | `{"iam.gke.io/gcp-service-account":"dummy-workloadIdentity-account"}` | Annotations to add to the service account. | | serviceAccount.automount | bool | `true` | Automounts the service account token in all containers of the pod | | serviceAccount.create | bool | `true` | Specifies whether a service account should be created. | | serviceAccount.extraLabels | object | `{}` | Extra Labels to add to the service account. 
| diff --git a/charts/juno-node/README.md b/charts/juno-node/README.md new file mode 100644 index 000000000..d1b6cdded --- /dev/null +++ b/charts/juno-node/README.md @@ -0,0 +1,134 @@ +# juno-chart + +![Version: 0.1.2](https://img.shields.io/badge/Version-0.1.2-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) + +A Helm chart for deploying Juno service + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| args.--db-path | string | `"/var/lib/juno"` | | +| args.--http | string | `"true"` | | +| args.--http-host | string | `"0.0.0.0"` | | +| args.--http-port | string | `"6060"` | | +| args.--metrics | string | `"true"` | | +| args.--metrics-host | string | `"0.0.0.0"` | | +| args.--metrics-port | string | `"9090"` | | +| args.--network | string | `"goerli"` | | +| args.--pending-poll-interval | string | `"2s"` | | +| args.--ws | string | `"true"` | | +| args.--ws-host | string | `"0.0.0.0"` | | +| args.--ws-port | string | `"6061"` | | +| batchjob.enabled | bool | `false` | | +| batchjob.schedule | string | `"* */1 * * *"` | | +| deployment.healthCheck.enabled | bool | `false` | | +| deployment.healthCheck.livenessProbe.failureThreshold | int | `6` | | +| deployment.healthCheck.livenessProbe.initialDelaySeconds | int | `9600` | | +| deployment.healthCheck.livenessProbe.periodSeconds | int | `600` | | +| deployment.healthCheck.readinessProbe.failureThreshold | int | `6` | | +| deployment.healthCheck.readinessProbe.initialDelaySeconds | int | `9600` | | +| deployment.healthCheck.readinessProbe.periodSeconds | int | `600` | | +| deployment.imagename | string | `"nethermind/juno"` | | +| deployment.imagetag | string | `"v0.6.2"` | | +| deployment.namespace | string | `"test"` | | +| deployment.port[0] | int | `6060` | | +| deployment.port[1] | int | `6061` | | +| deployment.projectName | string | `"goerli-test"` | | +| deployment.replicas | int | `1` | | +| deployment.resources.limits.cpu | string | `"4"` | | +| deployment.resources.limits.memory | string | `"14Gi"` | | +| deployment.resources.requests.cpu | string | `"2"` | | +| deployment.resources.requests.memory | string | `"8Gi"` | | +| deployment.snapshotUrl | string | `""` | | +| env.data[0].name | string | `"NETWORK"` | | +| env.data[0].value | string | `"juno"` | | +| env.enabled | bool | `false` | | +| hpa.cpuUtilization | int | `80` | | +| hpa.enabled | bool | `false` | | +| hpa.maxReplicas | int | `2` | | +| hpa.minReplicas | int | `1` | | +| nodeSelector.enabled | bool | `false` | | +| nodeSelector.label.network | string | `"juno"` | | +| pgo.config.BUCKET | string | `"gs://juno-prod-pgo-bucket"` | | +| pgo.config.DURATION | string | `"30"` | | +| pgo.config.ENV | string | `"juno-integration"` | | +| pgo.config.ITERATION | string | `"10"` | | +| pgo.config.URL | string | `"http://localhost:6062/debug/pprof/profile"` | | +| pgo.enabled | bool | `false` | | +| pgo.image | string | `"gcr.io/juno-dev-nth/node-pgo-monitor:4.0"` | | +| pgo.port | int | `3000` | | +| pgo.resources.limits.cpu | string | `"2"` | | +| pgo.resources.limits.memory | string | `"4Gi"` | | +| pgo.resources.requests.cpu | string | `"1"` | | +| pgo.resources.requests.memory | string | `"2Gi"` | | +| pvc.datasource | string | `""` | | +| pvc.enabled | bool | `true` | | +| pvc.mount[0].mountPath | string | `"/var/lib/juno"` | | +| pvc.mount[0].pvName | 
string | `"pv"` | | +| pvc.mount[0].storageSize | string | `"250Gi"` | | +| pvc.storageClassName | string | `"standard"` | | +| serviceAccount.enabled | bool | `false` | | +| serviceAccount.gcpServiceAccount | string | `"monitoring-sa-euw1@juno-prod-nth.iam.gserviceaccount.com"` | | +| serviceAccount.name | string | `"juno-pgo"` | | +| svc.globalStaticIpName | string | `""` | | +| svc.ingress.enabled | bool | `true` | | +| svc.ingress.extraHost.enabled | bool | `false` | | +| svc.ingress.extraHost.hosts[0] | string | `""` | | +| svc.ingress.host | string | `"test.juno.rpc.nethermind.dev"` | | +| svc.ingress.rules[0].host | string | `"test.juno.rpc.nethermind.dev"` | | +| svc.ingress.rules[0].http | string | `nil` | | +| svc.ingress.rules[0].paths[0].backend | string | `nil` | | +| svc.ingress.rules[0].paths[0].path | string | `"/*"` | | +| svc.ingress.rules[0].paths[0].pathType | string | `"ImplementationSpecific"` | | +| svc.ingress.rules[0].paths[0].service.name | string | `"goerli-test"` | | +| svc.ingress.rules[0].paths[0].service.port.number | int | `6060` | | +| svc.ingress.rules[0].paths[1].backend | string | `nil` | | +| svc.ingress.rules[0].paths[1].path | string | `"/ws"` | | +| svc.ingress.rules[0].paths[1].pathType | string | `"ImplementationSpecific"` | | +| svc.ingress.rules[0].paths[1].service.name | string | `"goerli-test-wss"` | | +| svc.ingress.rules[0].paths[1].service.port.number | int | `6061` | | +| svc.rpc.backendConfig.connectionDraining.drainingTimeoutSec | int | `300` | | +| svc.rpc.backendConfig.customResponseHeaders.headers[0] | string | `"X-Frame-Options: SAMEORIGIN"` | | +| svc.rpc.backendConfig.customResponseHeaders.headers[1] | string | `"X-Content-Type-Options: nosniff"` | | +| svc.rpc.backendConfig.customResponseHeaders.headers[2] | string | `"X-XSS-Protection: 1; mode=block"` | | +| svc.rpc.backendConfig.customResponseHeaders.headers[3] | string | `"Referrer-Policy: no-referrer-when-downgrade"` | | +| svc.rpc.backendConfig.customResponseHeaders.headers[4] | string | `"Strict-Transport-Security: max-age=63072000; includeSubDomains; preload"` | | +| svc.rpc.backendConfig.healthCheck.checkIntervalSec | int | `15` | | +| svc.rpc.backendConfig.healthCheck.healthyThreshold | int | `1` | | +| svc.rpc.backendConfig.healthCheck.port | int | `6060` | | +| svc.rpc.backendConfig.healthCheck.requestPath | string | `"/"` | | +| svc.rpc.backendConfig.healthCheck.timeoutSec | int | `15` | | +| svc.rpc.backendConfig.healthCheck.type | string | `"HTTP"` | | +| svc.rpc.backendConfig.healthCheck.unhealthyThreshold | int | `2` | | +| svc.rpc.backendConfig.sessionAffinity.affinityType | string | `"CLIENT_IP"` | | +| svc.rpc.backendConfig.timeoutSec | int | `400` | | +| svc.rpc.clustertype | string | `"ClusterIP"` | | +| svc.rpc.port | string | `"6060"` | | +| svc.wss.backendConfig.connectionDraining.drainingTimeoutSec | int | `300` | | +| svc.wss.backendConfig.healthCheck.checkIntervalSec | int | `15` | | +| svc.wss.backendConfig.healthCheck.healthyThreshold | int | `1` | | +| svc.wss.backendConfig.healthCheck.port | int | `6060` | | +| svc.wss.backendConfig.healthCheck.timeoutSec | int | `15` | | +| svc.wss.backendConfig.healthCheck.type | string | `"HTTP"` | | +| svc.wss.backendConfig.healthCheck.unhealthyThreshold | int | `2` | | +| svc.wss.backendConfig.sessionAffinity.affinityType | string | `"CLIENT_IP"` | | +| svc.wss.backendConfig.timeoutSec | int | `400` | | +| svc.wss.clustertype | string | `"ClusterIP"` | | +| svc.wss.port | string | `"6061"` | | +| 
svc_lb.clustertype | string | `"LoadBalancer"` | | +| svc_lb.enabled | bool | `false` | | +| svc_lb.hostname | string | `"integration.juno.prod"` | | +| svc_lb.wss_hostname | string | `"integration-wss.juno.prod"` | | +| taintsToleration.enabled | bool | `false` | | +| taintsToleration.tolerations.network | string | `"juno"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/kong/README.md b/charts/kong/README.md index e27b40ee9..ecc7047e0 100644 --- a/charts/kong/README.md +++ b/charts/kong/README.md @@ -1,1192 +1,270 @@ -## Kong for Kubernetes - -[Kong for Kubernetes](https://github.com/Kong/kubernetes-ingress-controller) -is an open-source Ingress Controller for Kubernetes that offers -API management capabilities with a plugin architecture. - -This chart bootstraps all the components needed to run Kong on a -[Kubernetes](http://kubernetes.io) cluster using the -[Helm](https://helm.sh) package manager. - -## TL;DR; - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ helm install kong/kong --generate-name -``` - -## Table of contents - -- [Prerequisites](#prerequisites) -- [Install](#install) -- [Uninstall](#uninstall) -- [FAQs](#faqs) -- [Kong Enterprise](#kong-enterprise) -- [Deployment Options](#deployment-options) - - [Database](#database) - - [DB-less deployment](#db-less-deployment) - - [Using the Postgres sub-chart](#using-the-postgres-sub-chart) - - [Postgres sub-chart considerations for OpenShift](#postgres-sub-chart-considerations-for-openshift) - - [Runtime package](#runtime-package) - - [Configuration method](#configuration-method) - - [Separate admin and proxy nodes](#separate-admin-and-proxy-nodes) - - [Standalone controller nodes](#standalone-controller-nodes) - - [Hybrid mode](#hybrid-mode) - - [Certificates](#certificates) - - [Control plane node configuration](#control-plane-node-configuration) - - [Data plane node configuration](#data-plane-node-configuration) - - [Cert Manager Integration](#cert-manager-integration) - - [CRD management](#crd-management) - - [InitContainers](#initcontainers) - - [HostAliases](#hostaliases) - - [Sidecar Containers](#sidecar-containers) - - [Migration Sidecar Containers](#migration-sidecar-containers) - - [User Defined Volumes](#user-defined-volumes) - - [User Defined Volume Mounts](#user-defined-volume-mounts) - - [Removing cluster-scoped permissions](#removing-cluster-scoped-permissions) - - [Using a DaemonSet](#using-a-daemonset) - - [Using dnsPolicy and dnsConfig](#using-dnspolicy-and-dnsconfig) - - [Example configurations](#example-configurations) -- [Configuration](#configuration) - - [Kong parameters](#kong-parameters) - - [Kong Service Parameters](#kong-service-parameters) - - [Admin Service mTLS](#admin-service-mtls) - - [Stream listens](#stream-listens) - - [Ingress Controller Parameters](#ingress-controller-parameters) - - [The `env` section](#the-env-section) - - [The `customEnv` section](#the-customenv-section) - - [General Parameters](#general-parameters) - - [The `env` section](#the-env-section-1) - - [The `customEnv` section](#the-customenv-section-1) - - [The `extraLabels` section](#the-extralabels-section) -- [Kong Enterprise Parameters](#kong-enterprise-parameters) - - [Overview](#overview) - - [Prerequisites](#prerequisites-1) - - [Kong Enterprise License](#kong-enterprise-license) - - [Kong Enterprise Docker registry 
access](#kong-enterprise-docker-registry-access) - - [Service location hints](#service-location-hints) - - [RBAC](#rbac) - - [Sessions](#sessions) - - [Email/SMTP](#emailsmtp) -- [Prometheus Operator integration](#prometheus-operator-integration) -- [Argo CD considerations](#argo-cd-considerations) -- [Changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) -- [Upgrading](https://github.com/Kong/charts/blob/main/charts/kong/UPGRADE.md) -- [Seeking help](#seeking-help) - -## Prerequisites - -- Kubernetes 1.17+. Older chart releases support older Kubernetes versions. - Refer to the [supported version matrix](https://docs.konghq.com/kubernetes-ingress-controller/latest/references/version-compatibility/#kubernetes) - and the [chart changelog](https://github.com/Kong/charts/blob/main/charts/kong/CHANGELOG.md) - for information about the default chart controller versions and Kubernetes - versions supported by controller releases. -- PV provisioner support in the underlying infrastructure if persistence - is needed for Kong datastore. - -## Install - -To install Kong: - -```bash -$ helm repo add kong https://charts.konghq.com -$ helm repo update - -$ helm install kong/kong --generate-name -``` - -## Uninstall - -To uninstall/delete a Helm release `my-release`: - -```bash -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the -chart and deletes the release. - -> **Tip**: List all releases using `helm list` - -## FAQs - -Please read the -[FAQs](https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md) -document. - -## Kong Enterprise - -If using Kong Enterprise, several additional steps are necessary before -installing the chart: - -- Set `enterprise.enabled` to `true` in `values.yaml` file. -- Update values.yaml to use a Kong Enterprise image. -- Satisfy the two prerequisites below for - [Enterprise License](#kong-enterprise-license) and - [Enterprise Docker Registry](#kong-enterprise-docker-registry-access). -- (Optional) [set a `password` environment variable](#rbac) to create the - initial super-admin. Though not required, this is recommended for users that - wish to use RBAC, as it cannot be done after initial setup. - -Once you have these set, it is possible to install Kong Enterprise. - -Please read through -[Kong Enterprise considerations](#kong-enterprise-parameters) -to understand all settings that are enterprise specific. - -## Deployment Options - -Kong is a highly configurable piece of software that can be deployed -in a number of different ways, depending on your use-case. - -All combinations of various runtimes, databases and configuration methods are -supported by this Helm chart. -The recommended approach is to use the Ingress Controller based configuration -along-with DB-less mode. - -Following sections detail on various high-level architecture options available: - -### Database - -Kong can run with or without a database (DB-less). By default, this chart -installs Kong without a database. - -You can set the database the `env.database` parameter. For more details, please -read the [env](#the-env-section) section. - -#### DB-less deployment - -When deploying Kong in DB-less mode(`env.database: "off"`) -and without the Ingress Controller(`ingressController.enabled: false`), -you have to provide a [declarative configuration](https://docs.konghq.com/gateway-oss/latest/db-less-and-declarative-config/#the-declarative-configuration-format) for Kong to run. 
-You can provide an existing ConfigMap -(`dblessConfig.configMap`) or Secret (`dblessConfig.secret`) or place the whole -configuration into `values.yaml` (`dblessConfig.config`) parameter. See the -example configuration in the default values.yaml for more details. You can use -`--set-file dblessConfig.config=/path/to/declarative-config.yaml` in Helm -commands to substitute in a complete declarative config file. - -Note that externally supplied ConfigMaps are not hashed or tracked in deployment annotations. -Subsequent ConfigMap updates will require user-initiated new deployment rollouts -to apply the new configuration. You should run `kubectl rollout restart deploy` -after updating externally supplied ConfigMap content. - -#### Using the Postgres sub-chart - -The chart can optionally spawn a Postgres instance using [Bitnami's Postgres -chart](https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md) -as a sub-chart. Set `postgresql.enabled=true` to enable the sub-chart. Enabling -this will auto-populate Postgres connection settings in Kong's environment. - -The Postgres sub-chart is best used to quickly provision temporary environments -without installing and configuring your database separately. For longer-lived -environments, we recommend you manage your database outside the Kong Helm -release. - -##### Postgres sub-chart considerations for OpenShift - -Due to the default `securityContexts` in the postgres sub-chart, you will need to add the following values to the `postgresql` section to get postgres running on OpenShift: - -```yaml - volumePermissions: - enabled: false - securityContext: - runAsUser: "auto" - primary: - containerSecurityContext: - enabled: false - podSecurityContext: - enabled: false -``` - -### Runtime package - -There are three different packages of Kong that are available: - -- **Kong Gateway**\ - This is the [Open-Source](https://github.com/kong/kong) offering. It is a - full-blown API Gateway and Ingress solution with a wide-array of functionality. - When Kong Gateway is combined with the Ingress based configuration method, - you get Kong for Kubernetes. This is the default deployment for this Helm - Chart. -- **Kong Enterprise K8S**\ - This package builds up on top of the Open-Source Gateway and bundles in all - the Enterprise-only plugins as well. - When Kong Enterprise K8S is combined with the Ingress based - configuration method, you get Kong for Kubernetes Enterprise. - This package also comes with 24x7 support from Kong Inc. -- **Kong Enterprise**\ - This is the full-blown Enterprise package which packs with itself all the - Enterprise functionality like Manager, Portal, Vitals, etc. - This package can't be run in DB-less mode. - -The package to run can be changed via `image.repository` and `image.tag` -parameters. If you would like to run the Enterprise package, please read -the [Kong Enterprise Parameters](#kong-enterprise-parameters) section. - -### Configuration method - -Kong can be configured via two methods: -- **Ingress and CRDs**\ - The configuration for Kong is done via `kubectl` and Kubernetes-native APIs. - This is also known as Kong Ingress Controller or Kong for Kubernetes and is - the default deployment pattern for this Helm Chart. The configuration - for Kong is managed via Ingress and a few - [Custom Resources](https://docs.konghq.com/kubernetes-ingress-controller/latest/concepts/custom-resources). 
- For more details, please read the - [documentation](https://docs.konghq.com/kubernetes-ingress-controller/) - on Kong Ingress Controller. - To configure and fine-tune the controller, please read the - [Ingress Controller Parameters](#ingress-controller-parameters) section. -- **Admin API**\ - This is the traditional method of running and configuring Kong. - By default, the Admin API of Kong is not exposed as a Service. This - can be controlled via `admin.enabled` and `env.admin_listen` parameters. - -### Separate admin and proxy nodes - -*Note: although this section is titled "Separate admin and proxy nodes", this -split release technique is generally applicable to any deployment with -different types of Kong nodes. Separating Admin API and proxy nodes is one of -the more common use cases for splitting across multiple releases, but you can -also split releases for split proxy and Developer Portal nodes, multiple groups -of proxy nodes with separate listen configurations for network segmentation, etc. -However, it does not apply to hybrid mode, as only the control plane release -interacts with the database.* - -Users may wish to split their Kong deployment into multiple instances that only -run some of Kong's services (i.e. you run `helm install` once for every -instance type you wish to create). - -To disable Kong services on an instance, you should set `SVC.enabled`, -`SVC.http.enabled`, `SVC.tls.enabled`, and `SVC.ingress.enabled` all to -`false`, where `SVC` is `proxy`, `admin`, `manager`, `portal`, or `portalapi`. - -The standard chart upgrade automation process assumes that there is only a -single Kong release in the Kong cluster, and runs both `migrations up` and -`migrations finish` jobs. To handle clusters split across multiple releases, -you should: -1. Upgrade one of the releases with `helm upgrade RELEASENAME -f values.yaml - --set migrations.preUpgrade=true --set migrations.postUpgrade=false`. -2. Upgrade all but one of the remaining releases with `helm upgrade RELEASENAME - -f values.yaml --set migrations.preUpgrade=false --set - migrations.postUpgrade=false`. -3. Upgrade the final release with `helm upgrade RELEASENAME -f values.yaml - --set migrations.preUpgrade=false --set migrations.postUpgrade=true`. - -This ensures that all instances are using the new Kong package before running -`kong migrations finish`. - -Users should note that Helm supports supplying multiple values.yaml files, -allowing you to separate shared configuration from instance-specific -configuration. For example, you may have a shared values.yaml that contains -environment variables and other common settings, and then several -instance-specific values.yamls that contain service configuration only. You can -then create releases with: - -```bash -helm install proxy-only -f shared-values.yaml -f only-proxy.yaml kong/kong -helm install admin-only -f shared-values.yaml -f only-admin.yaml kong/kong -``` - -### Standalone controller nodes - -The chart can deploy releases that contain the controller only, with no Kong -container, by setting `deployment.kong.enabled: false` in values.yaml. There -are several controller settings that must be populated manually in this -scenario and several settings that are useful when using multiple controllers: - -* `ingressController.env.kong_admin_url` must be set to the Kong Admin API URL. 
-  If the Admin API is exposed by a service in the cluster, this should look
-  something like `https://my-release-kong-admin.kong-namespace.svc:8444`
-* `ingressController.env.publish_service` must be set to the Kong proxy
-  service, e.g. `namespace/my-release-kong-proxy`.
-* `ingressController.ingressClass` should be set to a different value for each
-  instance of the controller.
-* `ingressController.env.kong_admin_filter_tag` should be set to a different value
-  for each instance of the controller.
-* If using Kong Enterprise, `ingressController.env.kong_workspace` can
-  optionally create configuration in a workspace other than `default`.
-
-Standalone controllers require a database-backed Kong instance, as DB-less mode
-requires that a single controller generate a complete Kong configuration.
-
-### Hybrid mode
-
-Kong supports [hybrid mode
-deployments](https://docs.konghq.com/2.0.x/hybrid-mode/) as of Kong 2.0.0 and
-[Kong Enterprise 2.1.0](https://docs.konghq.com/enterprise/2.1.x/deployment/hybrid-mode/).
-These deployments split Kong nodes into control plane (CP) nodes, which provide
-the admin API and interact with the database, and data plane (DP) nodes, which
-provide the proxy and receive configuration from control plane nodes.
-
-You can deploy hybrid mode Kong clusters by [creating separate releases for each node
-type](#separate-admin-and-proxy-nodes), i.e. use separate control and data
-plane values.yamls that are then installed separately. The [control
-plane](#control-plane-node-configuration) and [data
-plane](#data-plane-node-configuration) configuration sections below cover the
-values.yaml specifics for each.
-
-Cluster certificates are not generated automatically. You must [create a
-certificate and key pair](#certificates) for intra-cluster communication.
-
-When upgrading the Kong version, you must [upgrade the control plane release
-first and then upgrade the data plane release](https://docs.konghq.com/gateway/latest/plan-and-deploy/hybrid-mode/#version-compatibility).
-
-#### Certificates
-
-> This example shows how to use Kong Hybrid mode with `cluster_mtls: shared`.
-> For an example of `cluster_mtls: pki` see the [hybrid-cert-manager example](https://github.com/Kong/charts/blob/main/charts/kong/example-values/hybrid-cert-manager/)
-
-Hybrid mode uses TLS to secure the CP/DP node communication channel, and
-requires certificates for it. You can generate these either using `kong hybrid
-gen_cert` on a local Kong installation or using OpenSSL:
-
-```bash
-openssl req -new -x509 -nodes -newkey ec:<(openssl ecparam -name secp384r1) \
-  -keyout /tmp/cluster.key -out /tmp/cluster.crt \
-  -days 1095 -subj "/CN=kong_clustering"
-```
-
-You must then place these certificates in a Secret:
-
-```bash
-kubectl create secret tls kong-cluster-cert --cert=/tmp/cluster.crt --key=/tmp/cluster.key
-```
-
-#### Control plane node configuration
-
-You must configure the control plane nodes to mount the certificate secret on
-the container filesystem and serve it from the cluster listen.
In values.yaml: - -```yaml -secretVolumes: -- kong-cluster-cert -``` - -```yaml -env: - role: control_plane - cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt - cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key -``` - -Furthermore, you must enable the cluster listen and Kubernetes Service, and -should typically disable the proxy: - -```yaml -cluster: - enabled: true - tls: - enabled: true - servicePort: 8005 - containerPort: 8005 - -proxy: - enabled: false -``` - -Enterprise users with Vitals enabled must also enable the cluster telemetry -service: - -```yaml -clustertelemetry: - enabled: true - tls: - enabled: true - servicePort: 8006 - containerPort: 8006 -``` - -If using the ingress controller, you must also specify the DP proxy service as -its publish target to keep Ingress status information up to date: - -``` -ingressController: - env: - publish_service: hybrid/example-release-data-kong-proxy -``` - -Replace `hybrid` with your DP nodes' namespace and `example-release-data` with -the name of the DP release. - -#### Data plane node configuration - -Data plane configuration also requires the certificate and `role` -configuration, and the database should always be set to `off`. You must also -trust the cluster certificate and indicate what hostname/port Kong should use -to find control plane nodes. - -Though not strictly required, you should disable the admin service (it will not -work on DP nodes anyway, but should be disabled to avoid creating an invalid -Service resource). - -```yaml -secretVolumes: -- kong-cluster-cert -``` - -```yaml -admin: - enabled: false -``` - -```yaml -env: - role: data_plane - database: "off" - cluster_cert: /etc/secrets/kong-cluster-cert/tls.crt - cluster_cert_key: /etc/secrets/kong-cluster-cert/tls.key - lua_ssl_trusted_certificate: /etc/secrets/kong-cluster-cert/tls.crt - cluster_control_plane: control-plane-release-name-kong-cluster.hybrid.svc.cluster.local:8005 - cluster_telemetry_endpoint: control-plane-release-name-kong-clustertelemetry.hybrid.svc.cluster.local:8006 # Enterprise-only -``` - -Note that the `cluster_control_plane` value will differ depending on your -environment. `control-plane-release-name` will change to your CP release name, -`hybrid` will change to whatever namespace it resides in. See [Kubernetes' -documentation on Service -DNS](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) -for more detail. - -### Cert Manager Integration - -By default, Kong will create self-signed certificates on start for its TLS -listens if you do not provide your own. The chart can create -[cert-manager](https://cert-manager.io/docs/) Certificates for its Services and -configure them for you. To use this integration, install cert-manager, create -an issuer, set `certificates.enabled: true` in values.yaml, and set your issuer -name in `certificates.issuer` or `certificates.clusterIssuer` depending on the -issuer type. - -If you do not have an issuer available, you can install the example [self-signed ClusterIssuer](https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers) -and set `certificates.clusterIssuer: selfsigned-issuer` for testing. You -should, however, migrate to an issuer using a CA your clients trust for actual -usage. 
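For illustration, a minimal values.yaml sketch of the integration described above might look like the following (the `selfsigned-issuer` name assumes you installed the example self-signed ClusterIssuer; swap in an issuer backed by a CA your clients trust for real use):

```yaml
certificates:
  enabled: true
  # Name of a cert-manager ClusterIssuer used for the chart-managed Certificates.
  # Placeholder: the example self-signed ClusterIssuer from the cert-manager docs.
  clusterIssuer: selfsigned-issuer
```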
-
-The `proxy`, `admin`, `portal`, and `cluster` subsections under `certificates`
-let you choose hostnames, override issuers, set `subject` or set `privateKey` on a per-certificate basis for the
-proxy, admin API and Manager, Portal and Portal API, and hybrid mode mTLS
-services, respectively.
-
-To use hybrid mode, the control and data plane releases must use the same
-issuer for their cluster certificates.
-
-### CRD management
-
-Earlier versions of this chart (<2.0) created CRDs associated with the ingress
-controller as part of the release. This raised two challenges:
-
-- Multiple releases of the chart would conflict with one another, as each would
-  attempt to create its own set of CRDs.
-- Because deleting a CRD also deletes any custom resources associated with it,
-  deleting a release of the chart could destroy user configuration without
-  providing any means to restore it.
-
-Helm 3 introduced a simplified CRD management method that was safer, but
-requires some manual work when a chart added or modified CRDs: CRDs are created
-on install if they are not already present, but are not modified during
-release upgrades or deletes. Our chart release upgrade instructions call out
-when manual action is necessary to update CRDs. This CRD handling strategy is
-recommended for most users.
-
-Some users may wish to manage their CRDs automatically. If you manage your CRDs
-this way, we _strongly_ recommend that you back up all associated custom
-resources in the event you need to recover from unintended CRD deletion.
-
-While Helm 3's CRD management system is recommended, there is no simple means
-of migrating away from release-managed CRDs if you previously installed your
-release with the old system (you would need to back up your existing custom
-resources, delete your release, reinstall, and restore your custom resources
-after). As such, the chart detects if you currently use release-managed CRDs
-and continues to use the old CRD templates when using chart version 2.0+. If
-you do (your resources will have a `meta.helm.sh/release-name` annotation), we
-_strongly_ recommend that you back up all associated custom resources in the
-event you need to recover from unintended CRD deletion.
-
-### InitContainers
-
-The chart is able to deploy initContainers along with Kong. This can be very
-useful when there's a requirement for custom initialization. The
-`deployment.initContainers` field in values.yaml takes an array of objects that
-get appended as-is to the existing `spec.template.initContainers` array in the
-kong deployment resource.
-
-### HostAliases
-
-The chart is able to inject host aliases into containers. This can be very useful
-when it's required to resolve additional domain names which can't be looked up
-directly from the DNS server. The `deployment.hostAliases` field in values.yaml
-takes an array of objects that is set to the `spec.template.hostAliases` field in the
-kong deployment resource.
-
-### Sidecar Containers
-
-The chart can deploy additional containers along with the Kong and Ingress
-Controller containers, sometimes referred to as "sidecar containers". This can
-be useful to include network proxies or logging services along with Kong. The
-`deployment.sidecarContainers` field in values.yaml takes an array of objects
-that get appended as-is to the existing `spec.template.spec.containers` array
-in the Kong deployment resource.
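As a rough sketch of the shape this takes, each entry is a plain Kubernetes container spec; the name and image below are placeholders for whatever proxy or logging agent you actually run alongside Kong:

```yaml
deployment:
  sidecarContainers:
    # Appended as-is to spec.template.spec.containers of the Kong Deployment.
    - name: logging-agent          # placeholder name
      image: busybox:1.36          # placeholder image
      command: ["sh", "-c", "tail -f /dev/null"]
      resources:
        requests:
          cpu: 10m
          memory: 16Mi
```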
-
-### Migration Sidecar Containers
-
-In the same way sidecar containers are attached to the Kong and Ingress
-Controller containers, the chart can add sidecars to the containers that run
-the migrations. The
-`migrations.sidecarContainers` field in values.yaml takes an array of objects
-that get appended as-is to the existing `spec.template.spec.containers` array
-in the pre-upgrade-migrations, post-upgrade-migrations and migration resources.
-Keep in mind that these containers should be finite and should terminate along
-with the migration containers; otherwise the migration may never be reported
-as finished and the chart deployment will reach its timeout.
-
-### User Defined Volumes
-
-The chart can deploy additional volumes along with Kong. This can be useful to
-include additional volumes which are required during the initialization phase
-(initContainers). The `deployment.userDefinedVolumes` field in values.yaml
-takes an array of objects that get appended as-is to the existing
-`spec.template.spec.volumes` array in the kong deployment resource.
-
-### User Defined Volume Mounts
-
-The chart can mount user-defined volumes. The
-`deployment.userDefinedVolumeMounts` and
-`ingressController.userDefinedVolumeMounts` fields in values.yaml take an array
-of objects that get appended as-is to the existing
-`spec.template.spec.containers[].volumeMounts` and
-`spec.template.spec.initContainers[].volumeMounts` arrays in the kong deployment
-resource.
-
-### Removing cluster-scoped permissions
-
-You can limit the controller's access to allow it to only watch specific
-namespaces for namespaced resources. By default, the controller watches all
-namespaces. Limiting access requires several changes to configuration:
-
-- Set `ingressController.watchNamespaces` to a list of namespaces you want to
-  watch. The chart will automatically generate roles for each namespace and
-  assign them to the controller's service account.
-- Optionally set `ingressController.installCRDs=false` if your user role (the
-  role you use when running `helm install`, not the controller service
-  account's role) does not have access to get CRDs. By default, the chart
-  attempts to look up the controller CRDs for [a legacy behavior
-  check](#crd-management).
-
-### Using a DaemonSet
-
-Setting `deployment.daemonset: true` deploys Kong using a [DaemonSet
-controller](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/)
-instead of a Deployment controller. This runs a Kong Pod on every kubelet in
-the Kubernetes cluster.
-
-### Using dnsPolicy and dnsConfig
-
-The chart is able to inject custom DNS configuration into containers. This can be useful when you have an EKS cluster with [NodeLocal DNSCache](https://kubernetes.io/docs/tasks/administer-cluster/nodelocaldns/) configured and attach AWS security groups directly to pods using the [security groups for pods feature](https://docs.aws.amazon.com/eks/latest/userguide/security-groups-for-pods.html); a minimal sketch is shown below.
-
-### Example configurations
-
-Several example values.yaml are available in the
-[example-values](https://github.com/Kong/charts/blob/main/charts/kong/example-values/)
-directory.
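Tying the `deployment.daemonset` and DNS settings described above together, a hedged sketch (the nameserver address is a placeholder for your cluster's node-local DNS cache) could look like:

```yaml
deployment:
  daemonset: true        # run one Kong pod per kubelet instead of a Deployment
dnsPolicy: "None"        # ignore the default cluster DNS settings
dnsConfig:
  nameservers:
    - 169.254.20.10      # placeholder: NodeLocal DNSCache address in your cluster
  searches:
    - svc.cluster.local
    - cluster.local
  options:
    - name: ndots
      value: "5"
```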
-
-## Configuration
-
-### Kong parameters
-
-| Parameter | Description | Default |
-| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- |
-| image.repository | Kong image | `kong` |
-| image.tag | Kong image version | `2.5` |
-| image.pullPolicy | Image pull policy | `IfNotPresent` |
-| image.pullSecrets | Image pull secrets | `null` |
-| replicaCount | Kong instance count. It has no effect when `autoscaling.enabled` is set to true | `1` |
-| plugins | Install custom plugins into Kong via ConfigMaps or Secrets | `{}` |
-| env | Additional [Kong configurations](https://getkong.org/docs/latest/configuration/) | |
-| customEnv | Custom Environment variables without `KONG_` prefix | |
-| migrations.preUpgrade | Run "kong migrations up" jobs | `true` |
-| migrations.postUpgrade | Run "kong migrations finish" jobs | `true` |
-| migrations.annotations | Annotations for migration job pods | `{"sidecar.istio.io/inject": "false"}` |
-| migrations.jobAnnotations | Additional annotations for migration jobs | `{}` |
-| migrations.backoffLimit | Override the system backoffLimit | `{}` |
-| waitImage.enabled | Spawn init containers that wait for the database before starting Kong | `true` |
-| waitImage.repository | Image used to wait for database to become ready. Uses the Kong image if none set | |
-| waitImage.tag | Tag for image used to wait for database to become ready | |
-| waitImage.pullPolicy | Wait image pull policy | `IfNotPresent` |
-| postgresql.enabled | Spin up a new postgres instance for Kong | `false` |
-| dblessConfig.configMap | Name of an existing ConfigMap containing the `kong.yml` file. This must have the key `kong.yml`. | `` |
-| dblessConfig.config | Yaml configuration file for the dbless (declarative) configuration of Kong | see in `values.yaml` |
-
-#### Kong Service Parameters
-
-The various `SVC.*` parameters below are common to the various Kong services
-(the admin API, proxy, Kong Manager, the Developer Portal, and the Developer
-Portal API) and define their listener configuration, K8S Service properties,
-and K8S Ingress properties. Defaults are listed only if consistent across the
-individual services: see values.yaml for their individual default values.
-
-`SVC` below can be substituted with each of:
-* `proxy`
-* `udpProxy`
-* `admin`
-* `manager`
-* `portal`
-* `portalapi`
-* `cluster`
-* `clustertelemetry`
-* `status`
-
-`status` is intended for internal use within the cluster. Unlike other
-services it cannot be exposed externally, and cannot create a Kubernetes
-service or ingress. It supports the settings under `SVC.http` and `SVC.tls`
-only.
-
-`cluster` is used on hybrid mode control plane nodes. It does not support the
-`SVC.http.*` settings (cluster communications must be TLS-only) or the
-`SVC.ingress.*` settings (cluster communication requires TLS client
-authentication, which cannot pass through an ingress proxy). `clustertelemetry`
-is similar, and used when Vitals is enabled on Kong Enterprise control plane
-nodes.
-
-`udpProxy` is used for UDP stream listens (Kubernetes does not yet support
-mixed TCP/UDP LoadBalancer Services). It _does not_ support the `http`, `tls`,
-or `ingress` sections, as it is used only for stream listens.
-
-| Parameter | Description | Default |
-|------------------------------------|-----------------------------------------------------------------------------------------|--------------------------|
-| SVC.enabled | Create Service resource for SVC (admin, proxy, manager, etc.) | |
-| SVC.http.enabled | Enables http on the service | |
-| SVC.http.servicePort | Service port to use for http | |
-| SVC.http.containerPort | Container port to use for http | |
-| SVC.http.nodePort | Node port to use for http | |
-| SVC.http.hostPort | Host port to use for http | |
-| SVC.http.parameters | Array of additional listen parameters | `[]` |
-| SVC.tls.enabled | Enables TLS on the service | |
-| SVC.tls.containerPort | Container port to use for TLS | |
-| SVC.tls.servicePort | Service port to use for TLS | |
-| SVC.tls.nodePort | Node port to use for TLS | |
-| SVC.tls.hostPort | Host port to use for TLS | |
-| SVC.tls.overrideServiceTargetPort | Override service port to use for TLS without touching Kong containerPort | |
-| SVC.tls.parameters | Array of additional listen parameters | `["http2"]` |
-| SVC.type | k8s service type. Options: NodePort, ClusterIP, LoadBalancer | |
-| SVC.clusterIP | k8s service clusterIP | |
-| SVC.loadBalancerClass | loadBalancerClass to use for LoadBalancer provisioning | |
-| SVC.loadBalancerSourceRanges | Limit service access to CIDRs if set and service type is `LoadBalancer` | `[]` |
-| SVC.loadBalancerIP | Reuse an existing ingress static IP for the service | |
-| SVC.externalIPs | IPs for which nodes in the cluster will also accept traffic for the service | `[]` |
-| SVC.externalTrafficPolicy | k8s service's externalTrafficPolicy. Options: Cluster, Local | |
-| SVC.ingress.enabled | Enable ingress resource creation (works with SVC.type=ClusterIP) | `false` |
-| SVC.ingress.ingressClassName | Set the ingressClassName to associate this Ingress with an IngressClass | |
-| SVC.ingress.hostname | Ingress hostname | `""` |
-| SVC.ingress.path | Ingress path. | `/` |
-| SVC.ingress.pathType | Ingress pathType. One of `ImplementationSpecific`, `Exact` or `Prefix` | `ImplementationSpecific` |
-| SVC.ingress.hosts | Slice of hosts configurations, including `hostname`, `path` and `pathType` keys | `[]` |
-| SVC.ingress.tls | Name of secret resource or slice of `secretName` and `hosts` keys | |
-| SVC.ingress.annotations | Ingress annotations. See documentation for your ingress controller for details | `{}` |
-| SVC.ingress.labels | Ingress labels. Additional custom labels to add to the ingress. | `{}` |
-| SVC.annotations | Service annotations | `{}` |
-| SVC.labels | Service labels | `{}` |
-
-#### Admin Service mTLS
-
-On top of the common parameters listed above, the `admin` service supports parameters for mTLS client verification.
-If any of `admin.tls.client.caBundle` or `admin.tls.client.secretName` are set, the admin service will be configured to
-require mTLS client verification. If both are set, `admin.tls.client.caBundle` will take precedence.
-
-| Parameter | Description | Default |
-|-----------------------------|---------------------------------------------------------------------------------------------|---------|
-| admin.tls.client.caBundle | CA certificate to use for TLS verification of the Admin API client (PEM-encoded). | `""` |
-| admin.tls.client.secretName | CA certificate secret name - must contain a `tls.crt` key with the PEM-encoded certificate. | `""` |
-
-#### Stream listens
-
-The proxy configuration additionally supports creating stream listens.
These -are configured using an array of objects under `proxy.stream` and `udpProxy.stream`: - -| Parameter | Description | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- | -| protocol | The listen protocol, either "TCP" or "UDP" | | -| containerPort | Container port to use for a stream listen | | -| servicePort | Service port to use for a stream listen | | -| nodePort | Node port to use for a stream listen | | -| hostPort | Host port to use for a stream listen | | -| parameters | Array of additional listen parameters | `[]` | - -### Ingress Controller Parameters - -All of the following properties are nested under the `ingressController` -section of `values.yaml` file: - -| Parameter | Description | Default | -|--------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------| -| enabled | Deploy the ingress controller, rbac and crd | true | -| image.repository | Docker image with the ingress controller | kong/kubernetes-ingress-controller | -| image.tag | Version of the ingress controller | 2.0 | -| image.effectiveSemver | Version of the ingress controller used for version-specific features when image.tag is not a valid semantic version | | -| readinessProbe | Kong ingress controllers readiness probe | | -| livenessProbe | Kong ingress controllers liveness probe | | -| installCRDs | Legacy toggle for Helm 2-style CRD management. Should not be set [unless necessary due to cluster permissions](#removing-cluster-scoped-permissions). | false | -| env | Specify Kong Ingress Controller configuration via environment variables | | -| customEnv | Specify custom environment variables (without the CONTROLLER_ prefix) | | -| ingressClass | The name of this controller's ingressClass | kong | -| ingressClassAnnotations | The ingress-class value for controller | kong | -| args | List of ingress-controller cli arguments | [] | -| watchNamespaces | List of namespaces to watch. Watches all namespaces if empty | [] | -| admissionWebhook.enabled | Whether to enable the validating admission webhook | true | -| admissionWebhook.failurePolicy | How unrecognized errors from the admission endpoint are handled (Ignore or Fail) | Ignore | -| admissionWebhook.port | The port the ingress controller will listen on for admission webhooks | 8080 | -| admissionWebhook.annotations | Annotations for the Validation Webhook Configuration | | -| admissionWebhook.certificate.provided | Use a provided certificate. When set to false, the chart will automatically generate a certificate. | false | -| admissionWebhook.certificate.secretName | Name of the TLS secret for the provided webhook certificate | | -| admissionWebhook.certificate.caBundle | PEM encoded CA bundle which will be used to validate the provided webhook certificate | | -| admissionWebhook.namespaceSelector | Add namespaceSelector to the webhook. Please go to [Kubernetes doc for the specs](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector) | | -| userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | | -| userDefinedVolumeMounts | Create volumeMounts. 
Please go to Kubernetes doc for the spec of the volumeMounts | | -| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pod | 30 | -| gatewayDiscovery.enabled | Enables Kong instance service discovery (for more details see [gatewayDiscovery section][gd_section]) | false | -| gatewayDiscovery.generateAdminApiService | Generate the admin API service name based on the release name (for more details see [gatewayDiscovery section][gd_section]) | false | -| gatewayDiscovery.adminApiService.namespace | The namespace of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | `.Release.Namespace` | -| gatewayDiscovery.adminApiService.name | The name of the Kong admin API service (for more details see [gatewayDiscovery section][gd_section]) | "" | -| konnect.enabled | Enable synchronisation of data plane configuration with Konnect Runtime Group | false | -| konnect.runtimeGroupID | Konnect Runtime Group's unique identifier. | | -| konnect.apiHostname | Konnect API hostname. Defaults to a production US-region. | us.kic.api.konghq.com | -| konnect.tlsClientCertSecretName | Name of the secret that contains Konnect Runtime Group's client TLS certificate. | konnect-client-tls | -| konnect.license.enabled | Enable automatic license provisioning for Gateways managed by Ingress Controller in Konnect mode. | false | -| adminApi.tls.client.enabled | Enable TLS client verification for the Admin API. By default, Helm will generate certificates automatically. | false | -| adminApi.tls.client.certProvided | Use user-provided certificates. If set to false, Helm will generate certificates. | false | -| adminApi.tls.client.secretName | Client TLS certificate/key pair secret name. Can be also set when `certProvided` is false to enforce a generated secret's name. | "" | -| adminApi.tls.client.caSecretName | CA TLS certificate/key pair secret name. Can be also set when `certProvided` is false to enforce a generated secret's name. | "" | - -[gd_section]: #the-gatewayDiscovery-section - -#### The `env` section -For a complete list of all configuration values you can set in the -`env` section, please read the Kong Ingress Controller's -[configuration document](https://github.com/Kong/docs.konghq.com/blob/main/src/kubernetes-ingress-controller/references/cli-arguments.md). - -#### The `customEnv` section - -The `customEnv` section can be used to configure all environment variables other than Ingress Controller configuration. -Any key value put under this section translates to environment variables. -Every key is upper-cased before setting the environment variable. - -An example: - -```yaml -kong: - ingressController: - customEnv: - TZ: "Europe/Berlin" -``` - -#### The `gatewayDiscovery` section - -Kong Ingress Controller v2.9 has introduced gateway discovery which allows -the controller to discover Gateway instances that it should configure using -an Admin API Kubernetes service. - -You'll be able to configure this feature through configuration section under -`ingressController.gatewayDiscovery`: - -- If `ingressController.gatewayDiscovery.enabled` is set to `false`: the ingress controller - will control a pre-determined set of Gateway instances based on Admin API URLs - (provided under the hood via `CONTROLLER_KONG_ADMIN_URL` environment variable). 
- -- If `ingressController.gatewayDiscovery.enabled` is set to `true`: the ingress controller - will dynamically locate Gateway instances by watching the specified Kubernetes - service. - (provided under the hood via `CONTROLLER_KONG_ADMIN_SVC` environment variable). - - The following admin API Service flags have to be present in order for gateway - discovery to work: - - - `ingressController.gatewayDiscovery.adminApiService.name` - - `ingressController.gatewayDiscovery.adminApiService.namespace` - - If you set `ingressController.gatewayDiscovery.generateAdminApiService` to `true`, - the chart will generate values for `name` and `namespace` based on the current release name and - namespace. This is useful when consuming the `kong` chart as a subchart. - -Using this feature requires a split release installation of Gateways and Ingress Controller. -For exemplar `values.yaml` files which use this feature please see: [examples README.md](./example-values/README.md). - -When using `gatewayDiscovery`, you should consider configuring the Admin service to use mTLS client verification to make -this interface secure. Without that, anyone who can access the Admin API from inside the cluster can configure the Gateway -instances. - -On the controller release side, that can be achieved by setting `ingressController.adminApi.tls.client.enabled` to `true`. -By default, Helm will generate a certificate Secret named `-admin-api-keypair` and -a CA Secret named `-admin-api-ca-keypair` for you. - -To provide your own cert, set `ingressController.adminApi.tls.client.certProvided` to -`true`, `ingressController.adminApi.tls.client.secretName` to the name of the Secret containing your client cert, and `ingressController.adminApi.tls.client.caSecretName` to the name of the Secret containing your CA cert. - -On the Gateway release side, set either `admin.tls.client.secretName` to the name of your CA Secret or set `admin.tls.client.caBundle` to the CA certificate string. - -### General Parameters - -| Parameter | Description | Default | -| ---------------------------------- | ------------------------------------------------------------------------------------- | ------------------- | -| namespace | Namespace to deploy chart resources | | -| deployment.kong.enabled | Enable or disable deploying Kong | `true` | -| deployment.minReadySeconds | Minimum number of seconds for which newly created pods should be ready without any of its container crashing, for it to be considered available. | | -| deployment.initContainers | Create initContainers. Please go to Kubernetes doc for the spec of the initContainers | | -| deployment.daemonset | Use a DaemonSet instead of a Deployment | `false` | -| deployment.hostNetwork | Enable hostNetwork, which binds to the ports to the host | `false` | -| deployment.userDefinedVolumes | Create volumes. Please go to Kubernetes doc for the spec of the volumes | | -| deployment.userDefinedVolumeMounts | Create volumeMounts. Please go to Kubernetes doc for the spec of the volumeMounts | | -| deployment.serviceAccount.create | Create Service Account for the Deployment / Daemonset and the migrations | `true` | -| deployment.serviceAccount.automountServiceAccountToken | Enable ServiceAccount token automount in Kong deployment | `false` | -| deployment.serviceAccount.name | Name of the Service Account, a default one will be generated if left blank. 
| "" |
-| deployment.serviceAccount.annotations | Annotations for the Service Account | {} |
-| deployment.test.enabled | Enable creation of test resources for use with "helm test" | `false` |
-| autoscaling.enabled | Set this to `true` to enable autoscaling | `false` |
-| autoscaling.minReplicas | Set minimum number of replicas | `2` |
-| autoscaling.maxReplicas | Set maximum number of replicas | `5` |
-| autoscaling.behavior | Sets the [behavior for scaling up and down](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior) | `{}` |
-| autoscaling.targetCPUUtilizationPercentage | Target percentage at which autoscaling takes effect. Only used if cluster does not support `autoscaling/v2` or `autoscaling/v2beta2` | `80` |
-| autoscaling.metrics | metrics used for autoscaling for clusters that support `autoscaling/v2` or `autoscaling/v2beta2` | See [values.yaml](values.yaml) |
-| updateStrategy | update strategy for deployment | `{}` |
-| readinessProbe | Kong readiness probe | |
-| livenessProbe | Kong liveness probe | |
-| startupProbe | Kong startup probe | |
-| lifecycle | Proxy container lifecycle hooks | see `values.yaml` |
-| terminationGracePeriodSeconds | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pods | 30 |
-| affinity | Node/pod affinities | |
-| topologySpreadConstraints | Control how Pods are spread across cluster among failure-domains | |
-| nodeSelector | Node labels for pod assignment | `{}` |
-| deploymentAnnotations | Annotations to add to deployment | see `values.yaml` |
-| podAnnotations | Annotations to add to each pod | see `values.yaml` |
-| podLabels | Labels to add to each pod | `{}` |
-| resources | Pod resource requests & limits | `{}` |
-| tolerations | List of node taints to tolerate | `[]` |
-| dnsPolicy | Pod dnsPolicy | |
-| dnsConfig | Pod dnsConfig | |
-| podDisruptionBudget.enabled | Enable PodDisruptionBudget for Kong | `false` |
-| podDisruptionBudget.maxUnavailable | Represents the maximum number of Pods that can be unavailable (integer or percentage) | `50%` |
-| podDisruptionBudget.minAvailable | Represents the number of Pods that must be available (integer or percentage) | |
-| podSecurityPolicy.enabled | Enable podSecurityPolicy for Kong | `false` |
-| podSecurityPolicy.labels | Labels to add to podSecurityPolicy for Kong | `{}` |
-| podSecurityPolicy.annotations | Annotations to add to podSecurityPolicy for Kong | `{}` |
-| podSecurityPolicy.spec | Collection of [PodSecurityPolicy settings](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#what-is-a-pod-security-policy) | |
-| priorityClassName | Set pod scheduling priority class for Kong pods | `""` |
-| secretVolumes | Mount given secrets as a volume in Kong container to override default certs and keys.
| `[]` |
-| securityContext | Set the securityContext for Kong Pods | `{}` |
-| containerSecurityContext | Set the securityContext for Containers | `{"readOnlyRootFilesystem": true}` |
-| serviceMonitor.enabled | Create ServiceMonitor for Prometheus Operator | `false` |
-| serviceMonitor.interval | Scraping interval | `30s` |
-| serviceMonitor.namespace | Where to create ServiceMonitor | |
-| serviceMonitor.labels | ServiceMonitor labels | `{}` |
-| serviceMonitor.targetLabels | ServiceMonitor targetLabels | `{}` |
-| serviceMonitor.honorLabels | ServiceMonitor honorLabels | `{}` |
-| serviceMonitor.metricRelabelings | ServiceMonitor metricRelabelings | `{}` |
-| extraConfigMaps | ConfigMaps to add to mounted volumes | `[]` |
-| extraSecrets | Secrets to add to mounted volumes | `[]` |
-| nameOverride | Replaces "kong" in resource names, like "RELEASENAME-nameOverride" instead of "RELEASENAME-kong" | `""` |
-| fullnameOverride | Overrides the entire resource name string | `""` |
-| extraObjects | Create additional k8s resources | `[]` |
-**Note:** If you are using `deployment.hostNetwork` to bind to lower ports (< 1024), which may be the desired option (ports 80 and 443), you also
-need to tweak the `containerSecurityContext` configuration as in the example:
-
-```yaml
-containerSecurityContext: # run as root to bind to lower ports
-  capabilities:
-    add: [NET_BIND_SERVICE]
-  runAsGroup: 0
-  runAsNonRoot: false
-  runAsUser: 0
-```
-
-**Note:** The default `podAnnotations` values disable inbound proxying for Kuma
-and Istio. This is appropriate when using Kong as a gateway for external
-traffic inbound into the cluster.
-
-If you want to use Kong as an internal proxy within the cluster network, you
-should enable the inbound mesh proxies:
-
-```yaml
-# Enable inbound mesh proxying for Kuma and Istio
-podAnnotations:
-  kuma.io/gateway: disabled
-  traffic.sidecar.istio.io/includeInboundPorts: "*"
-```
-
-#### The `env` section
-
-The `env` section can be used to configure all properties of Kong.
-Any key value put under this section translates to environment variables
-used to control Kong's configuration. Every key is prefixed with `KONG_`
-and upper-cased before setting the environment variable.
-
-Furthermore, all `kong.env` parameters can also accept a mapping instead of a
-value to ensure the parameters can be set through configmaps and secrets.
-
-An example:
-
-```yaml
-kong:
-  env: # load PG password from a secret dynamically
-    pg_user: kong
-    pg_password:
-      valueFrom:
-        secretKeyRef:
-          key: kong
-          name: postgres
-    nginx_worker_processes: "2"
-```
-
-For a complete list of Kong configurations please check the
-[Kong configuration docs](https://docs.konghq.com/latest/configuration).
-
-> **Tip**: You can use the default [values.yaml](values.yaml)
-
-#### The `customEnv` section
-
-The `customEnv` section can be used to configure all environment variables other than Kong configuration.
-Any key value put under this section translates to environment variables
-that can be used in Kong's plugin configurations. Every key is upper-cased before setting the environment variable.
-
-An example:
-
-```yaml
-kong:
-  customEnv:
-    api_token:
-      valueFrom:
-        secretKeyRef:
-          key: token
-          name: api_key
-    client_name: testClient
-```
-
-#### The `extraLabels` section
-
-The `extraLabels` section can be used to configure some extra labels that will be added to each Kubernetes object generated.
- -For example, you can add the `acme.com/some-key: some-value` label to each Kubernetes object by putting the following in your Helm values: - -```yaml -extraLabels: - acme.com/some-key: some-value -``` - -## Kong Enterprise Parameters - -### Overview - -Kong Enterprise requires some additional configuration not needed when using -Kong Open-Source. To use Kong Enterprise, at the minimum, -you need to do the following: - -- Set `enterprise.enabled` to `true` in `values.yaml` file. -- Update values.yaml to use a Kong Enterprise image. -- Satisfy the two prerequisites below for Enterprise License and - Enterprise Docker Registry. -- (Optional) [set a `password` environment variable](#rbac) to create the - initial super-admin. Though not required, this is recommended for users that - wish to use RBAC, as it cannot be done after initial setup. - -Once you have these set, it is possible to install Kong Enterprise, -but please make sure to review the below sections for other settings that -you should consider configuring before installing Kong. - -Some of the more important configuration is grouped in sections -under the `.enterprise` key in values.yaml, though most enterprise-specific -configuration can be placed under the `.env` key. - -### Prerequisites - -#### Kong Enterprise License - -Kong Enterprise 2.3+ can run with or without a license. If you wish to run 2.3+ -without a license, you can skip this step and leave `enterprise.license_secret` -unset. In this case only a limited subset of features will be available. -Earlier versions require a license. - -If you have paid for a license, but you do not have a copy of yours, please -contact Kong Support. Once you have it, you will need to store it in a Secret: - -```bash -$ kubectl create secret generic kong-enterprise-license --from-file=license=./license.json -``` - -Set the secret name in `values.yaml`, in the `.enterprise.license_secret` key. -Please ensure the above secret is created in the same namespace in which -Kong is going to be deployed. - -#### Kong Enterprise Docker registry access - -Kong Enterprise versions 2.2 and earlier use a private Docker registry and -require a pull secret. **If you use 2.3 or newer, you can skip this step.** - -You should have received credentials to log into docker hub after -purchasing Kong Enterprise. After logging in, you can retrieve your API key -from \ \> Edit Profile \> API Key. Use this to create registry -secrets: - -```bash -$ kubectl create secret docker-registry kong-enterprise-edition-docker \ - --docker-server=hub.docker.io \ - --docker-username= \ - --docker-password= -secret/kong-enterprise-edition-docker created -``` - -Set the secret names in `values.yaml` in the `image.pullSecrets` section. -Again, please ensure the above secret is created in the same namespace in which -Kong is going to be deployed. - -### Service location hints - -Kong Enterprise add two GUIs, Kong Manager and the Kong Developer Portal, that -must know where other Kong services (namely the admin and files APIs) can be -accessed in order to function properly. Kong's default behavior for attempting -to locate these absent configuration is unlikely to work in common Kubernetes -environments. 
Because of this, you should set each of `admin_gui_url`, -`admin_gui_api_url`, `proxy_url`, `portal_api_url`, `portal_gui_host`, and -`portal_gui_protocol` under the `.env` key in values.yaml to locations where -each of their respective services can be accessed to ensure that Kong services -can locate one another and properly set CORS headers. See the -[Property Reference documentation](https://docs.konghq.com/enterprise/latest/property-reference/) -for more details on these settings. - -### RBAC - -You can create a default RBAC superuser when initially running `helm install` -by setting a `password` environment variable under `env` in values.yaml. It -should be a reference to a secret key containing your desired password. This -will create a `kong_admin` admin whose token and basic-auth password match the -value in the secret. For example: - -```yaml -env: - password: - valueFrom: - secretKeyRef: - name: kong-enterprise-superuser-password - key: password -``` - -If using the ingress controller, it needs access to the token as well, by -specifying `kong_admin_token` in its environment variables: - -```yaml -ingressController: - env: - kong_admin_token: - valueFrom: - secretKeyRef: - name: kong-enterprise-superuser-password - key: password -``` - -Although the above examples both use the initial super-admin, we recommend -[creating a less-privileged RBAC user](https://docs.konghq.com/enterprise/latest/kong-manager/administration/rbac/add-user/) -for the controller after installing. It needs at least workspace admin -privileges in its workspace (`default` by default, settable by adding a -`workspace` variable under `ingressController.env`). Once you create the -controller user, add its token to a secret and update your `kong_admin_token` -variable to use it. Remove the `password` variable from Kong's environment -variables and the secret containing the super-admin token after. - -### Sessions - -Login sessions for Kong Manager and the Developer Portal make use of -[the Kong Sessions plugin](https://docs.konghq.com/enterprise/latest/kong-manager/authentication/sessions). -When configured via values.yaml, their configuration must be stored in Secrets, -as it contains an HMAC key. - -Kong Manager's session configuration must be configured via values.yaml, -whereas this is optional for the Developer Portal on versions 0.36+. Providing -Portal session configuration in values.yaml provides the default session -configuration, which can be overridden on a per-workspace basis. - -``` -$ cat admin_gui_session_conf -{"cookie_name":"admin_session","cookie_samesite":"off","secret":"admin-secret-CHANGEME","cookie_secure":true,"storage":"kong"} -$ cat portal_session_conf -{"cookie_name":"portal_session","cookie_samesite":"off","secret":"portal-secret-CHANGEME","cookie_secure":true,"storage":"kong"} -$ kubectl create secret generic kong-session-config --from-file=admin_gui_session_conf --from-file=portal_session_conf -secret/kong-session-config created -``` -The exact plugin settings may vary in your environment. The `secret` should -always be changed for both configurations. - -After creating your secret, set its name in values.yaml in -`.enterprise.rbac.session_conf_secret`. If you create a Portal configuration, -add it at `env.portal_session_conf` using a secretKeyRef. 
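-
-For example, a minimal values.yaml sketch, assuming the `kong-session-config` Secret created above:
-
-```yaml
-enterprise:
-  rbac:
-    # name of the Secret holding admin_gui_session_conf (and optionally portal_session_conf)
-    session_conf_secret: kong-session-config
-env:
-  # optional: default Portal session configuration, loaded from the same Secret
-  portal_session_conf:
-    valueFrom:
-      secretKeyRef:
-        name: kong-session-config
-        key: portal_session_conf
-```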
- -### Email/SMTP - -Email is used to send invitations for -[Kong Admins](https://docs.konghq.com/enterprise/latest/kong-manager/networking/email) -and [Developers](https://docs.konghq.com/enterprise/latest/developer-portal/configuration/smtp). - -Email invitations rely on setting a number of SMTP settings at once. For -convenience, these are grouped under the `.enterprise.smtp` key in values.yaml. -Setting `.enterprise.smtp.disabled: true` will set `KONG_SMTP_MOCK=on` and -allow Admin/Developer invites to proceed without sending email. Note, however, -that these have limited functionality without sending email. - -If your SMTP server requires authentication, you must provide the `username` -and `smtp_password_secret` keys under `.enterprise.smtp.auth`. -`smtp_password_secret` must be a Secret containing an `smtp_password` key whose -value is your SMTP password. - -By default, SMTP uses `AUTH` `PLAIN` when you provide credentials. If your provider requires `AUTH LOGIN`, set `smtp_auth_type: login`. - -## Prometheus Operator integration - -The chart can configure a ServiceMonitor resource to instruct the [Prometheus -Operator](https://github.com/prometheus-operator/prometheus-operator) to -collect metrics from Kong Pods. To enable this, set -`serviceMonitor.enabled=true` in `values.yaml`. - -Kong exposes memory usage and connection counts by default. You can enable -traffic metrics for routes and services by configuring the [Prometheus -plugin](https://docs.konghq.com/hub/kong-inc/prometheus/). - -The ServiceMonitor requires an `enable-metrics: "true"` label on one of the -chart's Services to collect data. By default, this label is set on the proxy -Service. It should only be set on a single chart Service to avoid duplicate -data. If you disable the proxy Service (e.g. on a hybrid control plane instance -or Portal-only instance) and still wish to collect memory usage metrics, add -this label to another Service, e.g. on the admin API Service: - -``` -admin: - labels: - enable-metrics: "true" -``` - -## Argo CD Considerations - -The built-in database subchart (`postgresql.enabled` in values) is not -supported when installing the chart via Argo CD. - -Argo CD does not support the full Helm lifecycle. There is no distinction -between the initial install and upgrades. Both operations are a "sync" in Argo -terms. This affects when migration Jobs execute in database-backed Kong -installs. - -The chart sets the `Sync` and `BeforeHookCreation` deletion -[hook policies](https://argo-cd.readthedocs.io/en/stable/user-guide/resource_hooks/) -on the `init-migrations` and `pre-upgrade-migrations` Jobs. - -The `pre-upgrade-migrations` Job normally uses Helm's `pre-upgrade` policy. Argo -translates this to its `PreSync` policy, which would create the Job before all -sync phase resources. Doing this before various sync phase resources (such as -the ServiceAccount) are in place would prevent the Job from running -successfully. Overriding this with Argo's `Sync` policy starts the Job at the -same time as the upgraded Deployment Pods. The new Pods may fail to start -temporarily, but will eventually start normally once migrations complete. - -## Seeking help - -If you run into an issue, bug or have a question, please reach out to the Kong -community via [Kong Nation](https://discuss.konghq.com). -Please do not open issues in [this](https://github.com/helm/charts) repository -as the maintainers will not be notified and won't respond. 
+# kong + +![Version: 2.26.6](https://img.shields.io/badge/Version-2.26.6-informational?style=flat-square) ![AppVersion: 3.3](https://img.shields.io/badge/AppVersion-3.3-informational?style=flat-square) + +The Cloud-Native Ingress and API-management + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Source Code + +* + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami | postgresql | 11.9.13 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| admin.annotations | object | `{}` | | +| admin.enabled | bool | `false` | | +| admin.http.containerPort | int | `8001` | | +| admin.http.enabled | bool | `false` | | +| admin.http.parameters | list | `[]` | | +| admin.http.servicePort | int | `8001` | | +| admin.ingress.annotations | object | `{}` | | +| admin.ingress.enabled | bool | `false` | | +| admin.ingress.hostname | string | `nil` | | +| admin.ingress.ingressClassName | string | `nil` | | +| admin.ingress.path | string | `"/"` | | +| admin.ingress.pathType | string | `"ImplementationSpecific"` | | +| admin.labels | object | `{}` | | +| admin.loadBalancerClass | string | `nil` | | +| admin.tls.client.caBundle | string | `""` | | +| admin.tls.client.secretName | string | `""` | | +| admin.tls.containerPort | int | `8444` | | +| admin.tls.enabled | bool | `true` | | +| admin.tls.parameters[0] | string | `"http2"` | | +| admin.tls.servicePort | int | `8444` | | +| admin.type | string | `"NodePort"` | | +| autoscaling.behavior | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `5` | | +| autoscaling.metrics[0].resource.name | string | `"cpu"` | | +| autoscaling.metrics[0].resource.target.averageUtilization | int | `80` | | +| autoscaling.metrics[0].resource.target.type | string | `"Utilization"` | | +| autoscaling.metrics[0].type | string | `"Resource"` | | +| autoscaling.minReplicas | int | `2` | | +| autoscaling.targetCPUUtilizationPercentage | string | `nil` | | +| certificates | object | `{"admin":{"clusterIssuer":"","commonName":"kong.example","dnsNames":[],"enabled":true,"issuer":""},"cluster":{"clusterIssuer":"","commonName":"kong_clustering","dnsNames":[],"enabled":true,"issuer":""},"clusterIssuer":"","enabled":false,"issuer":"","portal":{"clusterIssuer":"","commonName":"developer.example","dnsNames":[],"enabled":true,"issuer":""},"proxy":{"clusterIssuer":"","commonName":"app.example","dnsNames":[],"enabled":true,"issuer":""}}` | --------------------------------------------------------------------------- | +| cluster.annotations | object | `{}` | | +| cluster.enabled | bool | `false` | | +| cluster.ingress.annotations | object | `{}` | | +| cluster.ingress.enabled | bool | `false` | | +| cluster.ingress.hostname | string | `nil` | | +| cluster.ingress.ingressClassName | string | `nil` | | +| cluster.ingress.path | string | `"/"` | | +| cluster.ingress.pathType | string | `"ImplementationSpecific"` | | +| cluster.labels | object | `{}` | | +| cluster.loadBalancerClass | string | `nil` | | +| cluster.tls.containerPort | int | `8005` | | +| cluster.tls.enabled | bool | `false` | | +| cluster.tls.parameters | list | `[]` | | +| cluster.tls.servicePort | int | `8005` | | +| cluster.type | string | `"ClusterIP"` | | +| clusterCaSecretName | string | `""` | | +| clustertelemetry.annotations | object | `{}` | | +| clustertelemetry.enabled | bool | `false` | | +| 
clustertelemetry.ingress.annotations | object | `{}` | | +| clustertelemetry.ingress.enabled | bool | `false` | | +| clustertelemetry.ingress.hostname | string | `nil` | | +| clustertelemetry.ingress.ingressClassName | string | `nil` | | +| clustertelemetry.ingress.path | string | `"/"` | | +| clustertelemetry.ingress.pathType | string | `"ImplementationSpecific"` | | +| clustertelemetry.labels | object | `{}` | | +| clustertelemetry.loadBalancerClass | string | `nil` | | +| clustertelemetry.tls.containerPort | int | `8006` | | +| clustertelemetry.tls.enabled | bool | `false` | | +| clustertelemetry.tls.parameters | list | `[]` | | +| clustertelemetry.tls.servicePort | int | `8006` | | +| clustertelemetry.type | string | `"ClusterIP"` | | +| containerSecurityContext.readOnlyRootFilesystem | bool | `true` | | +| dblessConfig.config | string | `""` | | +| dblessConfig.configMap | string | `""` | | +| dblessConfig.secret | string | `""` | | +| deployment.daemonset | bool | `false` | | +| deployment.hostNetwork | bool | `false` | | +| deployment.kong.enabled | bool | `true` | | +| deployment.prefixDir.sizeLimit | string | `"256Mi"` | | +| deployment.serviceAccount.automountServiceAccountToken | bool | `false` | | +| deployment.serviceAccount.create | bool | `true` | | +| deployment.test.enabled | bool | `false` | | +| deployment.tmpDir.sizeLimit | string | `"1Gi"` | | +| deploymentAnnotations | object | `{}` | | +| enterprise | object | `{"enabled":false,"portal":{"enabled":false},"rbac":{"admin_gui_auth":"basic-auth","admin_gui_auth_conf_secret":"CHANGEME-admin-gui-auth-conf-secret","enabled":false,"session_conf_secret":"kong-session-config"},"smtp":{"admin_emails_from":"none@example.com","admin_emails_reply_to":"none@example.com","auth":{"smtp_password_secret":"CHANGEME-smtp-password","smtp_username":""},"enabled":false,"portal_emails_from":"none@example.com","portal_emails_reply_to":"none@example.com","smtp_admin_emails":"none@example.com","smtp_auth_type":"","smtp_host":"smtp.example.com","smtp_port":587,"smtp_ssl":"nil","smtp_starttls":true},"vitals":{"enabled":true}}` | --------------------------------------------------------------------------- Toggle Kong Enterprise features on or off RBAC and SMTP configuration have additional options that must all be set together Other settings should be added to the "env" settings below | +| env | object | `{"admin_access_log":"/dev/stdout","admin_error_log":"/dev/stderr","admin_gui_access_log":"/dev/stdout","admin_gui_error_log":"/dev/stderr","database":"off","nginx_worker_processes":"2","portal_api_access_log":"/dev/stdout","portal_api_error_log":"/dev/stderr","prefix":"/kong_prefix/","proxy_access_log":"/dev/stdout","proxy_error_log":"/dev/stderr","router_flavor":"traditional"}` | --------------------------------------------------------------------------- Specify Kong configuration This chart takes all entries defined under `.env` and transforms them into into `KONG_*` environment variables for Kong containers. Their names here should match the names used in https://github.com/Kong/kong/blob/master/kong.conf.default See https://docs.konghq.com/latest/configuration also for additional details Values here take precedence over values from other sections of values.yaml, e.g. setting pg_user here will override the value normally set when postgresql.enabled is set below. In general, you should not set values here if they are set elsewhere. 
| +| extraConfigMaps | list | `[]` | | +| extraLabels | object | `{}` | | +| extraObjects | list | `[]` | | +| extraSecrets | list | `[]` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"kong"` | | +| image.tag | string | `"3.3"` | | +| ingressController | object | `{"adminApi":{"tls":{"client":{"caSecretName":"","certProvided":false,"enabled":false,"secretName":""}}},"admissionWebhook":{"certificate":{"provided":false},"enabled":true,"failurePolicy":"Ignore","namespaceSelector":{},"port":8080,"service":{"labels":{}}},"args":[],"enabled":true,"env":{"kong_admin_tls_skip_verify":true},"gatewayDiscovery":{"adminApiService":{"name":"","namespace":""},"enabled":false,"generateAdminApiService":false},"image":{"effectiveSemver":null,"repository":"kong/kubernetes-ingress-controller","tag":"2.11"},"ingressClass":"kong","ingressClassAnnotations":{},"konnect":{"apiHostname":"us.kic.api.konghq.com","enabled":false,"license":{"enabled":false},"runtimeGroupID":"","tlsClientCertSecretName":"konnect-client-tls"},"livenessProbe":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"rbac":{"create":true},"readinessProbe":{"failureThreshold":3,"httpGet":{"path":"/readyz","port":10254,"scheme":"HTTP"},"initialDelaySeconds":5,"periodSeconds":10,"successThreshold":1,"timeoutSeconds":5},"resources":{},"watchNamespaces":[]}` | --------------------------------------------------------------------------- Kong Ingress Controller's primary purpose is to satisfy Ingress resources created in k8s. It uses CRDs for more fine grained control over routing and for Kong specific configuration. | +| lifecycle.preStop.exec.command[0] | string | `"kong"` | | +| lifecycle.preStop.exec.command[1] | string | `"quit"` | | +| lifecycle.preStop.exec.command[2] | string | `"--wait=15"` | | +| livenessProbe.failureThreshold | int | `3` | | +| livenessProbe.httpGet.path | string | `"/status"` | | +| livenessProbe.httpGet.port | string | `"status"` | | +| livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.initialDelaySeconds | int | `5` | | +| livenessProbe.periodSeconds | int | `10` | | +| livenessProbe.successThreshold | int | `1` | | +| livenessProbe.timeoutSeconds | int | `5` | | +| manager.annotations | object | `{}` | | +| manager.enabled | bool | `true` | | +| manager.http.containerPort | int | `8002` | | +| manager.http.enabled | bool | `true` | | +| manager.http.parameters | list | `[]` | | +| manager.http.servicePort | int | `8002` | | +| manager.ingress.annotations | object | `{}` | | +| manager.ingress.enabled | bool | `false` | | +| manager.ingress.hostname | string | `nil` | | +| manager.ingress.ingressClassName | string | `nil` | | +| manager.ingress.path | string | `"/"` | | +| manager.ingress.pathType | string | `"ImplementationSpecific"` | | +| manager.labels | object | `{}` | | +| manager.loadBalancerClass | string | `nil` | | +| manager.tls.containerPort | int | `8445` | | +| manager.tls.enabled | bool | `true` | | +| manager.tls.parameters[0] | string | `"http2"` | | +| manager.tls.servicePort | int | `8445` | | +| manager.type | string | `"NodePort"` | | +| migrations.annotations."sidecar.istio.io/inject" | bool | `false` | | +| migrations.backoffLimit | string | `nil` | | +| migrations.jobAnnotations | object | `{}` | | +| migrations.postUpgrade | bool | `true` | | +| migrations.preUpgrade | bool | `true` | | +| migrations.resources | object | 
`{}` | | +| nodeSelector | object | `{}` | | +| plugins | object | `{}` | | +| podAnnotations."kuma.io/gateway" | string | `"enabled"` | | +| podAnnotations."traffic.sidecar.istio.io/includeInboundPorts" | string | `""` | | +| podDisruptionBudget.enabled | bool | `false` | | +| podLabels | object | `{}` | | +| podSecurityPolicy.annotations | object | `{}` | | +| podSecurityPolicy.enabled | bool | `false` | | +| podSecurityPolicy.labels | object | `{}` | | +| podSecurityPolicy.spec.allowPrivilegeEscalation | bool | `false` | | +| podSecurityPolicy.spec.fsGroup.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.hostIPC | bool | `false` | | +| podSecurityPolicy.spec.hostNetwork | bool | `false` | | +| podSecurityPolicy.spec.hostPID | bool | `false` | | +| podSecurityPolicy.spec.privileged | bool | `false` | | +| podSecurityPolicy.spec.readOnlyRootFilesystem | bool | `true` | | +| podSecurityPolicy.spec.runAsGroup.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.runAsUser.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.seLinux.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.supplementalGroups.rule | string | `"RunAsAny"` | | +| podSecurityPolicy.spec.volumes[0] | string | `"configMap"` | | +| podSecurityPolicy.spec.volumes[1] | string | `"secret"` | | +| podSecurityPolicy.spec.volumes[2] | string | `"emptyDir"` | | +| podSecurityPolicy.spec.volumes[3] | string | `"projected"` | | +| portal.annotations | object | `{}` | | +| portal.enabled | bool | `true` | | +| portal.http.containerPort | int | `8003` | | +| portal.http.enabled | bool | `true` | | +| portal.http.parameters | list | `[]` | | +| portal.http.servicePort | int | `8003` | | +| portal.ingress.annotations | object | `{}` | | +| portal.ingress.enabled | bool | `false` | | +| portal.ingress.hostname | string | `nil` | | +| portal.ingress.ingressClassName | string | `nil` | | +| portal.ingress.path | string | `"/"` | | +| portal.ingress.pathType | string | `"ImplementationSpecific"` | | +| portal.labels | object | `{}` | | +| portal.loadBalancerClass | string | `nil` | | +| portal.tls.containerPort | int | `8446` | | +| portal.tls.enabled | bool | `true` | | +| portal.tls.parameters[0] | string | `"http2"` | | +| portal.tls.servicePort | int | `8446` | | +| portal.type | string | `"NodePort"` | | +| portalapi.annotations | object | `{}` | | +| portalapi.enabled | bool | `true` | | +| portalapi.http.containerPort | int | `8004` | | +| portalapi.http.enabled | bool | `true` | | +| portalapi.http.parameters | list | `[]` | | +| portalapi.http.servicePort | int | `8004` | | +| portalapi.ingress.annotations | object | `{}` | | +| portalapi.ingress.enabled | bool | `false` | | +| portalapi.ingress.hostname | string | `nil` | | +| portalapi.ingress.ingressClassName | string | `nil` | | +| portalapi.ingress.path | string | `"/"` | | +| portalapi.ingress.pathType | string | `"ImplementationSpecific"` | | +| portalapi.labels | object | `{}` | | +| portalapi.loadBalancerClass | string | `nil` | | +| portalapi.tls.containerPort | int | `8447` | | +| portalapi.tls.enabled | bool | `true` | | +| portalapi.tls.parameters[0] | string | `"http2"` | | +| portalapi.tls.servicePort | int | `8447` | | +| portalapi.type | string | `"NodePort"` | | +| postgresql | object | `{"auth":{"database":"kong","username":"kong"},"enabled":false,"image":{"tag":"13.11.0-debian-11-r20"},"service":{"ports":{"postgresql":"5432"}}}` | --------------------------------------------------------------------------- Kong can run without a 
database or use either Postgres or Cassandra as a backend datatstore for it's configuration. By default, this chart installs Kong without a database. If you would like to use a database, there are two options: - (recommended) Deploy and maintain a database and pass the connection details to Kong via the `env` section. - You can use the below `postgresql` sub-chart to deploy a database along-with Kong as part of a single Helm release. Running a database independently is recommended for production, but the built-in Postgres is useful for quickly creating test instances. PostgreSQL chart documentation: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md WARNING: by default, the Postgres chart generates a random password each time it upgrades, which breaks access to existing volumes. You should set a password explicitly: https://github.com/Kong/charts/blob/main/charts/kong/FAQs.md#kong-fails-to-start-after-helm-upgrade-when-postgres-is-used-what-do-i-do | +| priorityClassName | string | `""` | | +| proxy.annotations | object | `{}` | | +| proxy.enabled | bool | `true` | | +| proxy.http.containerPort | int | `8000` | | +| proxy.http.enabled | bool | `true` | | +| proxy.http.parameters | list | `[]` | | +| proxy.http.servicePort | int | `80` | | +| proxy.ingress.annotations | object | `{}` | | +| proxy.ingress.enabled | bool | `false` | | +| proxy.ingress.hostname | string | `nil` | | +| proxy.ingress.hosts | list | `[]` | | +| proxy.ingress.ingressClassName | string | `nil` | | +| proxy.ingress.labels | object | `{}` | | +| proxy.ingress.path | string | `"/"` | | +| proxy.ingress.pathType | string | `"ImplementationSpecific"` | | +| proxy.labels.enable-metrics | string | `"true"` | | +| proxy.loadBalancerClass | string | `nil` | | +| proxy.nameOverride | string | `""` | | +| proxy.stream | list | `[]` | | +| proxy.tls.containerPort | int | `8443` | | +| proxy.tls.enabled | bool | `true` | | +| proxy.tls.parameters[0] | string | `"http2"` | | +| proxy.tls.servicePort | int | `443` | | +| proxy.type | string | `"LoadBalancer"` | | +| readinessProbe.failureThreshold | int | `3` | | +| readinessProbe.httpGet.path | string | `"/status/ready"` | | +| readinessProbe.httpGet.port | string | `"status"` | | +| readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| readinessProbe.initialDelaySeconds | int | `5` | | +| readinessProbe.periodSeconds | int | `10` | | +| readinessProbe.successThreshold | int | `1` | | +| readinessProbe.timeoutSeconds | int | `5` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| secretVolumes | list | `[]` | | +| securityContext | object | `{}` | | +| serviceMonitor.enabled | bool | `false` | | +| status.enabled | bool | `true` | | +| status.http.containerPort | int | `8100` | | +| status.http.enabled | bool | `true` | | +| status.http.parameters | list | `[]` | | +| status.tls.containerPort | int | `8543` | | +| status.tls.enabled | bool | `false` | | +| status.tls.parameters | list | `[]` | | +| terminationGracePeriodSeconds | int | `30` | | +| tolerations | list | `[]` | | +| udpProxy.annotations | object | `{}` | | +| udpProxy.enabled | bool | `false` | | +| udpProxy.labels | object | `{}` | | +| udpProxy.loadBalancerClass | string | `nil` | | +| udpProxy.stream | list | `[]` | | +| udpProxy.type | string | `"LoadBalancer"` | | +| updateStrategy | object | `{}` | | +| waitImage | object | `{"enabled":true,"pullPolicy":"IfNotPresent"}` | --------------------------------------------------------------------------- | + 
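+
+For illustration, a small values.yaml override might look like the following; the keys come from the table above, while the specific values are arbitrary examples rather than recommendations:
+
+```yaml
+# Example override only; see the Values table for defaults and types.
+replicaCount: 2
+serviceMonitor:
+  enabled: true
+env:
+  nginx_worker_processes: "4"
+resources:
+  requests:
+    cpu: 500m
+    memory: 512Mi
+```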
+---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/kube-prometheus-stack/README.md b/charts/kube-prometheus-stack/README.md index 6bf66386f..41a7b78ac 100644 --- a/charts/kube-prometheus-stack/README.md +++ b/charts/kube-prometheus-stack/README.md @@ -1,853 +1,1005 @@ # kube-prometheus-stack -Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). - -See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts. - -_Note: This chart was formerly named `prometheus-operator` chart, now renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._ - -## Prerequisites - -- Kubernetes 1.16+ -- Helm 3+ - -## Get Helm Repository Info - -```console -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm repo update -``` - -_See [`helm repo`](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Install Helm Chart - -```console -helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack -``` - -_See [configuration](#configuration) below._ - -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -## Dependencies - -By default this chart installs additional, dependent charts: - -- [prometheus-community/kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) -- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) -- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana) - -To disable dependencies during installation, see [multiple releases](#multiple-releases) below. - -_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ - -## Uninstall Helm Chart - -```console -helm uninstall [RELEASE_NAME] -``` - -This removes all the Kubernetes components associated with the chart and deletes the release. 
- -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -CRDs created by this chart are not removed by default and should be manually cleaned up: - -```console -kubectl delete crd alertmanagerconfigs.monitoring.coreos.com -kubectl delete crd alertmanagers.monitoring.coreos.com -kubectl delete crd podmonitors.monitoring.coreos.com -kubectl delete crd probes.monitoring.coreos.com -kubectl delete crd prometheusagents.monitoring.coreos.com -kubectl delete crd prometheuses.monitoring.coreos.com -kubectl delete crd prometheusrules.monitoring.coreos.com -kubectl delete crd scrapeconfigs.monitoring.coreos.com -kubectl delete crd servicemonitors.monitoring.coreos.com -kubectl delete crd thanosrulers.monitoring.coreos.com -``` - -## Upgrading Chart - -```console -helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack -``` - -With Helm v3, CRDs created by this chart are not updated by default and should be manually updated. -Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions). - -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. - -### From 47.x to 48.x - -This version moved all CRDs into a dedicated sub-chart. No new CRDs are introduced in this version. -See [#3548](https://github.com/prometheus-community/helm-charts/issues/3548) for more context. - -We do not expect any breaking changes in this version. - -### From 46.x to 47.x - -This version upgrades Prometheus-Operator to v0.66.0 with new CRDs (PrometheusAgent and ScrapeConfig). - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.66.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 45.x to 46.x - -This version upgrades Prometheus-Operator to v0.65.1 with new CRDs (PrometheusAgent and ScrapeConfig), Prometheus to v2.44.0 and Thanos to v0.31.0. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 44.x to 45.x - -This version upgrades Prometheus-Operator to v0.63.0, Prometheus to v2.42.0 and Thanos to v0.30.2. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.63.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 43.x to 44.x - -This version upgrades Prometheus-Operator to v0.62.0, Prometheus to v2.41.0 and Thanos to v0.30.1. - -Run these commands to update the CRDs before applying the upgrade. - -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.62.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -If you have explicitly set `prometheusOperator.admissionWebhooks.failurePolicy`, this value is now always used even when `.prometheusOperator.admissionWebhooks.patch.enabled` is `true` (the default). 
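-
-For instance, a values.yaml sketch that pins the policy explicitly (the `Fail` value below is only an illustration):
-
-```yaml
-prometheusOperator:
-  admissionWebhooks:
-    failurePolicy: Fail # honored even while patch.enabled remains true
-    patch:
-      enabled: true
-```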
- -The values for `prometheusOperator.image.tag` & `prometheusOperator.prometheusConfigReloader.image.tag` are now empty by default and the Chart.yaml `appVersion` field is used instead. - -### From 42.x to 43.x - -This version upgrades Prometheus-Operator to v0.61.1, Prometheus to v2.40.5 and Thanos to v0.29.0. - -Run these commands to update the CRDs before applying the upgrade. - -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 41.x to 42.x - -This includes the overridability of container registry for all containers at the global level using `global.imageRegistry` or per container image. The defaults have not changed but if you were using a custom image, you will have to override the registry of said custom container image before you upgrade. - -For instance, the prometheus-config-reloader used to be configured as follow: - -```yaml - image: - repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.60.1 - sha: "" -``` - -But it now moved to: - -```yaml - image: - registry: quay.io - repository: prometheus-operator/prometheus-config-reloader - tag: v0.60.1 - sha: "" -``` - -### From 40.x to 41.x - -This version upgrades Prometheus-Operator to v0.60.1, Prometheus to v2.39.1 and Thanos to v0.28.1. -This version also upgrades the Helm charts of kube-state-metrics to 4.20.2, prometheus-node-exporter to 4.3.0 and Grafana to 6.40.4. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -This version splits kubeScheduler recording and altering rules in separate config values. -Instead of `defaultRules.rules.kubeScheduler` the 2 new variables `defaultRules.rules.kubeSchedulerAlerting` and `defaultRules.rules.kubeSchedulerRecording` are used. - -### From 39.x to 40.x - -This version upgrades Prometheus-Operator to v0.59.1, Prometheus to v2.38.0, kube-state-metrics to v2.6.0 and Thanos to v0.28.0. -This version also upgrades the Helm charts of kube-state-metrics to 4.18.0 and prometheus-node-exporter to 4.2.0. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.59.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -Starting from prometheus-node-exporter version 4.0.0, the `node exporter` chart is using the [Kubernetes recommended labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/). Therefore you have to delete the daemonset before you upgrade. - -```console -kubectl delete daemonset -l app=prometheus-node-exporter -helm upgrade -i kube-prometheus-stack prometheus-community/kube-prometheus-stack -``` - -If you use your own custom [ServiceMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor) or [PodMonitor](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#podmonitor), please ensure to upgrade their `selector` fields accordingly to the new labels. - -### From 38.x to 39.x - -This upgraded prometheus-operator to v0.58.0 and prometheus to v2.37.0 - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.58.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 37.x to 38.x - -Reverted one of the default metrics relabelings for cAdvisor added in 36.x, due to it breaking container_network_* and various other statistics. If you do not want this change, you will need to override the `kubelet.cAdvisorMetricRelabelings`. - -### From 36.x to 37.x - -This includes some default metric relabelings for cAdvisor and apiserver metrics to reduce cardinality. If you do not want these defaults, you will need to override the `kubeApiServer.metricRelabelings` and or `kubelet.cAdvisorMetricRelabelings`. - -### From 35.x to 36.x - -This upgraded prometheus-operator to v0.57.0 and prometheus to v2.36.1 - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.57.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 34.x to 35.x - -This upgraded prometheus-operator to v0.56.0 and prometheus to v2.35.0 - -Run these commands to update the CRDs before applying the upgrade. - -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.56.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 33.x to 34.x - -This upgrades to prometheus-operator to v0.55.0 and prometheus to v2.33.5. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.55.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 32.x to 33.x - -This upgrades the prometheus-node-exporter Chart to v3.0.0. Please review the changes to this subchart if you make customizations to hostMountPropagation. - -### From 31.x to 32.x - -This upgrades to prometheus-operator to v0.54.0 and prometheus to v2.33.1. It also changes the default for `grafana.serviceMonitor.enabled` to `true. - -Run these commands to update the CRDs before applying the upgrade. 
- -```console -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.54.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 30.x to 31.x - -This version removes the built-in grafana ServiceMonitor and instead relies on the ServiceMonitor of the sub-chart. -`grafana.serviceMonitor.enabled` must be set instead of `grafana.serviceMonitor.selfMonitor` and the old ServiceMonitor may -need to be manually cleaned up after deploying the new release. - -### From 29.x to 30.x - -This version updates kube-state-metrics to 4.3.0 and uses the new option `kube-state-metrics.releaseLabel=true` which adds the "release" label to kube-state-metrics labels, making scraping of the metrics by kube-prometheus-stack work out of the box again, independent of the used kube-prometheus-stack release name. If you already set the "release" label via `kube-state-metrics.customLabels` you might have to remove that and use it via the new option. - -### From 28.x to 29.x - -This version makes scraping port for kube-controller-manager and kube-scheduler dynamic to reflect changes to default serving ports -for those components in Kubernetes versions v1.22 and v1.23 respectively. - -If you deploy on clusters using version v1.22+, kube-controller-manager will be scraped over HTTPS on port 10257. - -If you deploy on clusters running version v1.23+, kube-scheduler will be scraped over HTTPS on port 10259. - -### From 27.x to 28.x - -This version disables PodSecurityPolicies by default because they are deprecated in Kubernetes 1.21 and will be removed in Kubernetes 1.25. - -If you are using PodSecurityPolicies you can enable the previous behaviour by setting `kube-state-metrics.podSecurityPolicy.enabled`, `prometheus-node-exporter.rbac.pspEnabled`, `grafana.rbac.pspEnabled` and `global.rbac.pspEnabled` to `true`. - -### From 26.x to 27.x - -This version splits prometheus-node-exporter chart recording and altering rules in separate config values. -Instead of `defaultRules.rules.node` the 2 new variables `defaultRules.rules.nodeExporterAlerting` and `defaultRules.rules.nodeExporterRecording` are used. 
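-
-For example, an explicit values.yaml sketch with the new keys (the boolean values shown are only illustrative):
-
-```yaml
-defaultRules:
-  rules:
-    # replaces the former defaultRules.rules.node toggle
-    nodeExporterAlerting: true
-    nodeExporterRecording: true
-```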

Also, the following `defaultRules.rules` entries have been removed as they had no effect: `kubeApiserverError`, `kubePrometheusNodeAlerting`, `kubernetesAbsent`, `time`.

The ability to set a runbookUrl via `defaultRules.runbookUrl` was reintroduced.

### From 25.x to 26.x

This version enables the prometheus-node-exporter subchart servicemonitor by default again, by setting `prometheus-node-exporter.prometheus.monitor.enabled` to `true`.

### From 24.x to 25.x

This version upgrades prometheus-operator to v0.53.1. It removes support for setting a runbookUrl, since the upstream format for runbooks changed.

Run these commands to update the CRDs before applying the upgrade:

```console
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.53.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```

### From 23.x to 24.x

The custom `ServiceMonitor` for the _kube-state-metrics_ & _prometheus-node-exporter_ charts has been removed in favour of the built-in sub-chart `ServiceMonitor`; for both sub-charts this means that `ServiceMonitor` customisations happen via the values passed to the chart. If you haven't directly customised this behaviour then there are no changes required to upgrade, but if you have please read the following.

For _kube-state-metrics_ the `ServiceMonitor` customisation is now set via `kube-state-metrics.prometheus.monitor` and the `kubeStateMetrics.serviceMonitor.selfMonitor.enabled` value has moved to `kube-state-metrics.selfMonitor.enabled`.

For _prometheus-node-exporter_ the `ServiceMonitor` customisation is now set via `prometheus-node-exporter.prometheus.monitor` and the `nodeExporter.jobLabel` value has moved to `prometheus-node-exporter.prometheus.monitor.jobLabel`.

### From 22.x to 23.x

Port names have been renamed for Istio's
[explicit protocol selection](https://istio.io/latest/docs/ops/configuration/traffic-management/protocol-selection/#explicit-protocol-selection).

| Parameter | old value | new value |
|-----------|-----------|-----------|
| `alertmanager.alertmanagerSpec.portName` | `web` | `http-web` |
| `grafana.service.portName` | `service` | `http-web` |
| `prometheus-node-exporter.service.portName` | `metrics` (hardcoded) | `http-metrics` |
| `prometheus.prometheusSpec.portName` | `web` | `http-web` |

### From 21.x to 22.x

Due to the upgrade of the `kube-state-metrics` chart, removal of its deployment/statefulset needs to be done manually prior to upgrading:

```console
kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
```

or if you use autosharding:

```console
kubectl delete statefulsets.apps -l app.kubernetes.io/instance=prometheus-operator,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
```

### From 20.x to 21.x

The config reloader values have been refactored. All the values have been moved to the key `prometheusConfigReloader`, and the limits and requests can now be set separately.

### From 19.x to 20.x

Version 20 upgrades prometheus-operator from 0.50.x to 0.52.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating:

```console
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.52.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
```

### From 18.x to 19.x

`kubeStateMetrics.serviceMonitor.namespaceOverride` was removed.
Please use `kube-state-metrics.namespaceOverride` instead.

### From 17.x to 18.x

Version 18 upgrades prometheus-operator from 0.49.x to 0.50.x.
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.50.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 16.x to 17.x - -Version 17 upgrades prometheus-operator from 0.48.x to 0.49.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.49.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 15.x to 16.x - -Version 16 upgrades kube-state-metrics to v2.0.0. This includes changed command-line arguments and removed metrics, see this [blog post](https://kubernetes.io/blog/2021/04/13/kube-state-metrics-v-2-0/). This version also removes Grafana dashboards that supported Kubernetes 1.14 or earlier. 
- -### From 14.x to 15.x - -Version 15 upgrades prometheus-operator from 0.46.x to 0.47.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 13.x to 14.x - -Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml -``` - -### From 12.x to 13.x - -Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml -``` - -### From 11.x to 12.x - -Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml -``` - -The chart was migrated to support only helm v3 and later. - -### From 10.x to 11.x - -Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml -``` - -Version 11 removes the deprecated tlsProxy via ghostunnel in favor of native TLS support the prometheus-operator gained with v0.39.0. - -### From 9.x to 10.x - -Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: - -```console -kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml -``` - -### From 8.x to 9.x - -Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration. - -### From 7.x to 8.x - -Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x - -```console -helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0 -``` - -Then upgrade to 8.x.x - -```console -helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x] -``` - -Minimal recommended Prometheus version for this chart release is `2.12.x` - -### From 6.x to 7.x - -Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0. - -### From 5.x to 6.x - -Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. 
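
For example (a sketch only; the release name is a placeholder and the chart reference depends on which repository the release was installed from), the forced upgrade looks like:

```console
helm upgrade --force [RELEASE_NAME] prometheus-community/kube-prometheus-stack
```
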
If this is not done, an error will occur indicating that the deployment cannot be modified:

```console
invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable
```

If this error has already been encountered, a `helm history` command can be used to determine which release last worked, then `helm rollback` to that release, and finally `helm upgrade --force` to this new one.

## Configuration

See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments:

```console
helm show values prometheus-community/kube-prometheus-stack
```

You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options.

### Multiple releases

The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`.

## Work-Arounds for Known Issues

### Running on private GKE clusters

When Google configures the control plane for private clusters, it automatically configures VPC peering between your Kubernetes cluster’s network and a separate Google-managed project. In order to restrict what Google is able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod.

You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules).

Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`.

## PrometheusRules Admission Webhooks

With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster.

### How the Chart Configures the Hooks

A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks.

1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end user certificates. If the certificate already exists, the hook exits.
2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate.
3. Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore.
This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set.
4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations.

### Alternatives

It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested.

You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to `true`.

### Limitations

Because the operator can only run as a single pod, there is potential for this component's failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default.

## Developing Prometheus Rules and Grafana Dashboards

This chart's Grafana dashboards and Prometheus rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in the [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repository](https://github.com/prometheus-operator/kube-prometheus/blob/main/docs/customizations/developing-prometheus-rules-and-grafana-dashboards.md) and then sync them here with those scripts.

## Further Information

For more in-depth documentation of configuration option meanings, please see:

- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator)
- [Prometheus](https://prometheus.io/docs/introduction/overview/)
- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart)

## prometheus.io/scrape

The prometheus operator does not support annotation-based discovery of services; use the `PodMonitor` or `ServiceMonitor` CRD in its place, as they provide far more configuration options.
For information on how to use PodMonitors/ServiceMonitors, please see the `prometheus-operator/prometheus-operator` documentation:

- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#include-servicemonitors)
- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/getting-started.md#include-podmonitors)
- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/running-exporters.md)

By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace that are labeled with the same release tag as the prometheus-operator release.
Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example ones used to scrape data from third-party applications.
An easy way of doing this, without compromising the default PodMonitor/ServiceMonitor discovery, is to allow Prometheus to discover all PodMonitors/ServiceMonitors within its namespace without applying any label filtering.
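
For example, a minimal sketch (the release name and namespace are placeholders):

```console
helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack -n monitoring --reuse-values \
  --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \
  --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
```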

Concretely, this means setting `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.

## Migrating from stable/prometheus-operator chart

### Zero downtime

Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved.
However, the old name prefix needs to be kept. If you want the new name, please follow the step-by-step guide below (with downtime).

You can override the name to achieve this:

```console
helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator
```

**Note**: It is recommended to run this first with `--dry-run --debug`.

### Redeploy with new name (downtime)

If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the steps below for migration:

> The guide presumes that the chart is deployed in the `monitoring` namespace and the deployments are running there. If deployed in another namespace, please replace `monitoring` with that namespace.

1. Patch the PersistentVolume created/used by the prometheus-operator chart to the `Retain` reclaim policy:

   ```console
   kubectl patch pv/ -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
   ```

   **Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).

2. Uninstall the **prometheus-operator** release and delete the existing PersistentVolumeClaim, and verify that the PV becomes Released.

   ```console
   helm uninstall prometheus-operator -n monitoring
   kubectl delete pvc/ -n monitoring
   ```

   Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service.

   ```console
   kubectl delete service/prometheus-operator-kubelet -n kube-system
   ```

   You can choose to remove all your existing CRDs (ServiceMonitors, PodMonitors, etc.) if you want to.

3. Remove the current `spec.claimRef` values to change the PV's status from Released to Available.

   ```console
   kubectl patch pv/ --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring
   ```

**Note:** To execute the above command, the user must have cluster-wide permissions. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).

After these steps, proceed to a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in the `values.yaml`.

The binding is done by matching the specific amount of storage requested and the access modes.

For example, if you had storage specified like this with **prometheus-operator**:

```yaml
volumeClaimTemplate:
  spec:
    storageClassName: gp2
    accessModes: ["ReadWriteOnce"]
    resources:
      requests:
        storage: 50Gi
```

You have to specify a matching `volumeClaimTemplate` with 50Gi storage and the `ReadWriteOnce` access mode.

Additionally, you should check the current AZ of your legacy installation's PV, and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing one, hence creating a new PV.

This can be achieved either by specifying the labels through `values.yaml`, e.g.
setting `prometheus.prometheusSpec.nodeSelector` to: - -```yaml -nodeSelector: - failure-domain.beta.kubernetes.io/zone: east-west-1a -``` - -or passing these values as `--set` overrides during installation. - -The new release should now re-attach your previously released PV with its content. - -## Migrating from coreos/prometheus-operator chart - -The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, grafana as well as the multitude of exporters necessary to monitor a cluster. - -There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support. - -The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy. - -You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765). - -### High-level overview of Changes - -#### Added dependencies - -The chart has added 3 [dependencies](#dependencies). - -- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components -- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md) - -#### Kubelet Service - -Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice. - -#### Persistent Volumes - -If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created: - -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pvc-prometheus-migration-prometheus-0 -spec: - accessModes: - - ReadWriteOnce - azureDisk: - cachingMode: None - diskName: pvc-prometheus-migration-prometheus-0 - diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0 - fsType: "" - kind: Managed - readOnly: false - capacity: - storage: 1Gi - persistentVolumeReclaimPolicy: Delete - storageClassName: prometheus - volumeMode: Filesystem -``` - -```yaml -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - app.kubernetes.io/name: prometheus - prometheus: prometheus-migration-prometheus - name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0 - namespace: monitoring -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - storageClassName: prometheus - volumeMode: Filesystem - volumeName: pvc-prometheus-migration-prometheus-0 -``` - -The PVC will take ownership of the PV and when you create a release using a persistent volume claim template it will use the existing PVCs as they match the naming convention used by the chart. 
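
To check that the claim bound to the pre-created volume as intended (the names and namespace follow the Azure example above), something like the following can be used; both objects should report a `Bound` status:

```console
kubectl get pv pvc-prometheus-migration-prometheus-0
kubectl -n monitoring get pvc prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0
```
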
For other cloud providers similar approaches can be used. - -#### KubeProxy - -The metrics bind address of kube-proxy is default to `127.0.0.1:10249` that prometheus instances **cannot** access to. You should expose metrics by changing `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them. - -Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`. For example: - -```console -kubectl -n kube-system edit cm kube-proxy -``` - -```yaml -apiVersion: v1 -data: - config.conf: |- - apiVersion: kubeproxy.config.k8s.io/v1alpha1 - kind: KubeProxyConfiguration - # ... - # metricsBindAddress: 127.0.0.1:10249 - metricsBindAddress: 0.0.0.0:10249 - # ... - kubeconfig.conf: |- - # ... -kind: ConfigMap -metadata: - labels: - app: kube-proxy - name: kube-proxy - namespace: kube-system -``` +![Version: 48.4.1](https://img.shields.io/badge/Version-48.4.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.66.0](https://img.shields.io/badge/AppVersion-v0.66.0-informational?style=flat-square) + +kube-prometheus-stack collects Kubernetes manifests, Grafana dashboards, and Prometheus rules combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Source Code + +* +* + +## Requirements + +Kubernetes: `>=1.16.0-0` + +| Repository | Name | Version | +|------------|------|---------| +| | crds | 0.0.0 | +| https://grafana.github.io/helm-charts | grafana | 6.58.* | +| https://prometheus-community.github.io/helm-charts | kube-state-metrics | 5.10.* | +| https://prometheus-community.github.io/helm-charts | prometheus-node-exporter | 4.21.* | +| https://prometheus-community.github.io/helm-charts | prometheus-windows-exporter | 0.1.* | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| additionalPrometheusRulesMap | object | `{}` | | +| alertmanager.alertmanagerSpec.additionalPeers | list | `[]` | | +| alertmanager.alertmanagerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| alertmanager.alertmanagerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| alertmanager.alertmanagerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| alertmanager.alertmanagerSpec.alertmanagerConfigMatcherStrategy | object | `{}` | | +| alertmanager.alertmanagerSpec.alertmanagerConfigNamespaceSelector | object | `{}` | | +| alertmanager.alertmanagerSpec.alertmanagerConfigSelector | object | `{}` | | +| alertmanager.alertmanagerSpec.alertmanagerConfiguration | object | `{}` | | +| alertmanager.alertmanagerSpec.clusterAdvertiseAddress | bool | `false` | | +| alertmanager.alertmanagerSpec.clusterGossipInterval | string | `""` | | +| alertmanager.alertmanagerSpec.clusterPeerTimeout | string | `""` | | +| alertmanager.alertmanagerSpec.clusterPushpullInterval | string | `""` | | +| alertmanager.alertmanagerSpec.configMaps | list | `[]` | | +| alertmanager.alertmanagerSpec.containers | list 
| `[]` | | +| alertmanager.alertmanagerSpec.externalUrl | string | `nil` | | +| alertmanager.alertmanagerSpec.forceEnableClusterMode | bool | `false` | | +| alertmanager.alertmanagerSpec.image.registry | string | `"quay.io"` | | +| alertmanager.alertmanagerSpec.image.repository | string | `"prometheus/alertmanager"` | | +| alertmanager.alertmanagerSpec.image.sha | string | `""` | | +| alertmanager.alertmanagerSpec.image.tag | string | `"v0.26.0"` | | +| alertmanager.alertmanagerSpec.initContainers | list | `[]` | | +| alertmanager.alertmanagerSpec.listenLocal | bool | `false` | | +| alertmanager.alertmanagerSpec.logFormat | string | `"logfmt"` | | +| alertmanager.alertmanagerSpec.logLevel | string | `"info"` | | +| alertmanager.alertmanagerSpec.minReadySeconds | int | `0` | | +| alertmanager.alertmanagerSpec.nodeSelector | object | `{}` | | +| alertmanager.alertmanagerSpec.paused | bool | `false` | | +| alertmanager.alertmanagerSpec.podAntiAffinity | string | `""` | | +| alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey | string | `"kubernetes.io/hostname"` | | +| alertmanager.alertmanagerSpec.podMetadata | object | `{}` | | +| alertmanager.alertmanagerSpec.portName | string | `"http-web"` | | +| alertmanager.alertmanagerSpec.priorityClassName | string | `""` | | +| alertmanager.alertmanagerSpec.replicas | int | `1` | | +| alertmanager.alertmanagerSpec.resources | object | `{}` | | +| alertmanager.alertmanagerSpec.retention | string | `"120h"` | | +| alertmanager.alertmanagerSpec.routePrefix | string | `"/"` | | +| alertmanager.alertmanagerSpec.scheme | string | `""` | | +| alertmanager.alertmanagerSpec.secrets | list | `[]` | | +| alertmanager.alertmanagerSpec.securityContext.fsGroup | int | `2000` | | +| alertmanager.alertmanagerSpec.securityContext.runAsGroup | int | `2000` | | +| alertmanager.alertmanagerSpec.securityContext.runAsNonRoot | bool | `true` | | +| alertmanager.alertmanagerSpec.securityContext.runAsUser | int | `1000` | | +| alertmanager.alertmanagerSpec.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| alertmanager.alertmanagerSpec.storage | object | `{}` | | +| alertmanager.alertmanagerSpec.tlsConfig | object | `{}` | | +| alertmanager.alertmanagerSpec.tolerations[0].effect | string | `"PreferNoSchedule"` | | +| alertmanager.alertmanagerSpec.tolerations[0].key | string | `"monitoring"` | | +| alertmanager.alertmanagerSpec.tolerations[0].operator | string | `"Equal"` | | +| alertmanager.alertmanagerSpec.tolerations[0].value | string | `"true"` | | +| alertmanager.alertmanagerSpec.topologySpreadConstraints | list | `[]` | | +| alertmanager.alertmanagerSpec.useExistingSecret | bool | `false` | | +| alertmanager.alertmanagerSpec.volumeMounts | list | `[]` | | +| alertmanager.alertmanagerSpec.volumes | list | `[]` | | +| alertmanager.alertmanagerSpec.web | object | `{}` | | +| alertmanager.annotations | object | `{}` | | +| alertmanager.apiVersion | string | `"v2"` | | +| alertmanager.config.global.resolve_timeout | string | `"5m"` | | +| alertmanager.config.inhibit_rules[0].equal[0] | string | `"namespace"` | | +| alertmanager.config.inhibit_rules[0].equal[1] | string | `"alertname"` | | +| alertmanager.config.inhibit_rules[0].source_matchers[0] | string | `"severity = critical"` | | +| alertmanager.config.inhibit_rules[0].target_matchers[0] | string | `"severity =~ warning|info"` | | +| alertmanager.config.inhibit_rules[1].equal[0] | string | `"namespace"` | | +| alertmanager.config.inhibit_rules[1].equal[1] | string | `"alertname"` | | +| 
alertmanager.config.inhibit_rules[1].source_matchers[0] | string | `"severity = warning"` | | +| alertmanager.config.inhibit_rules[1].target_matchers[0] | string | `"severity = info"` | | +| alertmanager.config.inhibit_rules[2].equal[0] | string | `"namespace"` | | +| alertmanager.config.inhibit_rules[2].source_matchers[0] | string | `"alertname = InfoInhibitor"` | | +| alertmanager.config.inhibit_rules[2].target_matchers[0] | string | `"severity = info"` | | +| alertmanager.config.receivers[0].name | string | `"null"` | | +| alertmanager.config.route.group_by[0] | string | `"namespace"` | | +| alertmanager.config.route.group_interval | string | `"5m"` | | +| alertmanager.config.route.group_wait | string | `"30s"` | | +| alertmanager.config.route.receiver | string | `"null"` | | +| alertmanager.config.route.repeat_interval | string | `"12h"` | | +| alertmanager.config.route.routes[0].matchers[0] | string | `"alertname =~ \"InfoInhibitor|Watchdog\""` | | +| alertmanager.config.route.routes[0].receiver | string | `"null"` | | +| alertmanager.config.templates[0] | string | `"/etc/alertmanager/config/*.tmpl"` | | +| alertmanager.enabled | bool | `false` | | +| alertmanager.extraSecret.annotations | object | `{}` | | +| alertmanager.extraSecret.data | object | `{}` | | +| alertmanager.ingress.annotations | object | `{}` | | +| alertmanager.ingress.enabled | bool | `false` | | +| alertmanager.ingress.hosts | list | `[]` | | +| alertmanager.ingress.labels | object | `{}` | | +| alertmanager.ingress.paths | list | `[]` | | +| alertmanager.ingress.tls | list | `[]` | | +| alertmanager.ingressPerReplica.annotations | object | `{}` | | +| alertmanager.ingressPerReplica.enabled | bool | `false` | | +| alertmanager.ingressPerReplica.hostDomain | string | `""` | | +| alertmanager.ingressPerReplica.hostPrefix | string | `""` | | +| alertmanager.ingressPerReplica.labels | object | `{}` | | +| alertmanager.ingressPerReplica.paths | list | `[]` | | +| alertmanager.ingressPerReplica.tlsSecretName | string | `""` | | +| alertmanager.ingressPerReplica.tlsSecretPerReplica.enabled | bool | `false` | | +| alertmanager.ingressPerReplica.tlsSecretPerReplica.prefix | string | `"alertmanager"` | | +| alertmanager.podDisruptionBudget.enabled | bool | `false` | | +| alertmanager.podDisruptionBudget.maxUnavailable | string | `""` | | +| alertmanager.podDisruptionBudget.minAvailable | int | `1` | | +| alertmanager.secret.annotations | object | `{}` | | +| alertmanager.service.additionalPorts | list | `[]` | | +| alertmanager.service.annotations | object | `{}` | | +| alertmanager.service.clusterIP | string | `""` | | +| alertmanager.service.externalIPs | list | `[]` | | +| alertmanager.service.externalTrafficPolicy | string | `"Cluster"` | | +| alertmanager.service.labels | object | `{}` | | +| alertmanager.service.loadBalancerIP | string | `""` | | +| alertmanager.service.loadBalancerSourceRanges | list | `[]` | | +| alertmanager.service.nodePort | int | `30903` | | +| alertmanager.service.port | int | `9093` | | +| alertmanager.service.sessionAffinity | string | `""` | | +| alertmanager.service.targetPort | int | `9093` | | +| alertmanager.service.type | string | `"ClusterIP"` | | +| alertmanager.serviceAccount.annotations | object | `{}` | | +| alertmanager.serviceAccount.automountServiceAccountToken | bool | `true` | | +| alertmanager.serviceAccount.create | bool | `true` | | +| alertmanager.serviceAccount.name | string | `""` | | +| alertmanager.serviceMonitor.additionalLabels | object | `{}` | | +| 
alertmanager.serviceMonitor.bearerTokenFile | string | `nil` | | +| alertmanager.serviceMonitor.enableHttp2 | bool | `true` | | +| alertmanager.serviceMonitor.interval | string | `""` | | +| alertmanager.serviceMonitor.labelLimit | int | `0` | | +| alertmanager.serviceMonitor.labelNameLengthLimit | int | `0` | | +| alertmanager.serviceMonitor.labelValueLengthLimit | int | `0` | | +| alertmanager.serviceMonitor.metricRelabelings | list | `[]` | | +| alertmanager.serviceMonitor.proxyUrl | string | `""` | | +| alertmanager.serviceMonitor.relabelings | list | `[]` | | +| alertmanager.serviceMonitor.sampleLimit | int | `0` | | +| alertmanager.serviceMonitor.scheme | string | `""` | | +| alertmanager.serviceMonitor.selfMonitor | bool | `true` | | +| alertmanager.serviceMonitor.targetLimit | int | `0` | | +| alertmanager.serviceMonitor.tlsConfig | object | `{}` | | +| alertmanager.servicePerReplica.annotations | object | `{}` | | +| alertmanager.servicePerReplica.enabled | bool | `false` | | +| alertmanager.servicePerReplica.externalTrafficPolicy | string | `"Cluster"` | | +| alertmanager.servicePerReplica.loadBalancerSourceRanges | list | `[]` | | +| alertmanager.servicePerReplica.nodePort | int | `30904` | | +| alertmanager.servicePerReplica.port | int | `9093` | | +| alertmanager.servicePerReplica.targetPort | int | `9093` | | +| alertmanager.servicePerReplica.type | string | `"ClusterIP"` | | +| alertmanager.stringConfig | string | `""` | | +| alertmanager.templateFiles | object | `{}` | | +| alertmanager.tplConfig | bool | `false` | | +| cleanPrometheusOperatorObjectNames | bool | `false` | | +| commonLabels | object | `{}` | | +| coreDns.enabled | bool | `false` | | +| coreDns.service.port | int | `9153` | | +| coreDns.service.targetPort | int | `9153` | | +| coreDns.serviceMonitor.additionalLabels | object | `{}` | | +| coreDns.serviceMonitor.interval | string | `""` | | +| coreDns.serviceMonitor.labelLimit | int | `0` | | +| coreDns.serviceMonitor.labelNameLengthLimit | int | `0` | | +| coreDns.serviceMonitor.labelValueLengthLimit | int | `0` | | +| coreDns.serviceMonitor.metricRelabelings | list | `[]` | | +| coreDns.serviceMonitor.proxyUrl | string | `""` | | +| coreDns.serviceMonitor.relabelings | list | `[]` | | +| coreDns.serviceMonitor.sampleLimit | int | `0` | | +| coreDns.serviceMonitor.targetLimit | int | `0` | | +| crds.enabled | bool | `true` | | +| defaultRules.additionalRuleAnnotations | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.alertmanager | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.configReloaders | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.etcd | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.general | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.k8s | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeApiserverAvailability | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeApiserverBurnrate | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeApiserverHistogram | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeApiserverSlos | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeControllerManager | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubePrometheusGeneral | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubePrometheusNodeRecording | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeProxy | object | `{}` | | +| 
defaultRules.additionalRuleGroupAnnotations.kubeSchedulerAlerting | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeSchedulerRecording | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubeStateMetrics | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubelet | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubernetesApps | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubernetesResources | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubernetesStorage | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.kubernetesSystem | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.network | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.node | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.nodeExporterAlerting | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.nodeExporterRecording | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.prometheus | object | `{}` | | +| defaultRules.additionalRuleGroupAnnotations.prometheusOperator | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.alertmanager | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.configReloaders | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.etcd | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.general | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.k8s | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeApiserverAvailability | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeApiserverBurnrate | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeApiserverHistogram | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeApiserverSlos | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeControllerManager | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubePrometheusGeneral | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubePrometheusNodeRecording | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeProxy | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeSchedulerAlerting | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeSchedulerRecording | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubeStateMetrics | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubelet | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubernetesApps | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubernetesResources | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubernetesStorage | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.kubernetesSystem | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.network | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.node | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.nodeExporterAlerting | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.nodeExporterRecording | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.prometheus | object | `{}` | | +| defaultRules.additionalRuleGroupLabels.prometheusOperator | object | `{}` | | +| defaultRules.additionalRuleLabels | object | `{}` | | +| defaultRules.annotations | object | `{}` | | +| defaultRules.appNamespacesTarget | string | `".*"` | | +| defaultRules.create | bool | `true` | | +| defaultRules.disabled | object | `{}` | | +| defaultRules.labels | 
object | `{}` | | +| defaultRules.rules.alertmanager | bool | `false` | | +| defaultRules.rules.configReloaders | bool | `true` | | +| defaultRules.rules.etcd | bool | `true` | | +| defaultRules.rules.general | bool | `true` | | +| defaultRules.rules.k8s | bool | `true` | | +| defaultRules.rules.kubeApiserverAvailability | bool | `true` | | +| defaultRules.rules.kubeApiserverBurnrate | bool | `true` | | +| defaultRules.rules.kubeApiserverHistogram | bool | `true` | | +| defaultRules.rules.kubeApiserverSlos | bool | `true` | | +| defaultRules.rules.kubeControllerManager | bool | `true` | | +| defaultRules.rules.kubePrometheusGeneral | bool | `true` | | +| defaultRules.rules.kubePrometheusNodeRecording | bool | `true` | | +| defaultRules.rules.kubeProxy | bool | `true` | | +| defaultRules.rules.kubeSchedulerAlerting | bool | `true` | | +| defaultRules.rules.kubeSchedulerRecording | bool | `true` | | +| defaultRules.rules.kubeStateMetrics | bool | `true` | | +| defaultRules.rules.kubelet | bool | `true` | | +| defaultRules.rules.kubernetesApps | bool | `true` | | +| defaultRules.rules.kubernetesResources | bool | `true` | | +| defaultRules.rules.kubernetesStorage | bool | `true` | | +| defaultRules.rules.kubernetesSystem | bool | `true` | | +| defaultRules.rules.network | bool | `true` | | +| defaultRules.rules.node | bool | `true` | | +| defaultRules.rules.nodeExporterAlerting | bool | `true` | | +| defaultRules.rules.nodeExporterRecording | bool | `true` | | +| defaultRules.rules.prometheus | bool | `false` | | +| defaultRules.rules.prometheusOperator | bool | `false` | | +| defaultRules.rules.windows | bool | `true` | | +| defaultRules.runbookUrl | string | `"https://runbooks.prometheus-operator.dev/runbooks"` | | +| extraManifests | list | `[]` | | +| fullnameOverride | string | `""` | | +| global.imagePullSecrets | list | `[]` | | +| global.imageRegistry | string | `""` | | +| global.rbac.create | bool | `true` | | +| global.rbac.createAggregateClusterRoles | bool | `false` | | +| global.rbac.pspAnnotations | object | `{}` | | +| global.rbac.pspEnabled | bool | `false` | | +| grafana.additionalDataSources | list | `[]` | | +| grafana.adminPassword | string | `"prom-operator"` | | +| grafana.defaultDashboardsEnabled | bool | `true` | | +| grafana.defaultDashboardsTimezone | string | `"utc"` | | +| grafana.deleteDatasources | list | `[]` | | +| grafana.enabled | bool | `false` | | +| grafana.extraConfigmapMounts | list | `[]` | | +| grafana.forceDeployDashboards | bool | `false` | | +| grafana.forceDeployDatasources | bool | `false` | | +| grafana.ingress.annotations | object | `{}` | | +| grafana.ingress.enabled | bool | `false` | | +| grafana.ingress.hosts | list | `[]` | | +| grafana.ingress.labels | object | `{}` | | +| grafana.ingress.path | string | `"/"` | | +| grafana.ingress.tls | list | `[]` | | +| grafana.namespaceOverride | string | `""` | | +| grafana.rbac.pspEnabled | bool | `false` | | +| grafana.service.portName | string | `"http-web"` | | +| grafana.serviceMonitor.enabled | bool | `true` | | +| grafana.serviceMonitor.interval | string | `""` | | +| grafana.serviceMonitor.labels | object | `{}` | | +| grafana.serviceMonitor.path | string | `"/metrics"` | | +| grafana.serviceMonitor.relabelings | list | `[]` | | +| grafana.serviceMonitor.scheme | string | `"http"` | | +| grafana.serviceMonitor.scrapeTimeout | string | `"30s"` | | +| grafana.serviceMonitor.tlsConfig | object | `{}` | | +| grafana.sidecar.dashboards.annotations | object | `{}` | | +| 
grafana.sidecar.dashboards.enabled | bool | `true` | | +| grafana.sidecar.dashboards.label | string | `"grafana_dashboard"` | | +| grafana.sidecar.dashboards.labelValue | string | `"1"` | | +| grafana.sidecar.dashboards.multicluster.etcd.enabled | bool | `false` | | +| grafana.sidecar.dashboards.multicluster.global.enabled | bool | `false` | | +| grafana.sidecar.dashboards.provider.allowUiUpdates | bool | `false` | | +| grafana.sidecar.dashboards.searchNamespace | string | `"ALL"` | | +| grafana.sidecar.datasources.alertmanager.enabled | bool | `true` | | +| grafana.sidecar.datasources.alertmanager.handleGrafanaManagedAlerts | bool | `false` | | +| grafana.sidecar.datasources.alertmanager.implementation | string | `"prometheus"` | | +| grafana.sidecar.datasources.alertmanager.uid | string | `"alertmanager"` | | +| grafana.sidecar.datasources.annotations | object | `{}` | | +| grafana.sidecar.datasources.createPrometheusReplicasDatasources | bool | `false` | | +| grafana.sidecar.datasources.defaultDatasourceEnabled | bool | `true` | | +| grafana.sidecar.datasources.enabled | bool | `true` | | +| grafana.sidecar.datasources.exemplarTraceIdDestinations | object | `{}` | | +| grafana.sidecar.datasources.httpMethod | string | `"POST"` | | +| grafana.sidecar.datasources.isDefaultDatasource | bool | `true` | | +| grafana.sidecar.datasources.label | string | `"grafana_datasource"` | | +| grafana.sidecar.datasources.labelValue | string | `"1"` | | +| grafana.sidecar.datasources.uid | string | `"prometheus"` | | +| kube-state-metrics.namespaceOverride | string | `""` | | +| kube-state-metrics.prometheus.monitor.enabled | bool | `true` | | +| kube-state-metrics.prometheus.monitor.honorLabels | bool | `true` | | +| kube-state-metrics.prometheus.monitor.interval | string | `""` | | +| kube-state-metrics.prometheus.monitor.labelLimit | int | `0` | | +| kube-state-metrics.prometheus.monitor.labelNameLengthLimit | int | `0` | | +| kube-state-metrics.prometheus.monitor.labelValueLengthLimit | int | `0` | | +| kube-state-metrics.prometheus.monitor.metricRelabelings | list | `[]` | | +| kube-state-metrics.prometheus.monitor.proxyUrl | string | `""` | | +| kube-state-metrics.prometheus.monitor.relabelings | list | `[]` | | +| kube-state-metrics.prometheus.monitor.sampleLimit | int | `0` | | +| kube-state-metrics.prometheus.monitor.scrapeTimeout | string | `""` | | +| kube-state-metrics.prometheus.monitor.targetLimit | int | `0` | | +| kube-state-metrics.rbac.create | bool | `true` | | +| kube-state-metrics.releaseLabel | bool | `true` | | +| kube-state-metrics.selfMonitor.enabled | bool | `false` | | +| kubeApiServer.enabled | bool | `false` | | +| kubeApiServer.serviceMonitor.additionalLabels | object | `{}` | | +| kubeApiServer.serviceMonitor.interval | string | `""` | | +| kubeApiServer.serviceMonitor.jobLabel | string | `"component"` | | +| kubeApiServer.serviceMonitor.labelLimit | int | `0` | | +| kubeApiServer.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeApiServer.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeApiServer.serviceMonitor.metricRelabelings[0].action | string | `"drop"` | | +| kubeApiServer.serviceMonitor.metricRelabelings[0].regex | string | `"apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50)"` | | +| kubeApiServer.serviceMonitor.metricRelabelings[0].sourceLabels[0] | string | `"__name__"` | | +| kubeApiServer.serviceMonitor.metricRelabelings[0].sourceLabels[1] | string | `"le"` | 
| +| kubeApiServer.serviceMonitor.proxyUrl | string | `""` | | +| kubeApiServer.serviceMonitor.relabelings | list | `[]` | | +| kubeApiServer.serviceMonitor.sampleLimit | int | `0` | | +| kubeApiServer.serviceMonitor.selector.matchLabels.component | string | `"apiserver"` | | +| kubeApiServer.serviceMonitor.selector.matchLabels.provider | string | `"kubernetes"` | | +| kubeApiServer.serviceMonitor.targetLimit | int | `0` | | +| kubeApiServer.tlsConfig.insecureSkipVerify | bool | `false` | | +| kubeApiServer.tlsConfig.serverName | string | `"kubernetes"` | | +| kubeControllerManager.enabled | bool | `true` | | +| kubeControllerManager.endpoints | list | `[]` | | +| kubeControllerManager.service.enabled | bool | `true` | | +| kubeControllerManager.service.port | string | `nil` | | +| kubeControllerManager.service.targetPort | string | `nil` | | +| kubeControllerManager.serviceMonitor.additionalLabels | object | `{}` | | +| kubeControllerManager.serviceMonitor.enabled | bool | `true` | | +| kubeControllerManager.serviceMonitor.https | string | `nil` | | +| kubeControllerManager.serviceMonitor.insecureSkipVerify | string | `nil` | | +| kubeControllerManager.serviceMonitor.interval | string | `""` | | +| kubeControllerManager.serviceMonitor.labelLimit | int | `0` | | +| kubeControllerManager.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeControllerManager.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeControllerManager.serviceMonitor.metricRelabelings | list | `[]` | | +| kubeControllerManager.serviceMonitor.proxyUrl | string | `""` | | +| kubeControllerManager.serviceMonitor.relabelings | list | `[]` | | +| kubeControllerManager.serviceMonitor.sampleLimit | int | `0` | | +| kubeControllerManager.serviceMonitor.serverName | string | `nil` | | +| kubeControllerManager.serviceMonitor.targetLimit | int | `0` | | +| kubeDns.enabled | bool | `false` | | +| kubeDns.service.dnsmasq.port | int | `10054` | | +| kubeDns.service.dnsmasq.targetPort | int | `10054` | | +| kubeDns.service.skydns.port | int | `10055` | | +| kubeDns.service.skydns.targetPort | int | `10055` | | +| kubeDns.serviceMonitor.additionalLabels | object | `{}` | | +| kubeDns.serviceMonitor.dnsmasqMetricRelabelings | list | `[]` | | +| kubeDns.serviceMonitor.dnsmasqRelabelings | list | `[]` | | +| kubeDns.serviceMonitor.interval | string | `""` | | +| kubeDns.serviceMonitor.labelLimit | int | `0` | | +| kubeDns.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeDns.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeDns.serviceMonitor.metricRelabelings | list | `[]` | | +| kubeDns.serviceMonitor.proxyUrl | string | `""` | | +| kubeDns.serviceMonitor.relabelings | list | `[]` | | +| kubeDns.serviceMonitor.sampleLimit | int | `0` | | +| kubeDns.serviceMonitor.targetLimit | int | `0` | | +| kubeEtcd.enabled | bool | `true` | | +| kubeEtcd.endpoints | list | `[]` | | +| kubeEtcd.service.enabled | bool | `true` | | +| kubeEtcd.service.port | int | `2381` | | +| kubeEtcd.service.targetPort | int | `2381` | | +| kubeEtcd.serviceMonitor.additionalLabels | object | `{}` | | +| kubeEtcd.serviceMonitor.caFile | string | `""` | | +| kubeEtcd.serviceMonitor.certFile | string | `""` | | +| kubeEtcd.serviceMonitor.enabled | bool | `true` | | +| kubeEtcd.serviceMonitor.insecureSkipVerify | bool | `false` | | +| kubeEtcd.serviceMonitor.interval | string | `""` | | +| kubeEtcd.serviceMonitor.keyFile | string | `""` | | +| kubeEtcd.serviceMonitor.labelLimit | int | `0` | | +| 
kubeEtcd.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeEtcd.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeEtcd.serviceMonitor.metricRelabelings | list | `[]` | | +| kubeEtcd.serviceMonitor.proxyUrl | string | `""` | | +| kubeEtcd.serviceMonitor.relabelings | list | `[]` | | +| kubeEtcd.serviceMonitor.sampleLimit | int | `0` | | +| kubeEtcd.serviceMonitor.scheme | string | `"http"` | | +| kubeEtcd.serviceMonitor.serverName | string | `""` | | +| kubeEtcd.serviceMonitor.targetLimit | int | `0` | | +| kubeProxy.enabled | bool | `true` | | +| kubeProxy.endpoints | list | `[]` | | +| kubeProxy.service.enabled | bool | `true` | | +| kubeProxy.service.port | int | `10249` | | +| kubeProxy.service.targetPort | int | `10249` | | +| kubeProxy.serviceMonitor.additionalLabels | object | `{}` | | +| kubeProxy.serviceMonitor.enabled | bool | `true` | | +| kubeProxy.serviceMonitor.https | bool | `false` | | +| kubeProxy.serviceMonitor.interval | string | `""` | | +| kubeProxy.serviceMonitor.labelLimit | int | `0` | | +| kubeProxy.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeProxy.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeProxy.serviceMonitor.metricRelabelings | list | `[]` | | +| kubeProxy.serviceMonitor.proxyUrl | string | `""` | | +| kubeProxy.serviceMonitor.relabelings | list | `[]` | | +| kubeProxy.serviceMonitor.sampleLimit | int | `0` | | +| kubeProxy.serviceMonitor.targetLimit | int | `0` | | +| kubeScheduler.enabled | bool | `true` | | +| kubeScheduler.endpoints | list | `[]` | | +| kubeScheduler.service.enabled | bool | `true` | | +| kubeScheduler.service.port | string | `nil` | | +| kubeScheduler.service.targetPort | string | `nil` | | +| kubeScheduler.serviceMonitor.additionalLabels | object | `{}` | | +| kubeScheduler.serviceMonitor.enabled | bool | `true` | | +| kubeScheduler.serviceMonitor.https | string | `nil` | | +| kubeScheduler.serviceMonitor.insecureSkipVerify | string | `nil` | | +| kubeScheduler.serviceMonitor.interval | string | `""` | | +| kubeScheduler.serviceMonitor.labelLimit | int | `0` | | +| kubeScheduler.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubeScheduler.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubeScheduler.serviceMonitor.metricRelabelings | list | `[]` | | +| kubeScheduler.serviceMonitor.proxyUrl | string | `""` | | +| kubeScheduler.serviceMonitor.relabelings | list | `[]` | | +| kubeScheduler.serviceMonitor.sampleLimit | int | `0` | | +| kubeScheduler.serviceMonitor.serverName | string | `nil` | | +| kubeScheduler.serviceMonitor.targetLimit | int | `0` | | +| kubeStateMetrics.enabled | bool | `true` | | +| kubeTargetVersionOverride | string | `""` | | +| kubeVersionOverride | string | `""` | | +| kubelet.enabled | bool | `true` | | +| kubelet.namespace | string | `"kube-system"` | | +| kubelet.serviceMonitor.additionalLabels | object | `{}` | | +| kubelet.serviceMonitor.cAdvisor | bool | `true` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[0].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[0].regex | string | `"container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[0].sourceLabels[0] | string | `"__name__"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[1].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[1].regex | string | 
`"container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[1].sourceLabels[0] | string | `"__name__"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[2].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[2].regex | string | `"container_memory_(mapped_file|swap)"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[2].sourceLabels[0] | string | `"__name__"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[3].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[3].regex | string | `"container_(file_descriptors|tasks_state|threads_max)"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[3].sourceLabels[0] | string | `"__name__"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[4].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[4].regex | string | `"container_spec.*"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[4].sourceLabels[0] | string | `"__name__"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[5].action | string | `"drop"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[5].regex | string | `".+;"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[5].sourceLabels[0] | string | `"id"` | | +| kubelet.serviceMonitor.cAdvisorMetricRelabelings[5].sourceLabels[1] | string | `"pod"` | | +| kubelet.serviceMonitor.cAdvisorRelabelings[0].action | string | `"replace"` | | +| kubelet.serviceMonitor.cAdvisorRelabelings[0].sourceLabels[0] | string | `"__metrics_path__"` | | +| kubelet.serviceMonitor.cAdvisorRelabelings[0].targetLabel | string | `"metrics_path"` | | +| kubelet.serviceMonitor.https | bool | `true` | | +| kubelet.serviceMonitor.interval | string | `""` | | +| kubelet.serviceMonitor.labelLimit | int | `0` | | +| kubelet.serviceMonitor.labelNameLengthLimit | int | `0` | | +| kubelet.serviceMonitor.labelValueLengthLimit | int | `0` | | +| kubelet.serviceMonitor.metricRelabelings | list | `[]` | | +| kubelet.serviceMonitor.probes | bool | `true` | | +| kubelet.serviceMonitor.probesMetricRelabelings | list | `[]` | | +| kubelet.serviceMonitor.probesRelabelings[0].action | string | `"replace"` | | +| kubelet.serviceMonitor.probesRelabelings[0].sourceLabels[0] | string | `"__metrics_path__"` | | +| kubelet.serviceMonitor.probesRelabelings[0].targetLabel | string | `"metrics_path"` | | +| kubelet.serviceMonitor.proxyUrl | string | `""` | | +| kubelet.serviceMonitor.relabelings[0].action | string | `"replace"` | | +| kubelet.serviceMonitor.relabelings[0].sourceLabels[0] | string | `"__metrics_path__"` | | +| kubelet.serviceMonitor.relabelings[0].targetLabel | string | `"metrics_path"` | | +| kubelet.serviceMonitor.resource | bool | `false` | | +| kubelet.serviceMonitor.resourcePath | string | `"/metrics/resource/v1alpha1"` | | +| kubelet.serviceMonitor.resourceRelabelings[0].action | string | `"replace"` | | +| kubelet.serviceMonitor.resourceRelabelings[0].sourceLabels[0] | string | `"__metrics_path__"` | | +| kubelet.serviceMonitor.resourceRelabelings[0].targetLabel | string | `"metrics_path"` | | +| kubelet.serviceMonitor.sampleLimit | int | `0` | | +| kubelet.serviceMonitor.targetLimit | int | `0` | | +| kubernetesServiceMonitors.enabled | bool | `true` | | +| nameOverride | string | `""` | | +| namespaceOverride | string | `""` | | +| nodeExporter.enabled | bool | `true` | | +| 
prometheus-node-exporter.extraArgs[0] | string | `"--collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)"` | | +| prometheus-node-exporter.extraArgs[1] | string | `"--collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"` | | +| prometheus-node-exporter.namespaceOverride | string | `""` | | +| prometheus-node-exporter.podLabels.jobLabel | string | `"node-exporter"` | | +| prometheus-node-exporter.prometheus.monitor.enabled | bool | `true` | | +| prometheus-node-exporter.prometheus.monitor.interval | string | `""` | | +| prometheus-node-exporter.prometheus.monitor.jobLabel | string | `"jobLabel"` | | +| prometheus-node-exporter.prometheus.monitor.labelLimit | int | `0` | | +| prometheus-node-exporter.prometheus.monitor.labelNameLengthLimit | int | `0` | | +| prometheus-node-exporter.prometheus.monitor.labelValueLengthLimit | int | `0` | | +| prometheus-node-exporter.prometheus.monitor.metricRelabelings | list | `[]` | | +| prometheus-node-exporter.prometheus.monitor.proxyUrl | string | `""` | | +| prometheus-node-exporter.prometheus.monitor.relabelings | list | `[]` | | +| prometheus-node-exporter.prometheus.monitor.sampleLimit | int | `0` | | +| prometheus-node-exporter.prometheus.monitor.scrapeTimeout | string | `""` | | +| prometheus-node-exporter.prometheus.monitor.targetLimit | int | `0` | | +| prometheus-node-exporter.rbac.pspEnabled | bool | `false` | | +| prometheus-node-exporter.releaseLabel | bool | `true` | | +| prometheus-node-exporter.service.portName | string | `"http-metrics"` | | +| prometheus.additionalPodMonitors | list | `[]` | | +| prometheus.additionalRulesForClusterRole | list | `[]` | | +| prometheus.additionalServiceMonitors | list | `[]` | | +| prometheus.agentMode | bool | `false` | | +| prometheus.annotations | object | `{}` | | +| prometheus.enabled | bool | `true` | | +| prometheus.extraSecret.annotations | object | `{}` | | +| prometheus.extraSecret.data | object | `{}` | | +| prometheus.ingress.annotations | object | `{}` | | +| prometheus.ingress.enabled | bool | `false` | | +| prometheus.ingress.hosts | list | `[]` | | +| prometheus.ingress.labels | object | `{}` | | +| prometheus.ingress.paths | list | `[]` | | +| prometheus.ingress.tls | list | `[]` | | +| prometheus.ingressPerReplica.annotations | object | `{}` | | +| prometheus.ingressPerReplica.enabled | bool | `false` | | +| prometheus.ingressPerReplica.hostDomain | string | `""` | | +| prometheus.ingressPerReplica.hostPrefix | string | `""` | | +| prometheus.ingressPerReplica.labels | object | `{}` | | +| prometheus.ingressPerReplica.paths | list | `[]` | | +| prometheus.ingressPerReplica.tlsSecretName | string | `""` | | +| prometheus.ingressPerReplica.tlsSecretPerReplica.enabled | bool | `false` | | +| prometheus.ingressPerReplica.tlsSecretPerReplica.prefix | string | `"prometheus"` | | +| prometheus.networkPolicy.enabled | bool | `false` | | +| prometheus.networkPolicy.flavor | string | `"kubernetes"` | | +| prometheus.podDisruptionBudget.enabled | bool | `false` | | +| prometheus.podDisruptionBudget.maxUnavailable | string | `""` | | +| prometheus.podDisruptionBudget.minAvailable | int | `1` | | +| prometheus.podSecurityPolicy.allowedCapabilities | list | `[]` | | +| prometheus.podSecurityPolicy.allowedHostPaths | list | `[]` | | +| prometheus.podSecurityPolicy.volumes | list | 
`[]` | | +| prometheus.prometheusSpec.additionalAlertManagerConfigs | list | `[]` | | +| prometheus.prometheusSpec.additionalAlertManagerConfigsSecret | object | `{}` | | +| prometheus.prometheusSpec.additionalAlertRelabelConfigs | list | `[]` | | +| prometheus.prometheusSpec.additionalAlertRelabelConfigsSecret | object | `{}` | | +| prometheus.prometheusSpec.additionalArgs | list | `[]` | | +| prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | object | `{}` | | +| prometheus.prometheusSpec.additionalRemoteRead | list | `[]` | | +| prometheus.prometheusSpec.additionalRemoteWrite | list | `[]` | | +| prometheus.prometheusSpec.additionalScrapeConfigsSecret | object | `{}` | | +| prometheus.prometheusSpec.additionalScrapeConfigs[0].job_name | string | `"prometheus"` | | +| prometheus.prometheusSpec.additionalScrapeConfigs[0].scrape_interval | string | `"5s"` | | +| prometheus.prometheusSpec.additionalScrapeConfigs[0].static_configs[0].targets[0] | string | `"localhost:9090"` | | +| prometheus.prometheusSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| prometheus.prometheusSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| prometheus.prometheusSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| prometheus.prometheusSpec.alertingEndpoints | list | `[]` | | +| prometheus.prometheusSpec.allowOverlappingBlocks | bool | `false` | | +| prometheus.prometheusSpec.apiserverConfig | object | `{}` | | +| prometheus.prometheusSpec.arbitraryFSAccessThroughSMs | bool | `false` | | +| prometheus.prometheusSpec.configMaps | list | `[]` | | +| prometheus.prometheusSpec.containers | list | `[]` | | +| prometheus.prometheusSpec.disableCompaction | bool | `false` | | +| prometheus.prometheusSpec.enableAdminAPI | bool | `false` | | +| prometheus.prometheusSpec.enableFeatures | list | `[]` | | +| prometheus.prometheusSpec.enableRemoteWriteReceiver | bool | `false` | | +| prometheus.prometheusSpec.enforcedLabelLimit | bool | `false` | | +| prometheus.prometheusSpec.enforcedLabelNameLengthLimit | bool | `false` | | +| prometheus.prometheusSpec.enforcedLabelValueLengthLimit | bool | `false` | | +| prometheus.prometheusSpec.enforcedNamespaceLabel | string | `""` | | +| prometheus.prometheusSpec.enforcedSampleLimit | bool | `false` | | +| prometheus.prometheusSpec.enforcedTargetLimit | bool | `false` | | +| prometheus.prometheusSpec.evaluationInterval | string | `""` | | +| prometheus.prometheusSpec.excludedFromEnforcement | list | `[]` | | +| prometheus.prometheusSpec.exemplars | string | `""` | | +| prometheus.prometheusSpec.externalLabels | object | `{}` | | +| prometheus.prometheusSpec.externalUrl | string | `""` | | +| prometheus.prometheusSpec.hostAliases | list | `[]` | | +| prometheus.prometheusSpec.hostNetwork | bool | `false` | | +| prometheus.prometheusSpec.ignoreNamespaceSelectors | bool | `false` | | +| prometheus.prometheusSpec.image.registry | string | `"quay.io"` | | +| prometheus.prometheusSpec.image.repository | string | `"prometheus/prometheus"` | | +| prometheus.prometheusSpec.image.sha | string | `""` | | +| prometheus.prometheusSpec.image.tag | string | `"v2.45.0"` | | +| prometheus.prometheusSpec.initContainers | list | `[]` | | +| prometheus.prometheusSpec.listenLocal | bool | 
`false` | | +| prometheus.prometheusSpec.logFormat | string | `"logfmt"` | | +| prometheus.prometheusSpec.logLevel | string | `"info"` | | +| prometheus.prometheusSpec.minReadySeconds | int | `0` | | +| prometheus.prometheusSpec.nodeSelector | object | `{}` | | +| prometheus.prometheusSpec.overrideHonorLabels | bool | `false` | | +| prometheus.prometheusSpec.overrideHonorTimestamps | bool | `false` | | +| prometheus.prometheusSpec.paused | bool | `false` | | +| prometheus.prometheusSpec.podAntiAffinity | string | `""` | | +| prometheus.prometheusSpec.podAntiAffinityTopologyKey | string | `"kubernetes.io/hostname"` | | +| prometheus.prometheusSpec.podMetadata | object | `{}` | | +| prometheus.prometheusSpec.podMonitorNamespaceSelector | object | `{}` | | +| prometheus.prometheusSpec.podMonitorSelector | object | `{}` | | +| prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues | bool | `true` | | +| prometheus.prometheusSpec.portName | string | `"http-web"` | | +| prometheus.prometheusSpec.priorityClassName | string | `""` | | +| prometheus.prometheusSpec.probeNamespaceSelector | object | `{}` | | +| prometheus.prometheusSpec.probeSelector | object | `{}` | | +| prometheus.prometheusSpec.probeSelectorNilUsesHelmValues | bool | `true` | | +| prometheus.prometheusSpec.prometheusExternalLabelName | string | `""` | | +| prometheus.prometheusSpec.prometheusExternalLabelNameClear | bool | `false` | | +| prometheus.prometheusSpec.prometheusRulesExcludedFromEnforce | list | `[]` | | +| prometheus.prometheusSpec.query | object | `{}` | | +| prometheus.prometheusSpec.queryLogFile | bool | `false` | | +| prometheus.prometheusSpec.remoteRead | list | `[]` | | +| prometheus.prometheusSpec.remoteWrite | list | `[]` | | +| prometheus.prometheusSpec.remoteWriteDashboards | bool | `false` | | +| prometheus.prometheusSpec.replicaExternalLabelName | string | `""` | | +| prometheus.prometheusSpec.replicaExternalLabelNameClear | bool | `false` | | +| prometheus.prometheusSpec.replicas | int | `1` | | +| prometheus.prometheusSpec.resources | object | `{}` | | +| prometheus.prometheusSpec.retention | string | `"10d"` | | +| prometheus.prometheusSpec.retentionSize | string | `""` | | +| prometheus.prometheusSpec.routePrefix | string | `"/"` | | +| prometheus.prometheusSpec.ruleNamespaceSelector | object | `{}` | | +| prometheus.prometheusSpec.ruleSelector | object | `{}` | | +| prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues | bool | `true` | | +| prometheus.prometheusSpec.scrapeConfigNamespaceSelector | object | `{}` | | +| prometheus.prometheusSpec.scrapeConfigSelector | object | `{}` | | +| prometheus.prometheusSpec.scrapeConfigSelectorNilUsesHelmValues | bool | `true` | | +| prometheus.prometheusSpec.scrapeInterval | string | `""` | | +| prometheus.prometheusSpec.scrapeTimeout | string | `""` | | +| prometheus.prometheusSpec.secrets | list | `[]` | | +| prometheus.prometheusSpec.securityContext.fsGroup | int | `2000` | | +| prometheus.prometheusSpec.securityContext.runAsGroup | int | `2000` | | +| prometheus.prometheusSpec.securityContext.runAsNonRoot | bool | `true` | | +| prometheus.prometheusSpec.securityContext.runAsUser | int | `1000` | | +| prometheus.prometheusSpec.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| prometheus.prometheusSpec.serviceMonitorNamespaceSelector | object | `{}` | | +| prometheus.prometheusSpec.serviceMonitorSelector | object | `{}` | | +| prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues | bool | `true` | | +| 
prometheus.prometheusSpec.shards | int | `1` | | +| prometheus.prometheusSpec.storageSpec | object | `{}` | | +| prometheus.prometheusSpec.thanos | object | `{}` | | +| prometheus.prometheusSpec.tolerations[0].effect | string | `"PreferNoSchedule"` | | +| prometheus.prometheusSpec.tolerations[0].key | string | `"monitoring"` | | +| prometheus.prometheusSpec.tolerations[0].operator | string | `"Equal"` | | +| prometheus.prometheusSpec.tolerations[0].value | string | `"true"` | | +| prometheus.prometheusSpec.topologySpreadConstraints | list | `[]` | | +| prometheus.prometheusSpec.tracingConfig | object | `{}` | | +| prometheus.prometheusSpec.tsdb.outOfOrderTimeWindow | string | `"0s"` | | +| prometheus.prometheusSpec.version | string | `""` | | +| prometheus.prometheusSpec.volumeMounts | list | `[]` | | +| prometheus.prometheusSpec.volumes | list | `[]` | | +| prometheus.prometheusSpec.walCompression | bool | `true` | | +| prometheus.prometheusSpec.web | object | `{}` | | +| prometheus.service.additionalPorts | list | `[]` | | +| prometheus.service.annotations | object | `{}` | | +| prometheus.service.clusterIP | string | `""` | | +| prometheus.service.externalIPs | list | `[]` | | +| prometheus.service.externalTrafficPolicy | string | `"Cluster"` | | +| prometheus.service.labels | object | `{}` | | +| prometheus.service.loadBalancerIP | string | `""` | | +| prometheus.service.loadBalancerSourceRanges | list | `[]` | | +| prometheus.service.nodePort | int | `30090` | | +| prometheus.service.port | int | `9090` | | +| prometheus.service.publishNotReadyAddresses | bool | `false` | | +| prometheus.service.sessionAffinity | string | `""` | | +| prometheus.service.targetPort | int | `9090` | | +| prometheus.service.type | string | `"ClusterIP"` | | +| prometheus.serviceAccount.annotations | object | `{}` | | +| prometheus.serviceAccount.create | bool | `true` | | +| prometheus.serviceAccount.name | string | `""` | | +| prometheus.serviceMonitor.additionalLabels | object | `{}` | | +| prometheus.serviceMonitor.bearerTokenFile | string | `nil` | | +| prometheus.serviceMonitor.interval | string | `""` | | +| prometheus.serviceMonitor.labelLimit | int | `0` | | +| prometheus.serviceMonitor.labelNameLengthLimit | int | `0` | | +| prometheus.serviceMonitor.labelValueLengthLimit | int | `0` | | +| prometheus.serviceMonitor.metricRelabelings | list | `[]` | | +| prometheus.serviceMonitor.relabelings | list | `[]` | | +| prometheus.serviceMonitor.sampleLimit | int | `0` | | +| prometheus.serviceMonitor.scheme | string | `""` | | +| prometheus.serviceMonitor.selfMonitor | bool | `true` | | +| prometheus.serviceMonitor.targetLimit | int | `0` | | +| prometheus.serviceMonitor.tlsConfig | object | `{}` | | +| prometheus.servicePerReplica.annotations | object | `{}` | | +| prometheus.servicePerReplica.enabled | bool | `false` | | +| prometheus.servicePerReplica.externalTrafficPolicy | string | `"Cluster"` | | +| prometheus.servicePerReplica.loadBalancerSourceRanges | list | `[]` | | +| prometheus.servicePerReplica.nodePort | int | `30091` | | +| prometheus.servicePerReplica.port | int | `9090` | | +| prometheus.servicePerReplica.targetPort | int | `9090` | | +| prometheus.servicePerReplica.type | string | `"ClusterIP"` | | +| prometheus.service_lb.additionalPorts | list | `[]` | | +| prometheus.service_lb.annotations."cloud.google.com/load-balancer-type" | string | `"internal"` | | +| prometheus.service_lb.annotations."external-dns.alpha.kubernetes.io/hostname" | string | `"prometheus.juno.dev"` | | +| 
prometheus.service_lb.annotations."networking.gke.io/internal-load-balancer-allow-global-access" | string | `"true"` | | +| prometheus.service_lb.clusterIP | string | `""` | | +| prometheus.service_lb.enabled | bool | `false` | | +| prometheus.service_lb.externalIPs | list | `[]` | | +| prometheus.service_lb.externalTrafficPolicy | string | `"Cluster"` | | +| prometheus.service_lb.labels | object | `{}` | | +| prometheus.service_lb.loadBalancerIP | string | `""` | | +| prometheus.service_lb.loadBalancerSourceRanges | list | `[]` | | +| prometheus.service_lb.nodePort | int | `30090` | | +| prometheus.service_lb.port | int | `80` | | +| prometheus.service_lb.publishNotReadyAddresses | bool | `false` | | +| prometheus.service_lb.sessionAffinity | string | `""` | | +| prometheus.service_lb.targetPort | int | `9090` | | +| prometheus.service_lb.type | string | `"LoadBalancer"` | | +| prometheus.thanosIngress.annotations | object | `{}` | | +| prometheus.thanosIngress.enabled | bool | `false` | | +| prometheus.thanosIngress.hosts | list | `[]` | | +| prometheus.thanosIngress.labels | object | `{}` | | +| prometheus.thanosIngress.nodePort | int | `30901` | | +| prometheus.thanosIngress.paths | list | `[]` | | +| prometheus.thanosIngress.servicePort | int | `10901` | | +| prometheus.thanosIngress.tls | list | `[]` | | +| prometheus.thanosService.annotations | object | `{}` | | +| prometheus.thanosService.clusterIP | string | `"None"` | | +| prometheus.thanosService.enabled | bool | `false` | | +| prometheus.thanosService.externalTrafficPolicy | string | `"Cluster"` | | +| prometheus.thanosService.httpNodePort | int | `30902` | | +| prometheus.thanosService.httpPort | int | `10902` | | +| prometheus.thanosService.httpPortName | string | `"http"` | | +| prometheus.thanosService.labels | object | `{}` | | +| prometheus.thanosService.nodePort | int | `30901` | | +| prometheus.thanosService.port | int | `10901` | | +| prometheus.thanosService.portName | string | `"grpc"` | | +| prometheus.thanosService.targetHttpPort | string | `"http"` | | +| prometheus.thanosService.targetPort | string | `"grpc"` | | +| prometheus.thanosService.type | string | `"ClusterIP"` | | +| prometheus.thanosServiceExternal.annotations | object | `{}` | | +| prometheus.thanosServiceExternal.enabled | bool | `false` | | +| prometheus.thanosServiceExternal.externalTrafficPolicy | string | `"Cluster"` | | +| prometheus.thanosServiceExternal.httpNodePort | int | `30902` | | +| prometheus.thanosServiceExternal.httpPort | int | `10902` | | +| prometheus.thanosServiceExternal.httpPortName | string | `"http"` | | +| prometheus.thanosServiceExternal.labels | object | `{}` | | +| prometheus.thanosServiceExternal.loadBalancerIP | string | `""` | | +| prometheus.thanosServiceExternal.loadBalancerSourceRanges | list | `[]` | | +| prometheus.thanosServiceExternal.nodePort | int | `30901` | | +| prometheus.thanosServiceExternal.port | int | `10901` | | +| prometheus.thanosServiceExternal.portName | string | `"grpc"` | | +| prometheus.thanosServiceExternal.targetHttpPort | string | `"http"` | | +| prometheus.thanosServiceExternal.targetPort | string | `"grpc"` | | +| prometheus.thanosServiceExternal.type | string | `"LoadBalancer"` | | +| prometheus.thanosServiceMonitor.additionalLabels | object | `{}` | | +| prometheus.thanosServiceMonitor.bearerTokenFile | string | `nil` | | +| prometheus.thanosServiceMonitor.enabled | bool | `false` | | +| prometheus.thanosServiceMonitor.interval | string | `""` | | +| 
prometheus.thanosServiceMonitor.metricRelabelings | list | `[]` | | +| prometheus.thanosServiceMonitor.relabelings | list | `[]` | | +| prometheus.thanosServiceMonitor.scheme | string | `""` | | +| prometheus.thanosServiceMonitor.tlsConfig | object | `{}` | | +| prometheusOperator.admissionWebhooks.annotations | object | `{}` | | +| prometheusOperator.admissionWebhooks.caBundle | string | `""` | | +| prometheusOperator.admissionWebhooks.certManager.admissionCert.duration | string | `""` | | +| prometheusOperator.admissionWebhooks.certManager.enabled | bool | `false` | | +| prometheusOperator.admissionWebhooks.certManager.rootCert.duration | string | `""` | | +| prometheusOperator.admissionWebhooks.createSecretJob.securityContext.allowPrivilegeEscalation | bool | `false` | | +| prometheusOperator.admissionWebhooks.createSecretJob.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| prometheusOperator.admissionWebhooks.createSecretJob.securityContext.readOnlyRootFilesystem | bool | `true` | | +| prometheusOperator.admissionWebhooks.enabled | bool | `true` | | +| prometheusOperator.admissionWebhooks.failurePolicy | string | `""` | | +| prometheusOperator.admissionWebhooks.patch.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| prometheusOperator.admissionWebhooks.patch.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| prometheusOperator.admissionWebhooks.patch.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| prometheusOperator.admissionWebhooks.patch.annotations | object | `{}` | | +| prometheusOperator.admissionWebhooks.patch.enabled | bool | `true` | | +| prometheusOperator.admissionWebhooks.patch.image.pullPolicy | string | `"IfNotPresent"` | | +| prometheusOperator.admissionWebhooks.patch.image.registry | string | `"registry.k8s.io"` | | +| prometheusOperator.admissionWebhooks.patch.image.repository | string | `"ingress-nginx/kube-webhook-certgen"` | | +| prometheusOperator.admissionWebhooks.patch.image.sha | string | `""` | | +| prometheusOperator.admissionWebhooks.patch.image.tag | string | `"v20221220-controller-v1.5.1-58-g787ea74b6"` | | +| prometheusOperator.admissionWebhooks.patch.nodeSelector | object | `{}` | | +| prometheusOperator.admissionWebhooks.patch.podAnnotations | object | `{}` | | +| prometheusOperator.admissionWebhooks.patch.priorityClassName | string | `""` | | +| prometheusOperator.admissionWebhooks.patch.resources | object | `{}` | | +| prometheusOperator.admissionWebhooks.patch.securityContext.runAsGroup | int | `2000` | | +| prometheusOperator.admissionWebhooks.patch.securityContext.runAsNonRoot | bool | `true` | | +| prometheusOperator.admissionWebhooks.patch.securityContext.runAsUser | int | `2000` | | +| prometheusOperator.admissionWebhooks.patch.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| prometheusOperator.admissionWebhooks.patch.tolerations[0].effect | string | `"PreferNoSchedule"` | | +| prometheusOperator.admissionWebhooks.patch.tolerations[0].key | string | `"monitoring"` | | +| prometheusOperator.admissionWebhooks.patch.tolerations[0].operator | string | `"Equal"` | | +| prometheusOperator.admissionWebhooks.patch.tolerations[0].value | string | `"true"` | | +| 
prometheusOperator.admissionWebhooks.patchWebhookJob.securityContext.allowPrivilegeEscalation | bool | `false` | | +| prometheusOperator.admissionWebhooks.patchWebhookJob.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| prometheusOperator.admissionWebhooks.patchWebhookJob.securityContext.readOnlyRootFilesystem | bool | `true` | | +| prometheusOperator.admissionWebhooks.timeoutSeconds | int | `10` | | +| prometheusOperator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| prometheusOperator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| prometheusOperator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| prometheusOperator.alertmanagerConfigNamespaces | list | `[]` | | +| prometheusOperator.alertmanagerInstanceNamespaces | list | `[]` | | +| prometheusOperator.alertmanagerInstanceSelector | string | `""` | | +| prometheusOperator.annotations | object | `{}` | | +| prometheusOperator.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| prometheusOperator.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| prometheusOperator.containerSecurityContext.readOnlyRootFilesystem | bool | `true` | | +| prometheusOperator.denyNamespaces | list | `[]` | | +| prometheusOperator.dnsConfig | object | `{}` | | +| prometheusOperator.enabled | bool | `true` | | +| prometheusOperator.hostNetwork | bool | `false` | | +| prometheusOperator.image.pullPolicy | string | `"IfNotPresent"` | | +| prometheusOperator.image.registry | string | `"quay.io"` | | +| prometheusOperator.image.repository | string | `"prometheus-operator/prometheus-operator"` | | +| prometheusOperator.image.sha | string | `""` | | +| prometheusOperator.image.tag | string | `""` | | +| prometheusOperator.kubeletService.enabled | bool | `true` | | +| prometheusOperator.kubeletService.name | string | `""` | | +| prometheusOperator.kubeletService.namespace | string | `"kube-system"` | | +| prometheusOperator.labels | object | `{}` | | +| prometheusOperator.namespaces | object | `{}` | | +| prometheusOperator.networkPolicy.enabled | bool | `false` | | +| prometheusOperator.networkPolicy.flavor | string | `"kubernetes"` | | +| prometheusOperator.nodeSelector | object | `{}` | | +| prometheusOperator.podAnnotations | object | `{}` | | +| prometheusOperator.podLabels | object | `{}` | | +| prometheusOperator.prometheusConfigReloader.enableProbe | bool | `false` | | +| prometheusOperator.prometheusConfigReloader.image.registry | string | `"quay.io"` | | +| prometheusOperator.prometheusConfigReloader.image.repository | string | `"prometheus-operator/prometheus-config-reloader"` | | +| prometheusOperator.prometheusConfigReloader.image.sha | string | `""` | | +| prometheusOperator.prometheusConfigReloader.image.tag | string | `""` | | +| prometheusOperator.prometheusConfigReloader.resources.limits.cpu | string | `"200m"` | | +| prometheusOperator.prometheusConfigReloader.resources.limits.memory | string | `"50Mi"` | | +| prometheusOperator.prometheusConfigReloader.resources.requests.cpu | string | `"200m"` | | +| prometheusOperator.prometheusConfigReloader.resources.requests.memory | string | `"50Mi"` | | +| prometheusOperator.prometheusInstanceNamespaces | list | `[]` | | +| 
prometheusOperator.prometheusInstanceSelector | string | `""` | | +| prometheusOperator.resources | object | `{}` | | +| prometheusOperator.revisionHistoryLimit | int | `10` | | +| prometheusOperator.secretFieldSelector | string | `"type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"` | | +| prometheusOperator.securityContext.fsGroup | int | `65534` | | +| prometheusOperator.securityContext.runAsGroup | int | `65534` | | +| prometheusOperator.securityContext.runAsNonRoot | bool | `true` | | +| prometheusOperator.securityContext.runAsUser | int | `65534` | | +| prometheusOperator.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| prometheusOperator.service.additionalPorts | list | `[]` | | +| prometheusOperator.service.annotations | object | `{}` | | +| prometheusOperator.service.clusterIP | string | `""` | | +| prometheusOperator.service.externalIPs | list | `[]` | | +| prometheusOperator.service.externalTrafficPolicy | string | `"Cluster"` | | +| prometheusOperator.service.labels | object | `{}` | | +| prometheusOperator.service.loadBalancerIP | string | `""` | | +| prometheusOperator.service.loadBalancerSourceRanges | list | `[]` | | +| prometheusOperator.service.nodePort | int | `30080` | | +| prometheusOperator.service.nodePortTls | int | `30443` | | +| prometheusOperator.service.type | string | `"ClusterIP"` | | +| prometheusOperator.serviceAccount.create | bool | `true` | | +| prometheusOperator.serviceAccount.name | string | `""` | | +| prometheusOperator.serviceMonitor.additionalLabels | object | `{}` | | +| prometheusOperator.serviceMonitor.interval | string | `""` | | +| prometheusOperator.serviceMonitor.labelLimit | int | `0` | | +| prometheusOperator.serviceMonitor.labelNameLengthLimit | int | `0` | | +| prometheusOperator.serviceMonitor.labelValueLengthLimit | int | `0` | | +| prometheusOperator.serviceMonitor.metricRelabelings | list | `[]` | | +| prometheusOperator.serviceMonitor.relabelings | list | `[]` | | +| prometheusOperator.serviceMonitor.sampleLimit | int | `0` | | +| prometheusOperator.serviceMonitor.scrapeTimeout | string | `""` | | +| prometheusOperator.serviceMonitor.selfMonitor | bool | `true` | | +| prometheusOperator.serviceMonitor.targetLimit | int | `0` | | +| prometheusOperator.thanosImage.registry | string | `"quay.io"` | | +| prometheusOperator.thanosImage.repository | string | `"thanos/thanos"` | | +| prometheusOperator.thanosImage.sha | string | `""` | | +| prometheusOperator.thanosImage.tag | string | `"v0.31.0"` | | +| prometheusOperator.thanosRulerInstanceNamespaces | list | `[]` | | +| prometheusOperator.thanosRulerInstanceSelector | string | `""` | | +| prometheusOperator.tls.enabled | bool | `true` | | +| prometheusOperator.tls.internalPort | int | `10250` | | +| prometheusOperator.tls.tlsMinVersion | string | `"VersionTLS13"` | | +| prometheusOperator.tolerations[0].effect | string | `"PreferNoSchedule"` | | +| prometheusOperator.tolerations[0].key | string | `"monitoring"` | | +| prometheusOperator.tolerations[0].operator | string | `"Equal"` | | +| prometheusOperator.tolerations[0].value | string | `"true"` | | +| prometheusOperator.verticalPodAutoscaler.controlledResources | list | `[]` | | +| prometheusOperator.verticalPodAutoscaler.enabled | bool | `false` | | +| prometheusOperator.verticalPodAutoscaler.maxAllowed | object | `{}` | | +| prometheusOperator.verticalPodAutoscaler.minAllowed | object | `{}` | | +| prometheusOperator.verticalPodAutoscaler.updatePolicy.updateMode 
| string | `"Auto"` | | +| thanosRuler.annotations | object | `{}` | | +| thanosRuler.enabled | bool | `false` | | +| thanosRuler.extraSecret.annotations | object | `{}` | | +| thanosRuler.extraSecret.data | object | `{}` | | +| thanosRuler.ingress.annotations | object | `{}` | | +| thanosRuler.ingress.enabled | bool | `false` | | +| thanosRuler.ingress.hosts | list | `[]` | | +| thanosRuler.ingress.labels | object | `{}` | | +| thanosRuler.ingress.paths | list | `[]` | | +| thanosRuler.ingress.tls | list | `[]` | | +| thanosRuler.podDisruptionBudget.enabled | bool | `false` | | +| thanosRuler.podDisruptionBudget.maxUnavailable | string | `""` | | +| thanosRuler.podDisruptionBudget.minAvailable | int | `1` | | +| thanosRuler.service.additionalPorts | list | `[]` | | +| thanosRuler.service.annotations | object | `{}` | | +| thanosRuler.service.clusterIP | string | `""` | | +| thanosRuler.service.externalIPs | list | `[]` | | +| thanosRuler.service.externalTrafficPolicy | string | `"Cluster"` | | +| thanosRuler.service.labels | object | `{}` | | +| thanosRuler.service.loadBalancerIP | string | `""` | | +| thanosRuler.service.loadBalancerSourceRanges | list | `[]` | | +| thanosRuler.service.nodePort | int | `30905` | | +| thanosRuler.service.port | int | `10902` | | +| thanosRuler.service.targetPort | int | `10902` | | +| thanosRuler.service.type | string | `"ClusterIP"` | | +| thanosRuler.serviceAccount.annotations | object | `{}` | | +| thanosRuler.serviceAccount.create | bool | `true` | | +| thanosRuler.serviceAccount.name | string | `""` | | +| thanosRuler.serviceMonitor.additionalLabels | object | `{}` | | +| thanosRuler.serviceMonitor.bearerTokenFile | string | `nil` | | +| thanosRuler.serviceMonitor.interval | string | `""` | | +| thanosRuler.serviceMonitor.labelLimit | int | `0` | | +| thanosRuler.serviceMonitor.labelNameLengthLimit | int | `0` | | +| thanosRuler.serviceMonitor.labelValueLengthLimit | int | `0` | | +| thanosRuler.serviceMonitor.metricRelabelings | list | `[]` | | +| thanosRuler.serviceMonitor.proxyUrl | string | `""` | | +| thanosRuler.serviceMonitor.relabelings | list | `[]` | | +| thanosRuler.serviceMonitor.sampleLimit | int | `0` | | +| thanosRuler.serviceMonitor.scheme | string | `""` | | +| thanosRuler.serviceMonitor.selfMonitor | bool | `true` | | +| thanosRuler.serviceMonitor.targetLimit | int | `0` | | +| thanosRuler.serviceMonitor.tlsConfig | object | `{}` | | +| thanosRuler.thanosRulerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| thanosRuler.thanosRulerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| thanosRuler.thanosRulerSpec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| thanosRuler.thanosRulerSpec.alertmanagersConfig | object | `{}` | | +| thanosRuler.thanosRulerSpec.containers | list | `[]` | | +| thanosRuler.thanosRulerSpec.evaluationInterval | string | `""` | | +| thanosRuler.thanosRulerSpec.externalPrefix | string | `nil` | | +| thanosRuler.thanosRulerSpec.image.registry | string | `"quay.io"` | | +| thanosRuler.thanosRulerSpec.image.repository | string | `"thanos/thanos"` | | +| thanosRuler.thanosRulerSpec.image.sha | string | `""` | | +| thanosRuler.thanosRulerSpec.image.tag | string | `"v0.31.0"` | | +| 
thanosRuler.thanosRulerSpec.initContainers | list | `[]` | | +| thanosRuler.thanosRulerSpec.labels | object | `{}` | | +| thanosRuler.thanosRulerSpec.listenLocal | bool | `false` | | +| thanosRuler.thanosRulerSpec.logFormat | string | `"logfmt"` | | +| thanosRuler.thanosRulerSpec.logLevel | string | `"info"` | | +| thanosRuler.thanosRulerSpec.nodeSelector | object | `{}` | | +| thanosRuler.thanosRulerSpec.objectStorageConfig | object | `{}` | | +| thanosRuler.thanosRulerSpec.objectStorageConfigFile | string | `""` | | +| thanosRuler.thanosRulerSpec.paused | bool | `false` | | +| thanosRuler.thanosRulerSpec.podAntiAffinity | string | `""` | | +| thanosRuler.thanosRulerSpec.podAntiAffinityTopologyKey | string | `"kubernetes.io/hostname"` | | +| thanosRuler.thanosRulerSpec.podMetadata | object | `{}` | | +| thanosRuler.thanosRulerSpec.portName | string | `"web"` | | +| thanosRuler.thanosRulerSpec.priorityClassName | string | `""` | | +| thanosRuler.thanosRulerSpec.queryConfig | object | `{}` | | +| thanosRuler.thanosRulerSpec.queryEndpoints | list | `[]` | | +| thanosRuler.thanosRulerSpec.replicas | int | `1` | | +| thanosRuler.thanosRulerSpec.resources | object | `{}` | | +| thanosRuler.thanosRulerSpec.retention | string | `"24h"` | | +| thanosRuler.thanosRulerSpec.routePrefix | string | `"/"` | | +| thanosRuler.thanosRulerSpec.ruleNamespaceSelector | object | `{}` | | +| thanosRuler.thanosRulerSpec.ruleSelector | object | `{}` | | +| thanosRuler.thanosRulerSpec.ruleSelectorNilUsesHelmValues | bool | `true` | | +| thanosRuler.thanosRulerSpec.securityContext.fsGroup | int | `2000` | | +| thanosRuler.thanosRulerSpec.securityContext.runAsGroup | int | `2000` | | +| thanosRuler.thanosRulerSpec.securityContext.runAsNonRoot | bool | `true` | | +| thanosRuler.thanosRulerSpec.securityContext.runAsUser | int | `1000` | | +| thanosRuler.thanosRulerSpec.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | | +| thanosRuler.thanosRulerSpec.storage | object | `{}` | | +| thanosRuler.thanosRulerSpec.tolerations[0].effect | string | `"PreferNoSchedule"` | | +| thanosRuler.thanosRulerSpec.tolerations[0].key | string | `"monitoring"` | | +| thanosRuler.thanosRulerSpec.tolerations[0].operator | string | `"Equal"` | | +| thanosRuler.thanosRulerSpec.tolerations[0].value | string | `"true"` | | +| thanosRuler.thanosRulerSpec.topologySpreadConstraints | list | `[]` | | +| thanosRuler.thanosRulerSpec.volumeMounts | list | `[]` | | +| thanosRuler.thanosRulerSpec.volumes | list | `[]` | | +| windowsMonitoring.enabled | bool | `false` | | +| windowsMonitoring.job | string | `"prometheus-windows-exporter"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/lodestar/README.md b/charts/lodestar/README.md new file mode 100644 index 000000000..b3c9060ae --- /dev/null +++ b/charts/lodestar/README.md @@ -0,0 +1,62 @@ +# lodestar + +![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) + +A Helm chart to deploy the Lodestar Consensus Client using Kubernetes + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | +| stdevMac | | | +| refl3ction | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| 
env[0].name | string | `"BEACON_NODE_ADDRESS"` | | +| env[0].value | string | `""` | | +| env[1].name | string | `"NETWORK"` | | +| env[1].value | string | `"holesky"` | | +| env[2].name | string | `"BUILDER_API_ENABLED"` | | +| env[2].value | string | `"true"` | | +| env[3].name | string | `"BUILDER_SELECTION"` | | +| env[3].value | string | `"builderonly"` | | +| fullnameOverride | string | `""` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"chainsafe/lodestar"` | | +| image.tag | string | `"v1.11.3"` | | +| imagePullSecrets | list | `[]` | | +| ingress.annotations | object | `{}` | | +| ingress.className | string | `""` | | +| ingress.enabled | bool | `false` | | +| ingress.hosts[0].host | string | `"chart-example.local"` | | +| ingress.hosts[0].paths[0].path | string | `"/"` | | +| ingress.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | | +| ingress.tls | list | `[]` | | +| livnessProbe | string | `nil` | | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | | +| readinessProbe | string | `nil` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | | +| secrets | object | `{"validatorKeys":"validator-keys"}` | Kubernetes secrets names | +| secrets.validatorKeys | string | `"validator-keys"` | validators keys | +| securityContext | object | `{}` | | +| service.ports.http | int | `9596` | | +| service.ports.metrics | int | `5064` | | +| service.ports.p2p | int | `9000` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `""` | | +| tolerations | list | `[]` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/loki/README.md b/charts/loki/README.md index fa6338684..697eaed80 100644 --- a/charts/loki/README.md +++ b/charts/loki/README.md @@ -1,82 +1,140 @@ -# Loki Helm Chart - -## Prerequisites - -Make sure you have Helm [installed](https://helm.sh/docs/using_helm/#installing-helm). - -## Get Repo Info - -```console -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -``` - -_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - - -## Deploy Loki only - -```bash -helm upgrade --install loki grafana/loki -``` - -## Run Loki behind https ingress - -If Loki and Promtail are deployed on different clusters you can add an Ingress in front of Loki. -By adding a certificate you create an https endpoint. For extra security enable basic authentication on the Ingress. 
- -In Promtail set the following values to communicate with https and basic auth - -```yaml -loki: - serviceScheme: https - user: user - password: pass -``` - -Sample helm template for ingress: - -```yaml -apiVersion: networking.k8s.io/v1 -kind: Ingress -metadata: - name: loki - annotations: - kubernetes.io/ingress.class: {{ .Values.ingress.class }} - ingress.kubernetes.io/auth-type: basic - ingress.kubernetes.io/auth-secret: {{ .Values.ingress.basic.secret }} -spec: - rules: - - host: {{ .Values.ingress.host }} - http: - paths: - - backend: - service: - name: loki - port: - number: 3100 - path: / - pathType: Prefix - tls: - - hosts: - - {{ .Values.ingress.host }} - secretName: {{ .Values.ingress.cert }} -``` - -## Use Loki Alerting - -You can add your own alerting rules with `alerting_groups` in `values.yaml`. This will create a ConfigMap with your rules and additional volumes and mounts for Loki. - -This does **not** enable the Loki `ruler` component which does the evaluation of your rules. The `values.yaml` file does contain a simple example. For more details take a look at the official [alerting docs](https://grafana.com/docs/loki/latest/rules/). - -## Enable retention policy (log deletion) - -Set Helm value `config.compactor.retention_enabled` to enable retention using the default policy, which deletes logs after 31 days. - -```yaml -config: - compactor: - retention_enabled: true -``` - -See [the documentation](https://grafana.com/docs/loki/latest/operations/storage/retention/) for additional options. +# loki + +![Version: 2.11.2](https://img.shields.io/badge/Version-2.11.2-informational?style=flat-square) ![AppVersion: v2.5.0](https://img.shields.io/badge/AppVersion-v2.5.0-informational?style=flat-square) + +Loki: like Prometheus, but for logs. 
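+
+The defaults in the table below ship chunks and the boltdb-shipper index to a GCS bucket and can expose Loki through an internal LoadBalancer (see the `config.storage_config`, `config.compactor` and `service_lb` values). A minimal sketch of a values override follows, assuming the chart is installed from this repository's `charts/loki` directory; the bucket name, namespace and release name are placeholders, not chart defaults:
+
+```yaml
+# my-loki-values.yaml -- illustrative override, adjust to your environment
+config:
+  storage_config:
+    gcs:
+      bucket_name: my-loki-logs-bucket   # your GCS bucket instead of the default
+persistence:
+  enabled: true                          # keep index/cache data on a PVC
+  size: 20Gi
+serviceMonitor:
+  enabled: true                          # scrape Loki with the Prometheus operator
+```
+
+```console
+helm upgrade --install loki ./charts/loki --namespace monitoring -f my-loki-values.yaml
+```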
+ +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Source Code + +* + +## Requirements + +Kubernetes: `^1.10.0-0` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].key | string | `"node_pool"` | | +| affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].operator | string | `"In"` | | +| affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values[0] | string | `"monitoring"` | | +| alerting_groups | list | `[]` | | +| annotations | object | `{}` | | +| client | object | `{}` | | +| config.auth_enabled | bool | `false` | | +| config.chunk_store_config.max_look_back_period | string | `"0s"` | | +| config.compactor.shared_store | string | `"gcs"` | | +| config.compactor.working_directory | string | `"/data/loki/boltdb-shipper-compactor"` | | +| config.ingester.chunk_block_size | int | `262144` | | +| config.ingester.chunk_idle_period | string | `"3m"` | | +| config.ingester.chunk_retain_period | string | `"1m"` | | +| config.ingester.lifecycler.ring.kvstore.store | string | `"inmemory"` | | +| config.ingester.lifecycler.ring.replication_factor | int | `1` | | +| config.ingester.max_transfer_retries | int | `0` | | +| config.ingester.wal.dir | string | `"/data/loki/wal"` | | +| config.limits_config.enforce_metric_name | bool | `false` | | +| config.limits_config.max_entries_limit_per_query | int | `5000` | | +| config.limits_config.reject_old_samples | bool | `true` | | +| config.limits_config.reject_old_samples_max_age | string | `"168h"` | | +| config.schema_config.configs[0].from | string | `"2020-10-24"` | | +| config.schema_config.configs[0].index.period | string | `"24h"` | | +| config.schema_config.configs[0].index.prefix | string | `"index_"` | | +| config.schema_config.configs[0].object_store | string | `"gcs"` | | +| config.schema_config.configs[0].schema | string | `"v11"` | | +| config.schema_config.configs[0].store | string | `"boltdb-shipper"` | | +| config.server.http_listen_port | int | `3100` | | +| config.storage_config.boltdb_shipper.active_index_directory | string | `"/data/loki/boltdb-shipper-active"` | | +| config.storage_config.boltdb_shipper.cache_location | string | `"/data/loki/boltdb-shipper-cache"` | | +| config.storage_config.boltdb_shipper.cache_ttl | string | `"24h"` | | +| config.storage_config.boltdb_shipper.shared_store | string | `"gcs"` | | +| config.storage_config.gcs.bucket_name | string | `"loki-logs-juno-dev"` | | +| config.table_manager.retention_deletes_enabled | bool | `false` | | +| config.table_manager.retention_period | string | `"0s"` | | +| env | list | `[]` | | +| extraArgs | object | `{}` | | +| extraContainers | list | `[]` | | +| extraPorts | list | `[]` | | +| extraVolumeMounts | list | `[]` | | +| extraVolumes | list | `[]` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"grafana/loki"` | | +| image.tag | string | `"2.5.0"` | | +| ingress.annotations | object | `{}` | | +| ingress.enabled | bool | `false` | | +| ingress.hosts[0].host | string | `"chart-example.local"` | | +| ingress.hosts[0].paths | list | `[]` | | +| ingress.tls | list | `[]` | | +| initContainers | list | `[]` | | +| livenessProbe.httpGet.path | string | `"/ready"` | | +| 
livenessProbe.httpGet.port | string | `"http-metrics"` | | +| livenessProbe.initialDelaySeconds | int | `45` | | +| networkPolicy.enabled | bool | `false` | | +| nodeSelector | object | `{}` | | +| persistence.accessModes[0] | string | `"ReadWriteOnce"` | | +| persistence.annotations | object | `{}` | | +| persistence.enabled | bool | `false` | | +| persistence.size | string | `"10Gi"` | | +| podAnnotations."prometheus.io/port" | string | `"http-metrics"` | | +| podAnnotations."prometheus.io/scrape" | string | `"true"` | | +| podDisruptionBudget | object | `{}` | | +| podLabels | object | `{}` | | +| podManagementPolicy | string | `"OrderedReady"` | | +| rbac.create | bool | `true` | | +| rbac.pspEnabled | bool | `true` | | +| readinessProbe.httpGet.path | string | `"/ready"` | | +| readinessProbe.httpGet.port | string | `"http-metrics"` | | +| readinessProbe.initialDelaySeconds | int | `45` | | +| replicas | int | `1` | | +| resources | object | `{}` | | +| securityContext.fsGroup | int | `10001` | | +| securityContext.runAsGroup | int | `10001` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.runAsUser | int | `10001` | | +| service.annotations | object | `{}` | | +| service.labels | object | `{}` | | +| service.nodePort | string | `nil` | | +| service.port | int | `3100` | | +| service.targetPort | string | `"http-metrics"` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations."iam.gke.io/gcp-service-account" | string | `"monitoring-sa@juno-dev-nth.iam.gserviceaccount.com"` | | +| serviceAccount.automountServiceAccountToken | bool | `true` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.name | string | `nil` | | +| serviceMonitor.additionalLabels | object | `{}` | | +| serviceMonitor.annotations | object | `{}` | | +| serviceMonitor.enabled | bool | `false` | | +| serviceMonitor.interval | string | `""` | | +| serviceMonitor.prometheusRule.additionalLabels | object | `{}` | | +| serviceMonitor.prometheusRule.enabled | bool | `false` | | +| serviceMonitor.prometheusRule.rules | list | `[]` | | +| service_lb.annotations."cloud.google.com/load-balancer-type" | string | `"internal"` | | +| service_lb.annotations."external-dns.alpha.kubernetes.io/hostname" | string | `"loki.juno.dev"` | | +| service_lb.annotations."networking.gke.io/internal-load-balancer-allow-global-access" | string | `"true"` | | +| service_lb.enable | bool | `true` | | +| service_lb.labels | object | `{}` | | +| service_lb.nodePort | string | `nil` | | +| service_lb.port | int | `80` | | +| service_lb.targetPort | string | `"http-metrics"` | | +| service_lb.type | string | `"LoadBalancer"` | | +| terminationGracePeriodSeconds | int | `4800` | | +| tolerations[0].effect | string | `"PreferNoSchedule"` | | +| tolerations[0].key | string | `"monitoring"` | | +| tolerations[0].operator | string | `"Equal"` | | +| tolerations[0].value | bool | `true` | | +| tracing.jaegerAgentHost | string | `nil` | | +| updateStrategy.type | string | `"RollingUpdate"` | | +| useExistingAlertingGroup.configmapName | string | `""` | | +| useExistingAlertingGroup.enabled | bool | `false` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/mev-boost/README.md b/charts/mev-boost/README.md new file mode 100644 index 000000000..ec2ab8f04 --- /dev/null +++ b/charts/mev-boost/README.md @@ -0,0 +1,60 @@ +# mev-boost + +![Version: 
1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.6.0](https://img.shields.io/badge/AppVersion-1.6.0-informational?style=flat-square) + +mev-boost allows proof-of-stake Ethereum consensus clients to outsource block construction + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | | +| autoscaling.enabled | bool | `false` | | +| autoscaling.maxReplicas | int | `100` | | +| autoscaling.minReplicas | int | `1` | | +| autoscaling.targetCPUUtilizationPercentage | int | `80` | | +| extraFlags | list | `[]` | Extra flags for mev-boost | +| fullnameOverride | string | `""` | | +| global.imagePullSecrets | list | `[]` | Credentials to fetch images from private registry ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| global.network | string | `"mainnet"` | Options: mainnet, goerli, sepolia | +| global.podSecurityContext | object | `{"fsGroup":10000,"runAsNonRoot":true,"runAsUser":10000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| global.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| global.securityContext.readOnlyRootFilesystem | bool | `true` | | +| global.securityContext.runAsNonRoot | bool | `true` | | +| global.securityContext.runAsUser | int | `10000` | | +| global.serviceAccount.create | bool | `true` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"flashbots/mev-boost"` | | +| image.tag | string | `"1.6"` | Overrides the image tag whose default is the chart appVersion. | +| imagePullSecrets | list | `[]` | | +| logLevel | string | `"info"` | minimum loglevel: trace, debug, info, warn/warning, error, fatal, panic | +| nameOverride | string | `""` | | +| nodeSelector | object | `{}` | | +| podAnnotations | object | `{}` | | +| relays | object | `{"goerli":["https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@builder-relay-goerli.flashbots.net"],"mainnet":["https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"],"sepolia":["https://0x845bd072b7cd566f02faeb0a4033ce9399e42839ced64e8b2adcfc859ed1e8e1a5a293336a49feac6d9a5edb779be53a@builder-relay-sepolia.flashbots.net"]}` | Relay URLs | +| replicaCount | int | `1` | | +| resources | object | `{}` | We usually recommend not to specify default resources and to leave this as a conscious choice for the user. This also increases chances charts run on environments with little resources, such as Minikube. If you do want to specify resources, uncomment the following lines, adjust them as necessary, and remove the curly braces after 'resources:'. | +| service.port | int | `18550` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | +| tolerations | list | `[]` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/mev-boost/values.yaml b/charts/mev-boost/values.yaml index 1738ae5c6..462ede56c 100644 --- a/charts/mev-boost/values.yaml +++ b/charts/mev-boost/values.yaml @@ -3,19 +3,19 @@ # Declare variables to be passed into your templates. global: - # Options: mainnet, goerli, sepolia + # -- Options: mainnet, goerli, sepolia network: "mainnet" - ## Credentials to fetch images from private registry - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## + # -- Credentials to fetch images from private registry + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + # imagePullSecrets: [] serviceAccount: create: true - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## + # -- Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # podSecurityContext: runAsNonRoot: true runAsUser: 10000 @@ -29,7 +29,7 @@ global: drop: - ALL -# Relay URLs +# -- Relay URLs relays: mainnet: # Flashbots @@ -39,11 +39,11 @@ relays: sepolia: - "https://0x845bd072b7cd566f02faeb0a4033ce9399e42839ced64e8b2adcfc859ed1e8e1a5a293336a49feac6d9a5edb779be53a@builder-relay-sepolia.flashbots.net" -# minimum loglevel: trace, debug, info, warn/warning, error, fatal, panic +# -- minimum loglevel: trace, debug, info, warn/warning, error, fatal, panic logLevel: info -## Extra flags for mev-boost -## +# -- Extra flags for mev-boost +# extraFlags: [] replicaCount: 1 @@ -51,7 +51,7 @@ replicaCount: 1 image: repository: flashbots/mev-boost pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. + # -- Overrides the image tag whose default is the chart appVersion. tag: "1.6" imagePullSecrets: [] @@ -59,11 +59,11 @@ nameOverride: "" fullnameOverride: "" serviceAccount: - # Specifies whether a service account should be created + # -- Specifies whether a service account should be created create: true - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" @@ -73,11 +73,11 @@ service: type: ClusterIP port: 18550 +# -- We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
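
As a usage sketch for the mev-boost chart documented above, an override file could look like the following; the relay URL, pubkey and network choice here are placeholders for illustration only (the chart's real defaults are listed in the values table), and the key names are taken from that table:

```yaml
# my-mev-boost-values.yaml -- illustrative overrides for charts/mev-boost (placeholder values)
global:
  network: "goerli"            # one of: mainnet, goerli, sepolia
logLevel: "debug"              # trace, debug, info, warn/warning, error, fatal, panic
relays:
  goerli:
    - "https://<relay-pubkey>@builder-relay-goerli.flashbots.net"   # placeholder relay URL
extraFlags: []                 # any additional mev-boost CLI flags
```

Such a file can then be passed with `-f` to `helm install`/`helm upgrade` like any other values override.
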
# limits: # cpu: 100m # memory: 128Mi diff --git a/charts/mysql/README.md b/charts/mysql/README.md new file mode 100644 index 000000000..c3b75b10b --- /dev/null +++ b/charts/mysql/README.md @@ -0,0 +1,25 @@ +# my-mysql-chart + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![AppVersion: 5.7](https://img.shields.io/badge/AppVersion-5.7-informational?style=flat-square) + +A Helm chart for deploying MySQL with StatefulSet, Service, Secret, and PVC. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| mysqlDatabase | string | `"mydb"` | | +| mysqlPassword | string | `""` | | +| mysqlRootPassword | string | `""` | | +| mysqlUser | string | `"myuser"` | | +| storageSize | string | `"100Gi"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/posmoni/README.md b/charts/posmoni/README.md new file mode 100644 index 000000000..0f992f8e6 --- /dev/null +++ b/charts/posmoni/README.md @@ -0,0 +1,60 @@ +# posmoni + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.0.1](https://img.shields.io/badge/AppVersion-v0.0.1-informational?style=flat-square) + +A Helm chart for installing and configuring Posmoni + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| global.serviceAccount.create | bool | `true` | | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"nethermindeth/posmoni"` | | +| image.tag | string | `"enable-monitoring"` | | +| metricsPort | int | `2112` | | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| posmoni.consensus | string | `"http://localhost:4000"` | | +| posmoni.execution | string | `"http://localhost:8545"` | | +| posmoni.logs.logLevel | string | `"INFO"` | | +| posmoni.validators[0] | int | `1` | | +| posmoni.validators[1] | int | `2` | | +| posmoni.validators[2] | int | `3` | | +| replicaCount | int | `1` | | +| resources | object | `{}` | Configure resource requests and limits. 
ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| securityContext | object | `{}` | | +| service.port | int | `80` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/posmoni/values.yaml b/charts/posmoni/values.yaml index df77cb549..4097a128e 100644 --- a/charts/posmoni/values.yaml +++ b/charts/posmoni/values.yaml @@ -22,20 +22,20 @@ posmoni: metricsPort: 2112 -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" podAnnotations: {} -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## +# -- Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# podSecurityContext: {} securityContext: {} @@ -50,9 +50,9 @@ service: type: ClusterIP port: 80 -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. 
This also increases chances charts run on environments with little @@ -65,47 +65,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- Metrics RelabelConfigs to apply to samples before scraping. + # relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. 
+ # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {} diff --git a/charts/promtail/README.md b/charts/promtail/README.md index ae16c8581..6d6275716 100644 --- a/charts/promtail/README.md +++ b/charts/promtail/README.md @@ -1,6 +1,6 @@ # promtail -![Version: 6.14.2](https://img.shields.io/badge/Version-6.14.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.8.3](https://img.shields.io/badge/AppVersion-2.8.3-informational?style=flat-square) +![Version: 6.14.3](https://img.shields.io/badge/Version-6.14.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 2.8.3](https://img.shields.io/badge/AppVersion-2.8.3-informational?style=flat-square) Promtail is an agent which ships the contents of local logs to a Loki instance diff --git a/charts/rpc-saas-secretStore/README.md b/charts/rpc-saas-secretStore/README.md new file mode 100644 index 000000000..b34c17e35 --- /dev/null +++ b/charts/rpc-saas-secretStore/README.md @@ -0,0 +1,26 @@ +# rpc-saas-css + +![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![AppVersion: 1](https://img.shields.io/badge/AppVersion-1-informational?style=flat-square) + +A Helm chart for deploying ClusterSecretStore for RPC Saas Service + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Manjeet | | | +| Anish | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| rpcSaas.clustersecretstore.clusterLocation | string | `"dummy"` | | +| rpcSaas.clustersecretstore.clusterName | string | `"dummy"` | | +| rpcSaas.clustersecretstore.name | string | `"dummy"` | | +| rpcSaas.clustersecretstore.projectID | string | `"dummy"` | | +| rpcSaas.clustersecretstore.serviceAccountname | string | `"dummy"` | | +| rpcSaas.clustersecretstore.serviceAccountnamespace | string | `"dummy"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/validator-ejector/README.md b/charts/validator-ejector/README.md new file mode 100644 index 000000000..206d3e48b --- /dev/null +++ b/charts/validator-ejector/README.md @@ -0,0 +1,101 @@ +# validator-ejector + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.0.1](https://img.shields.io/badge/AppVersion-v0.0.1-informational?style=flat-square) + +A Helm chart for installing and configuring Lido's validator-ejector + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | 
+|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| ejector.blocksLoop | int | `32` | Amount of blocks to load events from on every poll. Defaults to 1 epoch | +| ejector.blocksPreload | int | `7200` | Amount of blocks to load events from on start. Increase if daemon was not running for some time. Defaults to a day of blocks | +| ejector.consensus | string | `"http://localhost:4000"` | Ethereum Consensus Node endpoint | +| ejector.dryRun | bool | `false` | Run the service without actually sending out exit messages | +| ejector.execution | string | `"http://localhost:8545"` | Ethereum Execution Node endpoint | +| ejector.httpPort | int | `2122` | Port to serve metrics and health check on | +| ejector.jobInterval | int | `384000` | Time interval in milliseconds to run checks. Defaults to time of 1 epoch | +| ejector.locatorAddress | string | `"0x12cd349E19Ab2ADBE478Fc538A66C059Cf40CFeC"` | Address of the Locator contract, can be found in the lido-dao repo | +| ejector.loggerFormat | string | `"simple"` | Simple or JSON log output: simple/json | +| ejector.loggerLevel | string | `"info"` | Severity level from which to start showing errors eg info will hide debug messages | +| ejector.messagesLocation | string | `"messages"` | Folder to load json exit message files from | +| ejector.operatorId | string | `"123"` | Operator ID in the Node Operators registry, easiest to get from Operators UI | +| ejector.runHealthCheck | bool | `true` | Enable health check endpoint | +| ejector.runMetrics | bool | `true` | Enable metrics endpoint | +| ejector.stakingModuleId | string | `"123"` | Staking Module ID for which operator ID is set | +| ejector.validatorExitWebhook | string | `""` | POST validator info to an endpoint instead of sending out an exit message in order to initiate an exit. 
Required if you are using webhook mode | +| global.externalSecrets.data[0].remoteRef.key | string | `"validatorEjectorSecrets"` | | +| global.externalSecrets.data[0].remoteRef.property | string | `"message"` | | +| global.externalSecrets.data[0].secretKey | string | `"MESSAGES_PASSWORD"` | | +| global.externalSecrets.data[1].remoteRef.key | string | `"validatorEjectorSecrets"` | | +| global.externalSecrets.data[1].remoteRef.property | string | `"loggerSecrets"` | | +| global.externalSecrets.data[1].secretKey | string | `"LOGGER_SECRETS"` | | +| global.externalSecrets.data[2].remoteRef.key | string | `"validatorEjectorSecrets"` | | +| global.externalSecrets.data[2].remoteRef.property | string | `"oracleAddressesAllowlist"` | | +| global.externalSecrets.data[2].secretKey | string | `"ORACLE_ADDRESSES_ALLOWLIST"` | | +| global.externalSecrets.enabled | bool | `false` | | +| global.externalSecrets.secretStoreRef | string | `"secretStoreRef"` | | +| global.image.pullPolicy | string | `"IfNotPresent"` | | +| global.image.repository | string | `"lidofinance/validator-ejector"` | | +| global.image.tag | string | `"1.2.0"` | | +| global.initImage.pullPolicy | string | `"IfNotPresent"` | | +| global.initImage.repository | string | `"bitnami/kubectl"` | | +| global.initImage.tag | string | `"1.28"` | | +| global.loader.pullPolicy | string | `"IfNotPresent"` | | +| global.loader.repository | string | `"nethermindeth/eth-exit-messages"` | | +| global.loader.tag | string | `"v0.0.26"` | | +| global.replicaCount | int | `1` | | +| global.serviceAccount.create | bool | `true` | | +| loader.BEACON_ENDPOINT | string | `"http://192.168.11.104:5052"` | | +| loader.CUSTODIAN_WEB3SIGNER_MAPPER | string | `"{\"custodian1/second\": \"http://192.168.11.104:9110\"}"` | | +| loader.EIP2335_PASSWORD | string | `"test"` | | +| loader.ENCRYPTION | string | `"EIP2335"` | | +| loader.ENCRYPTION_TYPE | string | `"EIP2335"` | | +| loader.ENCRYPT_WITH_METADATA | string | `"false"` | | +| loader.FETCH_INTERVAL | string | `"60"` | | +| loader.KEY_LOADER_TYPE | string | `"WEB3SIGNER"` | | +| loader.LOADER_MAPPER | string | `"{}"` | | +| loader.LidoKAPI_KEYS_PERCENT | string | `"5"` | | +| loader.LidoKAPI_OPERATOR_ID | string | `""` | | +| loader.SIGNER_MAPPER | string | `"{}"` | | +| loader.STORAGE_LOCATION | string | `"local/"` | | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| persistence | object | `{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":true,"size":"5Gi","storageClassName":""}` | Whether or not to allocate persistent volume disk for the data directory. In case of node failure, the node data directory will still persist. | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| securityContext | object | `{}` | | +| service.port | int | `80` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | +| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/validator-ejector/values.yaml b/charts/validator-ejector/values.yaml index 3886f477f..ece53f075 100644 --- a/charts/validator-ejector/values.yaml +++ b/charts/validator-ejector/values.yaml @@ -56,60 +56,60 @@ loader: LidoKAPI_KEYS_PERCENT: "5" ejector: - ## Ethereum Consensus Node endpoint + # -- Ethereum Consensus Node endpoint consensus: "http://localhost:4000" - ## Ethereum Execution Node endpoint + # -- Ethereum Execution Node endpoint execution: "http://localhost:8545" - ## Address of the Locator contract, can be found in the lido-dao repo + # -- Address of the Locator contract, can be found in the lido-dao repo locatorAddress: "0x12cd349E19Ab2ADBE478Fc538A66C059Cf40CFeC" - ## Staking Module ID for which operator ID is set + # -- Staking Module ID for which operator ID is set stakingModuleId: "123" - ## Operator ID in the Node Operators registry, easiest to get from Operators UI + # -- Operator ID in the Node Operators registry, easiest to get from Operators UI operatorId: "123" - ## Folder to load json exit message files from + # -- Folder to load json exit message files from messagesLocation: "messages" - ## POST validator info to an endpoint instead of sending out an exit message in order to initiate an exit. - ## Required if you are using webhook mode + # -- POST validator info to an endpoint instead of sending out an exit message in order to initiate an exit. + # Required if you are using webhook mode validatorExitWebhook: "" - ## Amount of blocks to load events from on start. - ## Increase if daemon was not running for some time. Defaults to a day of blocks + # -- Amount of blocks to load events from on start. + # Increase if daemon was not running for some time. Defaults to a day of blocks blocksPreload: 7200 - ## Amount of blocks to load events from on every poll. 
Defaults to 1 epoch + # -- Amount of blocks to load events from on every poll. Defaults to 1 epoch blocksLoop: 32 - ## Time interval in milliseconds to run checks. Defaults to time of 1 epoch + # -- Time interval in milliseconds to run checks. Defaults to time of 1 epoch jobInterval: 384000 - ## Port to serve metrics and health check on + # -- Port to serve metrics and health check on httpPort: 2122 - ## Enable metrics endpoint + # -- Enable metrics endpoint runMetrics: true - ## Enable health check endpoint + # -- Enable health check endpoint runHealthCheck: true - ## Severity level from which to start showing errors eg info will hide debug messages + # -- Severity level from which to start showing errors eg info will hide debug messages loggerLevel: "info" - ## Simple or JSON log output: simple/json + # -- Simple or JSON log output: simple/json loggerFormat: "simple" - ## Run the service without actually sending out exit messages + # -- Run the service without actually sending out exit messages dryRun: false -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" podAnnotations: {} -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## +# -- Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# podSecurityContext: {} -## Whether or not to allocate persistent volume disk for the data directory. -## In case of node failure, the node data directory will still persist. -## +# -- Whether or not to allocate persistent volume disk for the data directory. +# In case of node failure, the node data directory will still persist. +# persistence: enabled: true storageClassName: "" @@ -130,11 +130,11 @@ service: type: ClusterIP port: 80 -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious + # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little # resources, such as Minikube. If you do want to specify resources, uncomment the following # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
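
For readers configuring the validator-ejector chart, the following is a minimal, illustrative override file built only from the values documented above; the endpoints, operator ID and staking module ID are placeholders, not defaults shipped with the chart:

```yaml
# my-ejector-values.yaml -- illustrative overrides for charts/validator-ejector (placeholder values)
ejector:
  consensus: "http://consensus-node:4000"    # Ethereum Consensus Node endpoint (placeholder)
  execution: "http://execution-node:8545"    # Ethereum Execution Node endpoint (placeholder)
  stakingModuleId: "1"                       # Staking Module ID for which the operator ID is set
  operatorId: "42"                           # Operator ID in the Node Operators registry
  messagesLocation: "messages"               # folder with pre-signed exit message JSON files
  dryRun: true                               # keep true until the setup has been verified end to end
serviceMonitor:
  enabled: true                              # scrape metrics via the Prometheus Operator
```
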
@@ -145,47 +145,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- Metrics RelabelConfigs to apply to samples before scraping. + # relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. 
+ # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {} diff --git a/charts/validator-kapi/README.md b/charts/validator-kapi/README.md new file mode 100644 index 000000000..a05588cb1 --- /dev/null +++ b/charts/validator-kapi/README.md @@ -0,0 +1,89 @@ +# validator-kapi + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.0.1](https://img.shields.io/badge/AppVersion-v0.0.1-informational?style=flat-square) + +A Helm chart for installing and configuring Lido's validator-kapi + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://charts.bitnami.com/bitnami/ | postgresql | 13.2.23 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| global.externalSecrets.data[0].remoteRef.key | string | `"validatorKapiSecrets"` | | +| global.externalSecrets.data[0].remoteRef.property | string | `"dbName"` | | +| global.externalSecrets.data[0].secretKey | string | `"DB_NAME"` | | +| global.externalSecrets.data[1].remoteRef.key | string | `"validatorKapiSecrets"` | | +| global.externalSecrets.data[1].remoteRef.property | string | `"dbPort"` | | +| global.externalSecrets.data[1].secretKey | string | `"DB_PORT"` | | +| global.externalSecrets.data[2].remoteRef.key | string | `"validatorKapiSecrets"` | | +| global.externalSecrets.data[2].remoteRef.property | string | `"dbHost"` | | +| global.externalSecrets.data[2].secretKey | string | `"DB_HOST"` | | +| global.externalSecrets.data[3].remoteRef.key | string | `"validatorKapiSecrets"` | | +| global.externalSecrets.data[3].remoteRef.property | string | `"dbUser"` | | +| global.externalSecrets.data[3].secretKey | string | `"DB_USER"` | | +| global.externalSecrets.data[4].remoteRef.key | string | `"validatorKapiSecrets"` | | +| global.externalSecrets.data[4].remoteRef.property | string | `"dbPassword"` | | +| global.externalSecrets.data[4].secretKey | string | `"DB_PASSWORD"` | | +| global.externalSecrets.enabled | bool | `false` | | +| global.externalSecrets.secretStoreRef | string | `"secretStoreRef"` | | +| global.image.pullPolicy | string | `"IfNotPresent"` | | +| global.image.repository | string | `"lidofinance/lido-keys-api"` | | +| global.image.tag | string | `"0.10.1"` | | +| global.replicaCount | int | `1` | | +| global.serviceAccount.create | bool | `true` | | +| kapi.chainId | int | `1` | chain id | +| kapi.consensus | string | `"http://your_cl_node1,http://your_cl_node2"` | CL api urls if VALIDATOR_REGISTRY_ENABLE=false , there are no need to provide CL_API_URLS | +| kapi.corsWhitelistRegexp | string | `""` | The number of seconds that each request will last in storage | +| kapi.env | string | 
`"production"` | | +| kapi.execution | string | `"http://your_el_node1,http://your_el_node2"` | EL Node provider You could provide few providers for fallback | +| kapi.globalCacheTTL | int | `1` | Cache expiration time in seconds | +| kapi.globalThrottleLimit | int | `100` | The maximum number of requests within the TTL limit | +| kapi.globlaThrottleTTL | int | `5` | | +| kapi.jobIntervalRegistry | string | `"*/5 * * * * *"` | | +| kapi.jobIntervalValidatorsRegistry | string | `"*/10 * * * * *"` | | +| kapi.logFormat | string | `"json"` | Log format: simple or json | +| kapi.logLevel | string | `"debug"` | Log level: debug, info, notice, warning or error | +| kapi.port | int | `3000` | Application port | +| kapi.providerBatchAggregationWaitMs | int | `10` | | +| kapi.providerConcurrentRequests | int | `5` | | +| kapi.providerJsonRpcMaxBatchSize | int | `100` | FallbackProviderModule request policy parameters values below are default | +| kapi.validatorRegistryEnable | bool | `true` | It is possible to enable/disable collecting of validators value below is default | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| postgresql | object | `{"audit":{"logHostname":true},"auth":{"database":"kapi","existingSecret":"eso-validator-kapi","secretKeys":{"adminPasswordKey":"DB_PASSWORD","replicationPasswordKey":"","userPasswordKey":"DB_PASSWORD"},"username":"postgres"},"containerSecurityContext":{"enabled":true,"runAsUser":1001},"enabled":true,"hostname":"postgresql","metrics":{"enabled":true},"networkPolicy":{"enabled":false},"primary":{"persistence":{"accessModes":["ReadWriteOnce"],"annotations":{},"enabled":true,"mountPath":"/bitnami/postgresql","selector":{},"size":"20Gi","subPath":""},"resources":{"requests":{"cpu":"250m","memory":"384Mi"}}},"rbac":{"create":false},"securityContext":{"enabled":true,"fsGroup":1001},"service":{"ports":{"postgresql":5432}},"tls":{"enabled":false},"volumePermissions":{"enabled":true,"image":{"pullPolicy":"Always","registry":"docker.io","repository":"bitnami/bitnami-shell","tag":"11-debian-11-r99"},"securityContext":{"runAsUser":0}}}` | PostgreSQL Server ref: https://github.com/bitnami/charts/tree/master/bitnami/postgresql | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| securityContext | object | `{}` | | +| service.port | int | `80` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. 
If not set and create is true, a name is generated using the fullname template | +| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/validator-kapi/values.yaml b/charts/validator-kapi/values.yaml index c9a5581bc..55a0a6842 100644 --- a/charts/validator-kapi/values.yaml +++ b/charts/validator-kapi/values.yaml @@ -40,52 +40,52 @@ global: kapi: env: "production" - # Application port + # -- Application port port: 3000 - # The number of seconds that each request will last in storage + # -- The number of seconds that each request will last in storage corsWhitelistRegexp: "" jobIntervalRegistry: "*/5 * * * * *" jobIntervalValidatorsRegistry: "*/10 * * * * *" globlaThrottleTTL: 5 - # FallbackProviderModule request policy parameters + # -- FallbackProviderModule request policy parameters # values below are default providerJsonRpcMaxBatchSize: 100 providerConcurrentRequests: 5 providerBatchAggregationWaitMs: 10 - # The maximum number of requests within the TTL limit + # -- The maximum number of requests within the TTL limit globalThrottleLimit: 100 - # Cache expiration time in seconds + # -- Cache expiration time in seconds globalCacheTTL: 1 - # Log level: debug, info, notice, warning or error + # -- Log level: debug, info, notice, warning or error logLevel: debug - # Log format: simple or json + # -- Log format: simple or json logFormat: json - # EL Node provider + # -- EL Node provider # You could provide few providers for fallback execution: http://your_el_node1,http://your_el_node2 - # chain id + # -- chain id chainId: 1 - # It is possible to enable/disable collecting of validators + # -- It is possible to enable/disable collecting of validators # value below is default validatorRegistryEnable: true - # CL api urls + # -- CL api urls # if VALIDATOR_REGISTRY_ENABLE=false , there are no need to provide CL_API_URLS consensus: http://your_cl_node1,http://your_cl_node2 -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" podAnnotations: {} -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## +# -- Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# podSecurityContext: {} securityContext: {} @@ -100,9 +100,9 @@ service: type: ClusterIP port: 80 -## PostgreSQL Server -## ref: https://github.com/bitnami/charts/tree/master/bitnami/postgresql -## +# -- PostgreSQL Server +# ref: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +# postgresql: enabled: true volumePermissions: @@ -157,9 +157,9 @@ postgresql: metrics: enabled: true -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -172,47 +172,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. 
- ## + # -- Metrics RelabelConfigs to apply to samples before scraping. + # relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. + # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {} diff --git a/charts/validators/README.md b/charts/validators/README.md new file mode 100644 index 000000000..5bbee12ce --- /dev/null +++ b/charts/validators/README.md @@ -0,0 +1,125 @@ +# validators + +![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.0.0](https://img.shields.io/badge/AppVersion-v1.0.0-informational?style=flat-square) + +A Helm chart for installing validators with the web3signer. + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| beaconChainRpcEndpoints | list | `[]` | List of Beacon Chain node addresses | +| beaconChainRpcEndpointsRandomized | list | `[]` | Randomize endpoints if specified Has a higher priority than beaconChainRpcEndpoints, all specified hosts will be randomly assigned to all validators, used to more evenly distribute the load | +| cliImage | object | `{"pullPolicy":"IfNotPresent","repository":"nethermindeth/keystores-cli","tag":"v1.0.0"}` | CLI image is used to fetch public keys. 
| +| enableBuilder | bool | `false` | Whether a builder should be used for proposalConfig | +| enableWatcher | bool | `false` | | +| enabled | bool | `true` | Whether to enable validator statefulset or not Can be used to temporarily disable validators until synchronization of eth1/eth2 nodes is complete | +| externalSecrets.data | list | `[]` | | +| externalSecrets.enabled | bool | `true` | | +| externalSecrets.secretStoreRef.kind | string | `"secretStoreKind"` | | +| externalSecrets.secretStoreRef.name | string | `"secretStoreName"` | | +| extraFlags | object | `{"lighthouse":[],"lodestar":[],"nimbus":[],"prysm":[],"teku":[]}` | Validators extra flags | +| fallbackRpcEndpoints | list | `[]` | If using beaconChainRpcEndpointsRandomized fallbackRpcEndpoints will be appended to the list always serving as the last, failover endpoint | +| flags | object | `{"lighthouse":["lighthouse","vc","--datadir=/data/lighthouse","--init-slashing-protection","--logfile-compress","--logfile-max-size=64","--logfile-max-number=2"],"lodestar":["validator","--dataDir=/data/lodestar","--logLevel=info"],"nimbus":["--data-dir=/data/nimbus","--non-interactive","--log-level=INFO","--doppelganger-detection=off"],"prysm":["--datadir=/data/prysm","--accept-terms-of-use","--disable-rewards-penalties-logging","--disable-account-metrics"],"teku":["validator-client","--log-destination=CONSOLE","--data-base-path=/data"]}` | Validators flags | +| fullnameOverride | string | `""` | Provide a name to substitute for the full names of resources | +| global.imagePullSecrets | list | `[]` | | +| global.network | string | `"mainnet"` | | +| global.owner | string | `""` | | +| global.podSecurityContext | object | `{"fsGroup":10000,"runAsNonRoot":true,"runAsUser":10000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| global.project | string | `""` | | +| global.securityContext.capabilities.drop[0] | string | `"ALL"` | | +| global.securityContext.readOnlyRootFilesystem | bool | `true` | | +| global.securityContext.runAsNonRoot | bool | `true` | | +| global.securityContext.runAsUser | int | `10000` | | +| graffiti | string | `""` | You can use the graffiti to add a string to your proposed blocks, which will be seen on the block explorer. ref: https://docs.prylabs.network/docs/prysm-usage/parameters#validator-configuration | +| image | object | `{"lighthouse":{"repository":"sigp/lighthouse","tag":"v4.5.0"},"lodestar":{"repository":"chainsafe/lodestar","tag":"v1.12.0"},"nimbus":{"repository":"statusim/nimbus-validator-client","tag":"multiarch-v23.11.0"},"prysm":{"repository":"gcr.io/prylabs-dev/prysm/validator","tag":"v4.1.1"},"pullPolicy":"IfNotPresent","teku":{"repository":"consensys/teku","tag":"23.11.0"}}` | Validators image version ref: https://gcr.io/prysmaticlabs/prysm/validator ref: https://hub.docker.com/r/sigp/lighthouse | +| imagePullSecrets | list | `[]` | Credentials to fetch images from private registry ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ | +| initImageBusybox | object | `{"pullPolicy":"IfNotPresent","repository":"busybox","tag":"1.36"}` | Init image is used to chown data volume, initialise genesis, etc. 
| +| livenessProbe.lighthouse.failureThreshold | int | `3` | | +| livenessProbe.lighthouse.httpGet.path | string | `"/metrics"` | | +| livenessProbe.lighthouse.httpGet.port | string | `"metrics"` | | +| livenessProbe.lighthouse.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.lighthouse.initialDelaySeconds | int | `60` | | +| livenessProbe.lighthouse.periodSeconds | int | `60` | | +| livenessProbe.lighthouse.successThreshold | int | `1` | | +| livenessProbe.lighthouse.timeoutSeconds | int | `1` | | +| livenessProbe.lodestar.failureThreshold | int | `3` | | +| livenessProbe.lodestar.httpGet.path | string | `"/metrics"` | | +| livenessProbe.lodestar.httpGet.port | string | `"metrics"` | | +| livenessProbe.lodestar.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.lodestar.initialDelaySeconds | int | `60` | | +| livenessProbe.lodestar.periodSeconds | int | `60` | | +| livenessProbe.lodestar.successThreshold | int | `1` | | +| livenessProbe.lodestar.timeoutSeconds | int | `1` | | +| livenessProbe.nimbus.failureThreshold | int | `3` | | +| livenessProbe.nimbus.httpGet.path | string | `"/metrics"` | | +| livenessProbe.nimbus.httpGet.port | string | `"metrics"` | | +| livenessProbe.nimbus.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.nimbus.initialDelaySeconds | int | `60` | | +| livenessProbe.nimbus.periodSeconds | int | `60` | | +| livenessProbe.nimbus.successThreshold | int | `1` | | +| livenessProbe.nimbus.timeoutSeconds | int | `1` | | +| livenessProbe.prysm.failureThreshold | int | `3` | | +| livenessProbe.prysm.httpGet.path | string | `"/healthz"` | | +| livenessProbe.prysm.httpGet.port | string | `"metrics"` | | +| livenessProbe.prysm.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.prysm.initialDelaySeconds | int | `60` | | +| livenessProbe.prysm.periodSeconds | int | `60` | | +| livenessProbe.prysm.successThreshold | int | `1` | | +| livenessProbe.prysm.timeoutSeconds | int | `1` | | +| livenessProbe.teku.failureThreshold | int | `3` | | +| livenessProbe.teku.httpGet.path | string | `"/metrics"` | | +| livenessProbe.teku.httpGet.port | string | `"metrics"` | | +| livenessProbe.teku.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.teku.initialDelaySeconds | int | `60` | | +| livenessProbe.teku.periodSeconds | int | `60` | | +| livenessProbe.teku.successThreshold | int | `1` | | +| livenessProbe.teku.timeoutSeconds | int | `1` | | +| metrics | object | `{"enabled":true,"flags":{"lighthouse":["--metrics","--metrics-port=9090","--metrics-address=0.0.0.0"],"lodestar":["--metrics","--metrics.address=0.0.0.0","--metrics.port=9090"],"nimbus":["--metrics","--metrics-port=9090","--metrics-address=0.0.0.0"],"prysm":["--monitoring-port=9090","--monitoring-host=0.0.0.0"],"teku":["--metrics-enabled=true","--metrics-host-allowlist=*","--metrics-interface=0.0.0.0","--metrics-port=9090"]},"port":9090,"prometheusRule":{"additionalLabels":{},"default":true,"enabled":false,"namespace":"","rules":{}},"serviceMonitor":{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabelings":[],"scrapeTimeout":""}}` | Monitoring | +| metrics.enabled | bool | `true` | Whether to enable metrics collection or not | +| metrics.flags | object | 
`{"lighthouse":["--metrics","--metrics-port=9090","--metrics-address=0.0.0.0"],"lodestar":["--metrics","--metrics.address=0.0.0.0","--metrics.port=9090"],"nimbus":["--metrics","--metrics-port=9090","--metrics-address=0.0.0.0"],"prysm":["--monitoring-port=9090","--monitoring-host=0.0.0.0"],"teku":["--metrics-enabled=true","--metrics-host-allowlist=*","--metrics-interface=0.0.0.0","--metrics-port=9090"]}` | Extra flags to pass for collecting metrics | +| metrics.port | int | `9090` | Prometheus exporter port | +| metrics.prometheusRule | object | `{"additionalLabels":{},"default":true,"enabled":false,"namespace":"","rules":{}}` | Custom PrometheusRule to be defined ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions | +| metrics.prometheusRule.additionalLabels | object | `{}` | Additional labels for the prometheusRule | +| metrics.prometheusRule.default | bool | `true` | Create a default set of Alerts | +| metrics.prometheusRule.enabled | bool | `false` | Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator | +| metrics.prometheusRule.namespace | string | `""` | The namespace in which the prometheusRule will be created | +| metrics.prometheusRule.rules | object | `{}` | Custom Prometheus rules | +| metrics.serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabelings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| metrics.serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| metrics.serviceMonitor.enabled | bool | `false` | ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| metrics.serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| metrics.serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| metrics.serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| metrics.serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| metrics.serviceMonitor.relabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| metrics.serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| nameOverride | string | `""` | Provide a name in place of operator for `app:` labels | +| network | string | `"mainnet"` | Network ID Options for Ethereum: mainnet, prater Options for Gnosis: gnosis | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| priorityClassName | string | `""` | used to assign priority to pods ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ | +| rbac | object | `{"create":true,"name":"","rules":[{"apiGroups":[""],"resources":["services","pods"],"verbs":["list","get","patch"]}]}` | RBAC configuration. 
ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ | +| readinessProbe | object | `{"lighthouse":{"failureThreshold":3,"httpGet":{"path":"/metrics","port":"metrics","scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":60,"successThreshold":1,"timeoutSeconds":1},"lodestar":{"failureThreshold":3,"httpGet":{"path":"/metrics","port":"metrics","scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":60,"successThreshold":1,"timeoutSeconds":1},"nimbus":{"failureThreshold":3,"httpGet":{"path":"/metrics","port":"metrics","scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":60,"successThreshold":1,"timeoutSeconds":1},"prysm":{"failureThreshold":3,"httpGet":{"path":"/healthz","port":"metrics","scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":60,"successThreshold":1,"timeoutSeconds":1},"teku":{"failureThreshold":3,"httpGet":{"path":"/metrics","port":"metrics","scheme":"HTTP"},"initialDelaySeconds":60,"periodSeconds":60,"successThreshold":1,"timeoutSeconds":1}}` | Configure liveness and readiness probes https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| serviceAccount | object | `{"annotations":{},"create":true,"name":""}` | Spearate service account per validator. ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.create | bool | `true` | Specifies whether a service account should be created | +| strictFeeRecipientCheck | bool | `false` | Lodestar specific setting Enables strict checking of the validator's feeRecipient with the one returned by engine | +| terminationGracePeriodSeconds | int | `120` | | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| type | string | `"teku"` | What type of validator to use. Options for Ethereum: prysm, lighthouse, teku, nimbus, lodestar Options for Gnosis: teku, lighthouse, nimbus | +| validatorsCount | int | `0` | How many validators to run **NB! Each validtor hosts a certain number of keys specified when using CLI sync-db command, so the number of validators must be >= (total number of validator keys) / validator capacity specified in CLI | +| validatorsKeyIndex | int | `0` | If you want to run multiple validator types (e.g lighouse, teku,...) you need to adjust the key index to prevent double signing. Please be careful with this setting and be aware of boundaries of each validator client you would like to run. Example: - 700 Keys in Web3Signer database - 4 Lighthouse validators - 3 Prysm validators Deployment 1 => validatorsKeyIndex: 0 validatorsCount: 4 type: lighthouse Deployment 2 => validatorsKeyIndex: 4 validatorsCount: 3 type: prysm **NB! If you not fully understand what is done here, keep this value at 0 and set validatorsCount to the maximum of your keys and do not spin up this Chart multiple times! 
| +| validatorsNoOfKeys | int | `100` | | +| web3signerEndpoint | string | `""` | Web3Signer Endpoint | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/validators/values.yaml b/charts/validators/values.yaml index eb39e768c..588d25089 100644 --- a/charts/validators/values.yaml +++ b/charts/validators/values.yaml @@ -7,9 +7,9 @@ global: project: "" imagePullSecrets: [] network: mainnet - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## + # -- Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # podSecurityContext: runAsNonRoot: true runAsUser: 10000 @@ -23,17 +23,17 @@ global: drop: - ALL -## Provide a name in place of operator for `app:` labels -## +# -- Provide a name in place of operator for `app:` labels +# nameOverride: "" -## Provide a name to substitute for the full names of resources -## +# -- Provide a name to substitute for the full names of resources +# fullnameOverride: "" -## RBAC configuration. -## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -## +# -- RBAC configuration. +# ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +# rbac: create: true name: "" @@ -47,15 +47,15 @@ rbac: - "get" - "patch" -## Init image is used to chown data volume, initialise genesis, etc. -## +# -- Init image is used to chown data volume, initialise genesis, etc. +# initImageBusybox: repository: "busybox" tag: "1.36" pullPolicy: IfNotPresent -## CLI image is used to fetch public keys. -## +# -- CLI image is used to fetch public keys. +# cliImage: repository: nethermindeth/keystores-cli tag: "v1.0.0" @@ -68,59 +68,59 @@ externalSecrets: kind: secretStoreKind data: [] -## Configuration for validators -## ref: https://lighthouse-book.sigmaprime.io/validator-management.html -## ref: https://docs.prylabs.network/docs/getting-started -## +# Configuration for validators +# ref: https://lighthouse-book.sigmaprime.io/validator-management.html +# ref: https://docs.prylabs.network/docs/getting-started +# -## Whether to enable validator statefulset or not -## Can be used to temporarily disable validators -## until synchronization of eth1/eth2 nodes is complete -## +# -- Whether to enable validator statefulset or not +# Can be used to temporarily disable validators +# until synchronization of eth1/eth2 nodes is complete +# enabled: true -## What type of validator to use. -## Options for Ethereum: prysm, lighthouse, teku, nimbus, lodestar -## Options for Gnosis: teku, lighthouse, nimbus -## +# -- What type of validator to use. +# Options for Ethereum: prysm, lighthouse, teku, nimbus, lodestar +# Options for Gnosis: teku, lighthouse, nimbus +# type: teku -## If you want to run multiple validator types (e.g lighouse, teku,...) -## you need to adjust the key index to prevent double signing. -## Please be careful with this setting and be aware of boundaries -## of each validator client you would like to run. -## -## Example: -## - 700 Keys in Web3Signer database -## - 4 Lighthouse validators -## - 3 Prysm validators -## -## Deployment 1 => validatorsKeyIndex: 0 -## validatorsCount: 4 -## type: lighthouse -## -## Deployment 2 => validatorsKeyIndex: 4 -## validatorsCount: 3 -## type: prysm -## -## **NB! 
If you not fully understand what is done here, keep this value at 0 and set validatorsCount to the maximum of
-## your keys and do not spin up this Chart multiple times!
+# -- If you want to run multiple validator types (e.g. lighthouse, teku, ...)
+# you need to adjust the key index to prevent double signing.
+# Please be careful with this setting and be aware of boundaries
+# of each validator client you would like to run.
+#
+# Example:
+#  - 700 Keys in Web3Signer database
+#  - 4 Lighthouse validators
+#  - 3 Prysm validators
+#
+# Deployment 1 => validatorsKeyIndex: 0
+#                 validatorsCount: 4
+#                 type: lighthouse
+#
+# Deployment 2 => validatorsKeyIndex: 4
+#                 validatorsCount: 3
+#                 type: prysm
+#
+# **NB! If you do not fully understand what is done here, keep this value at 0 and set validatorsCount to the maximum of
+# your keys and do not spin up this Chart multiple times!
 validatorsKeyIndex: 0

-## How many validators to run
-## **NB! Each validtor hosts a certain number of keys specified when using CLI sync-db command,
-## so the number of validators must be >= (total number of validator keys) / validator capacity specified in CLI
-##
+# -- How many validators to run
+# **NB! Each validator hosts a certain number of keys specified when using the CLI sync-db command,
+# so the number of validators must be >= (total number of validator keys) / validator capacity specified in CLI
+#
 validatorsCount: 0

 validatorsNoOfKeys: 100

-## Whether a builder should be used for proposalConfig
+# -- Whether a builder should be used for proposalConfig
 enableBuilder: false

-## Validators image version
-## ref: https://gcr.io/prysmaticlabs/prysm/validator
-## ref: https://hub.docker.com/r/sigp/lighthouse
+# -- Validators image version
+# ref: https://gcr.io/prysmaticlabs/prysm/validator
+# ref: https://hub.docker.com/r/sigp/lighthouse
 image:
   pullPolicy: IfNotPresent
   prysm:
@@ -139,51 +139,51 @@ image:
     repository: "chainsafe/lodestar"
     tag: "v1.12.0"

-## Credentials to fetch images from private registry
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-##
+# -- Credentials to fetch images from private registry
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+#
 imagePullSecrets: []

 terminationGracePeriodSeconds: 120

-## Spearate service account per validator.
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
-##
+# -- Separate service account per validator. 
+# ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Specifies whether a service account should be created + # -- Specifies whether a service account should be created create: true name: "" - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## used to assign priority to pods -## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -## +# -- used to assign priority to pods +# ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +# priorityClassName: "" -## Network ID -## Options for Ethereum: mainnet, prater -## Options for Gnosis: gnosis -## +# -- Network ID +# Options for Ethereum: mainnet, prater +# Options for Gnosis: gnosis +# network: "mainnet" -## Validators flags -## +# -- Validators flags +# flags: prysm: - "--datadir=/data/prysm" @@ -212,8 +212,8 @@ flags: - "--dataDir=/data/lodestar" - "--logLevel=info" -## Validators extra flags -## +# -- Validators extra flags +# extraFlags: prysm: [] lighthouse: [] @@ -221,49 +221,49 @@ extraFlags: nimbus: [] lodestar: [] -## Web3Signer Endpoint -## +# -- Web3Signer Endpoint +# web3signerEndpoint: "" -## List of Beacon Chain node addresses -## +# -- List of Beacon Chain node addresses +# beaconChainRpcEndpoints: [] -## Randomize endpoints if specified -## Has a higher priority than beaconChainRpcEndpoints, -## all specified hosts will be randomly assigned to all validators, -## used to more evenly distribute the load -## +# -- Randomize endpoints if specified +# Has a higher priority than beaconChainRpcEndpoints, +# all specified hosts will be randomly assigned to all validators, +# used to more evenly distribute the load +# beaconChainRpcEndpointsRandomized: [] -## if using beaconChainRpcEndpointsRandomized -## fallbackRpcEndpoints will be appended to the list -## always serving as the last, failover endpoint -## +# -- If using beaconChainRpcEndpointsRandomized +# fallbackRpcEndpoints will be appended to the list +# always serving as the last, failover endpoint +# fallbackRpcEndpoints: [] -## You can use the graffiti to add a string to your proposed blocks, -## which will be seen on the block explorer. -## ref: https://docs.prylabs.network/docs/prysm-usage/parameters#validator-configuration -## +# -- You can use the graffiti to add a string to your proposed blocks, +# which will be seen on the block explorer. 
+# ref: https://docs.prylabs.network/docs/prysm-usage/parameters#validator-configuration +# graffiti: "" enableWatcher: false -## Lodestar specific setting -## Enables strict checking of the validator's feeRecipient with the one returned by engine +# -- Lodestar specific setting +# Enables strict checking of the validator's feeRecipient with the one returned by engine strictFeeRecipientCheck: false -## Monitoring -## +# -- Monitoring +# metrics: - # Whether to enable metrics collection or not + # -- Whether to enable metrics collection or not enabled: true - # Prometheus exporter port + # -- Prometheus exporter port port: 9090 - # Extra flags to pass for collecting metrics + # -- Extra flags to pass for collecting metrics flags: prysm: - "--monitoring-port=9090" @@ -286,81 +286,81 @@ metrics: - "--metrics.address=0.0.0.0" - "--metrics.port=9090" - ## Prometheus Service Monitor - ## ref: https://github.com/coreos/prometheus-operator - ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## + # -- Prometheus Service Monitor + # ref: https://github.com/coreos/prometheus-operator + # https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + # serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- Metrics RelabelConfigs to apply to samples before scraping. + # relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. - ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. 
+ # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {} - ## Custom PrometheusRule to be defined - ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions - ## + # -- Custom PrometheusRule to be defined + # ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + # prometheusRule: - ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator - ## + # -- Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.prometheusRule.default Create a default set of Alerts - ## + # -- Create a default set of Alerts + # default: true - ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created - ## + # -- The namespace in which the prometheusRule will be created + # namespace: "" - ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule - ## + # -- Additional labels for the prometheusRule + # additionalLabels: {} - ## @param metrics.prometheusRule.rules Custom Prometheus rules - ## e.g: - ## rules: - ## - alert: PrysmValidatorHourlyEarningLessOrEqual0 - ## expr: sum(validator_balance) - sum(validator_balance offset 1h) - count(validator_balance > 16)*32 + count(validator_balance offset 1h > 0)*32 - ## for: 5m - ## labels: - ## severity: critical - ## annotations: - ## summary: Prysm validator hourly earning <= 0 - ## description: Check validators immediately. Pod - {{ printf "{{ $labels.pod }}" }}. Namespace - {{ printf "{{ $labels.namespace }}" }} - ## - alert: PrysmValidatorAlotOfErrorsLastHour - ## expr: sum(delta(log_entries_total{job='{{ include "operator.fullname" . }}-validator', level="error"}[1h]) > 0) - ## for: 5m - ## labels: - ## severity: warning - ## annotations: - ## summary: Many validator errors or warnings last hour - ## description: Check validator {{ printf "{{ $labels.pod }}" }}. Namespace - {{ printf "{{ $labels.namespace }}" }} - ## + # -- Custom Prometheus rules rules: {} - -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## + # e.g: + # rules: + # - alert: PrysmValidatorHourlyEarningLessOrEqual0 + # expr: sum(validator_balance) - sum(validator_balance offset 1h) - count(validator_balance > 16)*32 + count(validator_balance offset 1h > 0)*32 + # for: 5m + # labels: + # severity: critical + # annotations: + # summary: Prysm validator hourly earning <= 0 + # description: Check validators immediately. Pod - {{ printf "{{ $labels.pod }}" }}. Namespace - {{ printf "{{ $labels.namespace }}" }} + # - alert: PrysmValidatorAlotOfErrorsLastHour + # expr: sum(delta(log_entries_total{job='{{ include "operator.fullname" . }}-validator', level="error"}[1h]) > 0) + # for: 5m + # labels: + # severity: warning + # annotations: + # summary: Many validator errors or warnings last hour + # description: Check validator {{ printf "{{ $labels.pod }}" }}. 
Namespace - {{ printf "{{ $labels.namespace }}" }} + # + +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} -## Configure liveness and readiness probes -## https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ -## +# -- Configure liveness and readiness probes +# https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +# readinessProbe: prysm: initialDelaySeconds: 60 diff --git a/charts/vouch/README.md b/charts/vouch/README.md new file mode 100644 index 000000000..246990eee --- /dev/null +++ b/charts/vouch/README.md @@ -0,0 +1,108 @@ +# vouch + +![Version: 1.1.1](https://img.shields.io/badge/Version-1.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.7.6](https://img.shields.io/badge/AppVersion-1.7.6-informational?style=flat-square) + +A Helm chart for installing and configuring large scale ETH staking infrastructure on top of the Kubernetes + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| matilote | | | +| aivarasko | | | + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| file://../common | common | 1.0.1 | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity | +| cliImage.pullPolicy | string | `"IfNotPresent"` | | +| cliImage.repository | string | `"nethermindeth/keystores-cli"` | | +| cliImage.tag | string | `"v1.0.0"` | | +| externalSecrets.dataFrom.key | string | `"vouch"` | | +| externalSecrets.enabled | bool | `false` | | +| externalSecrets.secretStoreRef.kind | string | `"SecretStore"` | | +| externalSecrets.secretStoreRef.name | string | `"secretStoreRef"` | | +| fullnameOverride | string | `""` | Provide a name to substitute for the full names of resources | +| global.serviceAccount.create | bool | `true` | | +| httpPort | int | `8881` | Port on which vouch HTTP listens. | +| image.pullPolicy | string | `"IfNotPresent"` | | +| image.repository | string | `"attestant/vouch"` | | +| image.tag | string | `"1.7.6"` | Overrides the image tag whose default is the chart appVersion. | +| imagePullSecrets | list | `[]` | | +| initImage | object | `{"pullPolicy":"IfNotPresent","repository":"bash","tag":"5.2"}` | Init image is used to chown data volume, etc. | +| livenessProbe.failureThreshold | int | `3` | | +| livenessProbe.httpGet.path | string | `"/metrics"` | | +| livenessProbe.httpGet.port | string | `"metrics"` | | +| livenessProbe.httpGet.scheme | string | `"HTTP"` | | +| livenessProbe.initialDelaySeconds | int | `60` | | +| livenessProbe.periodSeconds | int | `10` | | +| livenessProbe.successThreshold | int | `1` | | +| livenessProbe.timeoutSeconds | int | `3` | | +| loggingLevel | string | `"INFO"` | Sets logging verbosity. Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. 
| +| metricsPort | int | `8081` | The port (TCP) on which Prometheus accesses metrics | +| nameOverride | string | `""` | Provide a name in place of operator for `app:` labels | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| podAnnotations | object | `{}` | | +| podSecurityContext | object | `{"fsGroup":1000,"runAsUser":1000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ | +| readinessProbe.enabled | bool | `true` | | +| readinessProbe.failureThreshold | int | `3` | | +| readinessProbe.httpGet.path | string | `"/metrics"` | | +| readinessProbe.httpGet.port | string | `"metrics"` | | +| readinessProbe.httpGet.scheme | string | `"HTTP"` | | +| readinessProbe.initialDelaySeconds | int | `10` | | +| readinessProbe.periodSeconds | int | `5` | | +| readinessProbe.successThreshold | int | `2` | | +| readinessProbe.timeoutSeconds | int | `3` | | +| relays | list | `[]` | | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| securityContext.capabilities.drop[0] | string | `"ALL"` | | +| securityContext.fsGroup | int | `10000` | | +| securityContext.readOnlyRootFilesystem | bool | `true` | | +| securityContext.runAsNonRoot | bool | `true` | | +| securityContext.runAsUser | int | `10000` | | +| service.httpPort | int | `8881` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | RelabelConfigs to apply to samples before scraping. 
| +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| vouch.accountmanager.dirk.accounts[0] | string | `"Validators"` | | +| vouch.accountmanager.dirk.endpoints[0] | string | `"dirk-1:8881"` | | +| vouch.accountmanager.dirk.endpoints[1] | string | `"dirk-2:8881"` | | +| vouch.accountmanager.dirk.endpoints[2] | string | `"dirk-3:8881"` | | +| vouch.accountmanager.dirk.timeout | string | `"1m"` | | +| vouch.beaconnodeaddress | string | `"localhost:5052"` | | +| vouch.beaconnodeaddresses[0] | string | `"localhost:5051"` | | +| vouch.beaconnodeaddresses[1] | string | `"localhost:5052"` | | +| vouch.blockrelay.fallbackfeerecipient | string | `"0x0000000000000000000000000000000000000001"` | | +| vouch.feerecipient.defaultaddress | string | `"0x0000000000000000000000000000000000000001"` | | +| vouch.graffiti.static.value | string | `"My graffiti"` | | +| vouch.loglevel | string | `"debug"` | | +| vouch.metrics.prometheus.listenaddress | string | `"0.0.0.0:8081"` | | +| vouch.metrics.prometheus.loglevel | string | `"trace"` | | +| vouch.strategies | string | `nil` | | +| vouch.submitter | string | `nil` | | +| vouch.tracing | string | `nil` | | +| vouchDataDir | string | `"/data/vouch"` | | +| vouchFullConfig | string | `nil` | use vouchFullConfig: to provide all vouch.yaml values use vouch: to populate good defaults and to do minimal changes | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/vouch/values.yaml b/charts/vouch/values.yaml index cac484047..57f567458 100644 --- a/charts/vouch/values.yaml +++ b/charts/vouch/values.yaml @@ -9,11 +9,11 @@ global: image: repository: attestant/vouch pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. + # -- Overrides the image tag whose default is the chart appVersion. tag: "1.7.6" -## Init image is used to chown data volume, etc. -## +# -- Init image is used to chown data volume, etc. +# initImage: repository: bash tag: "5.2" @@ -34,7 +34,7 @@ externalSecrets: vouchDataDir: /data/vouch -# use vouchFullConfig: to provide all vouch.yaml values +# -- use vouchFullConfig: to provide all vouch.yaml values # use vouch: to populate good defaults and to do minimal changes vouchFullConfig: vouch: @@ -71,24 +71,24 @@ relays: [] imagePullSecrets: [] -## Provide a name in place of operator for `app:` labels -## +# -- Provide a name in place of operator for `app:` labels +# nameOverride: "" -## Provide a name to substitute for the full names of resources -## +# -- Provide a name to substitute for the full names of resources +# fullnameOverride: "" -## Sets logging verbosity. -## Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. +# -- Sets logging verbosity. +# Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. loggingLevel: "INFO" -## Port on which vouch HTTP listens. -## +# -- Port on which vouch HTTP listens. 
+# httpPort: 8881 -## The port (TCP) on which Prometheus accesses metrics -## +# -- The port (TCP) on which Prometheus accesses metrics +# metricsPort: 8081 livenessProbe: @@ -115,20 +115,20 @@ readinessProbe: scheme: HTTP -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" podAnnotations: {} -## Pod Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ -## +# -- Pod Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# podSecurityContext: fsGroup: 1000 runAsUser: 1000 @@ -145,9 +145,9 @@ service: type: ClusterIP httpPort: 8881 -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -160,47 +160,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to 
samples before scraping.
-  ##
+  # -- RelabelConfigs to apply to samples before scraping.
+  #
   relabellings: []
-  ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion.
-  ##
+  # -- Metrics RelabelConfigs to apply to samples before ingestion.
+  #
   metricRelabelings: []
-  ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
-  ##
+  # -- Specify honorLabels parameter to add the scrape endpoint
+  #
   honorLabels: false
-  ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
-  ##
+  # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus
+  #
   additionalLabels: {}
diff --git a/charts/web3signer/README.md b/charts/web3signer/README.md
new file mode 100644
index 000000000..40259664b
--- /dev/null
+++ b/charts/web3signer/README.md
@@ -0,0 +1,77 @@
+# web3signer
+
+![Version: 1.0.1](https://img.shields.io/badge/Version-1.0.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 23.11.0](https://img.shields.io/badge/AppVersion-23.11.0-informational?style=flat-square)
+
+A Helm chart for installing and configuring Web3signer
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| matilote | | |
+| aivarasko | | |
+
+## Requirements
+
+| Repository | Name | Version |
+|------------|------|---------|
+| file://../common | common | 1.0.1 |
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | Affinity for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity |
+| cliImage | object | `{"pullPolicy":"IfNotPresent","repository":"nethermindeth/keystores-cli","tag":"v1.0.0"}` | CLI image is used to fetch private keys. |
+| enableReloader | bool | `false` | |
+| externalSecrets.data | list | `[]` | |
+| externalSecrets.enabled | bool | `true` | |
+| externalSecrets.secretStoreRef.kind | string | `"secretStoreKind"` | |
+| externalSecrets.secretStoreRef.name | string | `"secretStoreName"` | |
+| flywayImage | object | `{"pullPolicy":"IfNotPresent","repository":"flyway/flyway","tag":"9.3"}` | Flyway image is used to apply database migrations |
+| fullnameOverride | string | `""` | Provide a name to substitute for the full names of resources |
+| global.label | string | `""` | |
+| global.podSecurityContext | object | `{"fsGroup":10000,"runAsNonRoot":true,"runAsUser":10000}` | Pod Security Context ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ |
+| global.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| global.securityContext.readOnlyRootFilesystem | bool | `true` | |
+| global.securityContext.runAsNonRoot | bool | `true` | |
+| global.securityContext.runAsUser | int | `10000` | |
+| global.serviceAccount.create | bool | `true` | |
+| httpPort | int | `6174` | Port on which Web3Signer HTTP listens. |
+| idleTimeout | int | `30` | |
+| image.pullPolicy | string | `"IfNotPresent"` | |
+| image.repository | string | `"consensys/web3signer"` | |
+| image.tag | string | `"23.11.0"` | Overrides the image tag whose default is the chart appVersion. 
| +| imagePullSecrets | list | `[]` | | +| initImage | object | `{"pullPolicy":"IfNotPresent","repository":"busybox","tag":"1.36"}` | Init image is used to chown data volume, etc. | +| loggingLevel | string | `"INFO"` | Sets logging verbosity. Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. | +| metricsPort | int | `9000` | The port (TCP) on which Prometheus accesses metrics | +| nameOverride | string | `""` | Provide a name in place of operator for `app:` labels | +| network | string | `"mainnet"` | Network Options: mainnet, prater, gnosis | +| nodeSelector | object | `{}` | Node labels for pod assignment ref: https://kubernetes.io/docs/user-guide/node-selection/ | +| podAnnotations | object | `{}` | | +| pruningEpochToKeep | int | `100` | Number of epochs to keep when pruning the slashing protection database. | +| pruningInterval | int | `24` | Hours between slashing protection database pruning operations. | +| replicaCount | int | `3` | | +| resources | object | `{}` | Configure resource requests and limits. ref: http://kubernetes.io/docs/user-guide/compute-resources/ | +| service.port | int | `80` | | +| service.type | string | `"ClusterIP"` | | +| serviceAccount | object | `{"annotations":{},"name":""}` | ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ | +| serviceAccount.annotations | object | `{}` | Annotations to add to the service account | +| serviceAccount.name | string | `""` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template | +| serviceMonitor | object | `{"additionalLabels":{},"enabled":false,"honorLabels":false,"interval":"30s","metricRelabelings":[],"namespace":"","relabellings":[],"scrapeTimeout":""}` | Prometheus Service Monitor ref: https://github.com/coreos/prometheus-operator https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint | +| serviceMonitor.additionalLabels | object | `{}` | Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus | +| serviceMonitor.enabled | bool | `false` | Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator | +| serviceMonitor.honorLabels | bool | `false` | Specify honorLabels parameter to add the scrape endpoint | +| serviceMonitor.interval | string | `"30s"` | The interval at which metrics should be scraped | +| serviceMonitor.metricRelabelings | list | `[]` | Metrics RelabelConfigs to apply to samples before ingestion. | +| serviceMonitor.namespace | string | `""` | The namespace in which the ServiceMonitor will be created | +| serviceMonitor.relabellings | list | `[]` | Metrics RelabelConfigs to apply to samples before scraping. | +| serviceMonitor.scrapeTimeout | string | `""` | The timeout after which the scrape is ended | +| slotsPerEpoch | int | `32` | Number of slots per epoch. This number multiplied by the number of epochs to keep determines what blocks to keep when pruning the slashing protection database. The default is 32 as defined on MainNet/Prater. 
| +| tmpfsSize | string | `"128Mi"` | | +| tolerations | object | `{}` | Tolerations for pod assignment ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ | +| web3signerJavaOpts | string | `"-Xmx1g -Xms1g"` | Java Opts | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.12.0](https://github.com/norwoodj/helm-docs/releases/v1.12.0) diff --git a/charts/web3signer/values.yaml b/charts/web3signer/values.yaml index 58fc6488b..fd4c5ea84 100644 --- a/charts/web3signer/values.yaml +++ b/charts/web3signer/values.yaml @@ -6,9 +6,9 @@ global: label: "" serviceAccount: create: true - ## Pod Security Context - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## + # -- Pod Security Context + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # podSecurityContext: runAsNonRoot: true runAsUser: 10000 @@ -31,18 +31,18 @@ tmpfsSize: 128Mi image: repository: consensys/web3signer pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. + # -- Overrides the image tag whose default is the chart appVersion. tag: "23.11.0" -## Init image is used to chown data volume, etc. -## +# -- Init image is used to chown data volume, etc. +# initImage: repository: busybox tag: "1.36" pullPolicy: IfNotPresent -## CLI image is used to fetch private keys. -## +# -- CLI image is used to fetch private keys. +# cliImage: repository: nethermindeth/keystores-cli tag: "v1.0.0" @@ -64,8 +64,8 @@ externalSecrets: kind: secretStoreKind data: [] -## Flyawy image is used to apply database migrations -## +# -- Flyawy image is used to apply database migrations +# flywayImage: repository: flyway/flyway tag: "9.3" @@ -73,59 +73,59 @@ flywayImage: imagePullSecrets: [] -## Provide a name in place of operator for `app:` labels -## +# -- Provide a name in place of operator for `app:` labels +# nameOverride: "" -## Provide a name to substitute for the full names of resources -## +# -- Provide a name to substitute for the full names of resources +# fullnameOverride: "" -## Web3Signer Settings -## ref: https://docs.web3signer.consensys.net/en/latest/Reference/CLI/CLI-Syntax/ -## +# -- Web3Signer Settings +# ref: https://docs.web3signer.consensys.net/en/latest/Reference/CLI/CLI-Syntax/ +# -## Network -## Options: mainnet, prater, gnosis +# -- Network +# Options: mainnet, prater, gnosis network: "mainnet" -## Sets logging verbosity. -## Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. +# -- Sets logging verbosity. +# Log levels are OFF, FATAL, WARN, INFO, DEBUG, TRACE, ALL. loggingLevel: "INFO" -## Number of epochs to keep when pruning the slashing protection database. -## +# -- Number of epochs to keep when pruning the slashing protection database. +# pruningEpochToKeep: 100 -## Hours between slashing protection database pruning operations. -## +# -- Hours between slashing protection database pruning operations. +# pruningInterval: 24 -## Number of slots per epoch. This number multiplied by the number of epochs to keep determines what blocks to keep when pruning the slashing protection database. -## The default is 32 as defined on MainNet/Prater. -## +# -- Number of slots per epoch. This number multiplied by the number of epochs to keep determines what blocks to keep when pruning the slashing protection database. +# The default is 32 as defined on MainNet/Prater. +# slotsPerEpoch: 32 -## Port on which Web3Signer HTTP listens. 
-## +# -- Port on which Web3Signer HTTP listens. +# httpPort: 6174 -## The port (TCP) on which Prometheus accesses metrics -## +# -- The port (TCP) on which Prometheus accesses metrics +# metricsPort: 9000 -## Java Opts -## +# -- Java Opts +# web3signerJavaOpts: "-Xmx1g -Xms1g" -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## +# -- ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +# serviceAccount: - # Annotations to add to the service account + # -- Annotations to add to the service account annotations: {} - # The name of the service account to use. + # -- The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: "" @@ -135,9 +135,9 @@ service: type: ClusterIP port: 80 -## Configure resource requests and limits. -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## +# -- Configure resource requests and limits. +# ref: http://kubernetes.io/docs/user-guide/compute-resources/ +# resources: {} # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -150,47 +150,47 @@ resources: {} # cpu: 100m # memory: 128Mi -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## +# -- Node labels for pod assignment +# ref: https://kubernetes.io/docs/user-guide/node-selection/ +# nodeSelector: {} -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## +# -- Tolerations for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +# tolerations: {} -## Affinity for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## +# -- Affinity for pod assignment +# ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +# affinity: {} -## Prometheus Service Monitor -## ref: https://github.com/coreos/prometheus-operator -## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint -## +# -- Prometheus Service Monitor +# ref: https://github.com/coreos/prometheus-operator +# https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint +# serviceMonitor: - ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator - ## + # -- Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + # enabled: false - ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created - ## + # -- The namespace in which the ServiceMonitor will be created + # namespace: "" - ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped - ## + # -- The interval at which metrics should be scraped + # interval: 30s - ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended - ## + # -- The timeout after which the scrape is ended + # scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. - ## + # -- Metrics RelabelConfigs to apply to samples before scraping. + # relabellings: [] - ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
- ## + # -- Metrics RelabelConfigs to apply to samples before ingestion. + # metricRelabelings: [] - ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint - ## + # -- Specify honorLabels parameter to add the scrape endpoint + # honorLabels: false - ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus - ## + # -- Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + # additionalLabels: {}