From 300f6b1240c4e915a76b569fcd1cffafcd871562 Mon Sep 17 00:00:00 2001 From: antares-sw <23400824+antares-sw@users.noreply.github.com> Date: Fri, 10 Nov 2023 12:27:49 +0300 Subject: [PATCH] Add Lodestar (#463) * Init Lodestar * init validator * lodestar * Init Lodestar * Bump version * Update charts/lodestar-validator/README.md Co-authored-by: Dmitri Tsumak Signed-off-by: antares-sw <23400824+antares-sw@users.noreply.github.com> * Fix * fix readme --------- Signed-off-by: antares-sw <23400824+antares-sw@users.noreply.github.com> Co-authored-by: Dmitri Tsumak --- charts/lodestar-validator/.helmignore | 23 + charts/lodestar-validator/Chart.lock | 6 + charts/lodestar-validator/Chart.yaml | 29 ++ charts/lodestar-validator/README.md | 40 ++ .../templates/prometheusrules.yaml | 23 + .../lodestar-validator/templates/service.yaml | 15 + .../templates/serviceaccount.yaml | 12 + .../templates/servicemonitor.yaml | 42 ++ .../templates/statefulset.yaml | 136 ++++++ charts/lodestar-validator/values.yaml | 215 ++++++++++ charts/lodestar/.helmignore | 23 + charts/lodestar/Chart.lock | 6 + charts/lodestar/Chart.yaml | 24 ++ charts/lodestar/templates/_helpers.yaml | 3 + charts/lodestar/templates/clusterrole.yaml | 10 + .../templates/clusterrolebinding.yaml | 16 + charts/lodestar/templates/configmap.yaml | 12 + charts/lodestar/templates/pdb.yaml | 18 + .../lodestar/templates/prometheusrules.yaml | 51 +++ charts/lodestar/templates/role.yaml | 10 + charts/lodestar/templates/rolebinding.yaml | 15 + charts/lodestar/templates/secret.yaml | 9 + charts/lodestar/templates/service-p2p.yaml | 42 ++ charts/lodestar/templates/service.yaml | 33 ++ charts/lodestar/templates/serviceaccount.yaml | 12 + charts/lodestar/templates/servicemonitor.yaml | 42 ++ charts/lodestar/templates/statefulset.yaml | 250 +++++++++++ .../templates/tests/test-connection.yaml | 15 + charts/lodestar/templates/validate.yaml | 8 + charts/lodestar/templates/vpa.yaml | 20 + charts/lodestar/values.yaml | 397 
++++++++++++++++++ 31 files changed, 1557 insertions(+) create mode 100644 charts/lodestar-validator/.helmignore create mode 100644 charts/lodestar-validator/Chart.lock create mode 100644 charts/lodestar-validator/Chart.yaml create mode 100644 charts/lodestar-validator/README.md create mode 100644 charts/lodestar-validator/templates/prometheusrules.yaml create mode 100644 charts/lodestar-validator/templates/service.yaml create mode 100644 charts/lodestar-validator/templates/serviceaccount.yaml create mode 100644 charts/lodestar-validator/templates/servicemonitor.yaml create mode 100644 charts/lodestar-validator/templates/statefulset.yaml create mode 100644 charts/lodestar-validator/values.yaml create mode 100644 charts/lodestar/.helmignore create mode 100644 charts/lodestar/Chart.lock create mode 100644 charts/lodestar/Chart.yaml create mode 100644 charts/lodestar/templates/_helpers.yaml create mode 100644 charts/lodestar/templates/clusterrole.yaml create mode 100644 charts/lodestar/templates/clusterrolebinding.yaml create mode 100644 charts/lodestar/templates/configmap.yaml create mode 100644 charts/lodestar/templates/pdb.yaml create mode 100644 charts/lodestar/templates/prometheusrules.yaml create mode 100644 charts/lodestar/templates/role.yaml create mode 100644 charts/lodestar/templates/rolebinding.yaml create mode 100644 charts/lodestar/templates/secret.yaml create mode 100644 charts/lodestar/templates/service-p2p.yaml create mode 100644 charts/lodestar/templates/service.yaml create mode 100644 charts/lodestar/templates/serviceaccount.yaml create mode 100644 charts/lodestar/templates/servicemonitor.yaml create mode 100644 charts/lodestar/templates/statefulset.yaml create mode 100644 charts/lodestar/templates/tests/test-connection.yaml create mode 100644 charts/lodestar/templates/validate.yaml create mode 100644 charts/lodestar/templates/vpa.yaml create mode 100644 charts/lodestar/values.yaml diff --git a/charts/lodestar-validator/.helmignore 
b/charts/lodestar-validator/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/lodestar-validator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/lodestar-validator/Chart.lock b/charts/lodestar-validator/Chart.lock new file mode 100644 index 000000000..bf23e24c4 --- /dev/null +++ b/charts/lodestar-validator/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.stakewise.io/ + version: 1.0.0 +digest: sha256:a52d823dcd535c64eafc9df56fe41455c602032e084b8adcaa34e536451d2ab2 +generated: "2023-04-03T10:46:51.136024+03:00" diff --git a/charts/lodestar-validator/Chart.yaml b/charts/lodestar-validator/Chart.yaml new file mode 100644 index 000000000..e85503cbf --- /dev/null +++ b/charts/lodestar-validator/Chart.yaml @@ -0,0 +1,29 @@ +apiVersion: v2 +name: lodestar-validator +description: A Lodestar Validator Chart + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 1.0.2 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.12.0" + +dependencies: +- name: common + repository: https://charts.stakewise.io/ + version: 1.x.x diff --git a/charts/lodestar-validator/README.md b/charts/lodestar-validator/README.md new file mode 100644 index 000000000..f359c2b99 --- /dev/null +++ b/charts/lodestar-validator/README.md @@ -0,0 +1,40 @@ +# Lodestar Validator Setup + +This guide will walk you through creating a new Lodestar Validator, loading an existing keystore to it, and creating a Kubernetes secret to store your keystore securely. + +## Prerequisites + +* Lodestar Validator installed on your local machine or a remote server. +* Kubernetes CLI (kubectl) installed and configured to access your cluster. +* Access to a running Kubernetes cluster. + +## Creating secrets with keystores and password + +> When setting up a Lodestar Validator and creating a Kubernetes secret to store your keystores and passwords, it's important to name the secret files consistently. This is because the Lodestar Validator expects a specific naming pattern in order to find and use your keystore. +> +> The naming pattern for the Kubernetes secret should be `keystore-{index}`, index represents kubernetes replica, if `replicasCount=2` you must create two secrets `keystore-0` and `keystore-1`. This ensures that the Lodestar Validator can find the correct secret when it looks for your keystore. +> +> By following these naming conventions, you can ensure that your Lodestar Validator is set up correctly and ready to use. + +1. Create secrets with keystores and passswords. 
This will create a new secrets with keystore files and a password. If you have more then 100 keystores it's better to split it in multiple replicas, for this create multiple secrets with 100 keystores in each and load it to Kubernetes as secrets. You can do this with the following command: + +```javascript +kubectl create secret generic keystore-0 --from-file=/path/to/my-wallet/keystores-0 --from-file=/path/to/my-wallet/keystore-0.txt +kubectl create secret generic keystore-1 --from-file=/path/to/my-wallet/keystores-1 --from-file=/path/to/my-wallet/keystore-1.txt +``` + +> `/path/to/my-wallet/keystores-0` expect keystores with name `keystore*.json` and `/path/to/my-wallet/keystore-0.txt` plain txt with password for these keystores. + +This will create a new Kubernetes secrets named `keystore-0` and `keystore-1` that contains your keystore files and passwords. + +## Deploy Lodestar Validator + +```bash +helm repo add stakewise https://charts.stakewise.io +helm repo update +helm upgrade --install lodestar-validator stakewise/lodestar-validator \ + --namespace validators \ + --create-namespace \ + --set global.network="goerli" \ + --set replicaCount=2 +``` diff --git a/charts/lodestar-validator/templates/prometheusrules.yaml b/charts/lodestar-validator/templates/prometheusrules.yaml new file mode 100644 index 000000000..e02c3f28f --- /dev/null +++ b/charts/lodestar-validator/templates/prometheusrules.yaml @@ -0,0 +1,23 @@ +{{- if and (or .Values.global.metrics.enabled .Values.metrics.enabled) + (or .Values.global.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ include "common.names.fullname" $ }} + rules: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} +{{- end }} diff --git a/charts/lodestar-validator/templates/service.yaml b/charts/lodestar-validator/templates/service.yaml new file mode 100644 index 000000000..33f89a63e --- /dev/null +++ b/charts/lodestar-validator/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.metrics.port }} + targetPort: metrics + protocol: TCP + name: metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} diff --git a/charts/lodestar-validator/templates/serviceaccount.yaml b/charts/lodestar-validator/templates/serviceaccount.yaml new file mode 100644 index 000000000..cb2edd11f --- /dev/null +++ b/charts/lodestar-validator/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.global.serviceAccount.create .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common.names.serviceAccountName" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/lodestar-validator/templates/servicemonitor.yaml b/charts/lodestar-validator/templates/servicemonitor.yaml new file mode 100644 index 000000000..9ecffc6ca --- /dev/null +++ b/charts/lodestar-validator/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and (or .Values.global.metrics.enabled .Values.metrics.enabled) + (or .Values.global.metrics.serviceMonitor.enabled .Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/charts/lodestar-validator/templates/statefulset.yaml b/charts/lodestar-validator/templates/statefulset.yaml new file mode 100644 index 000000000..f22b847e8 --- /dev/null +++ b/charts/lodestar-validator/templates/statefulset.yaml @@ -0,0 +1,136 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.statefulset" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: "Parallel" + serviceName: {{ include "common.names.fullname" . }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "common.labels.matchLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "common.names.serviceAccountName" . }} + {{- with .Values.securityContext }} + securityContext: + {{ toYaml . 
| nindent 8 | trim }} + {{- end }} + initContainers: + {{- if and .Values.persistence.enabled .Values.initChownData }} + - name: init-chown + image: "{{ .Values.initImage.registry }}/{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }}", "/data"] + volumeMounts: + - name: data + mountPath: /data + {{- end }} + - name: import + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy }} + command: + - sh + - -c + - > + INDEX=$((${HOSTNAME##*-})); + node /usr/app/packages/cli/bin/lodestar validator import --dataDir=/data --importKeystores=/keystore-${INDEX} --importKeystoresPassword=/keystore-${INDEX}/keystore-${INDEX}.txt + volumeMounts: + - name: data + mountPath: /data + {{- range $i := until (int .Values.replicaCount) }} + - name: keystore-{{ . }} + mountPath: /keystore-{{ . }} + readOnly: true + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - sh + - -c + - > + node /usr/app/packages/cli/bin/lodestar + validator + --network={{ .Values.global.network }} + --dataDir=/data + --beaconNodes={{ .Values.global.beaconNodes }} + --metrics + --metrics.address={{ .Values.metrics.address }} + --metrics.port={{ .Values.metrics.port }} + {{- range .Values.extraFlags }} + {{ . }} + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: /data + {{- range $i := until (int .Values.replicaCount) }} + - name: keystore-{{ . }} + mountPath: /keystore-{{ . 
}} + readOnly: true + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- range $i := until (int .Values.replicaCount) }} + - name: keystore-{{ . }} + secret: + {{- if $.Values.global.keystoreSecretName }} + secretName: {{ $.Values.global.keystoreSecretName }}-{{ . }} + {{- else }} + secretName: keystore-{{ . }} + {{- end }} + {{- end }} + {{- if (not .Values.persistence.enabled) }} + - name: data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- include "common.labels.statefulset" . | nindent 10 }} + {{- with .Values.persistence.annotations }} + annotations: + {{ toYaml . | nindent 10 | trim }} + {{- end }} + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- end }} \ No newline at end of file diff --git a/charts/lodestar-validator/values.yaml b/charts/lodestar-validator/values.yaml new file mode 100644 index 000000000..5eea86f31 --- /dev/null +++ b/charts/lodestar-validator/values.yaml @@ -0,0 +1,215 @@ +# Default values for lodestar-validator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + ## Network ID + ## + network: mainnet + + ## If keystoreSecretName is not specified, the keystore files will be loaded from secrets + ## with the default pattern keystore-{index}. + ## For example, if keystoreSecretName is not specified and you are using replicaCount 1, + ## the keystore files will be loaded from the keystore-0 secret. + ## If keystoreSecretName is specified, the keystore files will be loaded from secrets + ## with the pattern {keystoreSecretName}-{index}. 
+ ## For example, if you set keystoreSecretName to my-keystore, + ## the keystore files for validator replicaCount 1 will be loaded from the my-keystore-0 secret. + keystoreSecretName: "" + + ## Beacon nodes REST API provider endpoint + beaconNodes: "" + + ## Credentials to fetch images from private registry + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + + ## Service account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## Additional settings could be made in non-global section. + ## + serviceAccount: + # Specifies whether a service account should be created + create: true + + ## Monitoring + ## Additional settings could be made in non-global section. + ## + metrics: + ## Whether to enable metrics collection or not + ## + enabled: true + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + + ## Configure liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## NB! readinessProbe and livenessProbe must be disabled before fully synced + ## Additional settings could be made in non-global section. 
+ ## + livenessProbe: + enabled: true + + readinessProbe: + enabled: true + +replicaCount: 1 + +## Extra flags for Lodestar validator +## +## Example: +## extraFlags: +## --graffiti="StakeWise" +extraFlags: [] + +## Init image is used to chown data volume, initialise genesis, etc. +## +initImage: + registry: "docker.io" + repository: "busybox" + tag: "1.36" + pullPolicy: IfNotPresent + +image: + registry: "docker.io" + repository: "chainsafe/lodestar" + # Overrides the image tag whose default is the chart appVersion. + tag: "v1.12.0" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "lstar-validator" + +serviceAccount: + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +securityContext: {} + +service: + type: ClusterIP + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: {} + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +## Example: +## affinity: +## podAntiAffinity: +## requiredDuringSchedulingIgnoredDuringExecution: +## - labelSelector: +## matchExpressions: +## - key: app.kubernetes.io/name +## operator: In +## values: +## - lodestar-validator +## topologyKey: kubernetes.io/hostname +## +affinity: {} + +## If false, data ownership will not be reset at startup +## This allows the node to be run with an arbitrary user +## +initChownData: false + +## Whether or not to allocate persistent volume disk for the data directory. +## In case of pod failure, the pod data directory will still persist. +## +persistence: + enabled: true + storageClassName: "" + accessModes: + - ReadWriteOnce + size: 5Gi + annotations: {} + +## Monitoring +## +metrics: + address: 0.0.0.0 + ## Metrics port to expose metrics for Prometheus + ## + port: 8008 + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## The interval at which metrics should be scraped + ## + interval: 30s + ## The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## Metrics RelabelConfigs to apply to samples before scraping. + ## + relabellings: [] + ## Metrics RelabelConfigs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## The namespace in which the prometheusRule will be created + ## + namespace: "" + ## Additional labels for the prometheusRule + ## + additionalLabels: {} + ## Custom Prometheus rules + ## + rules: [] diff --git a/charts/lodestar/.helmignore b/charts/lodestar/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/lodestar/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/lodestar/Chart.lock b/charts/lodestar/Chart.lock new file mode 100644 index 000000000..02254376e --- /dev/null +++ b/charts/lodestar/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.stakewise.io/ + version: 1.0.0 +digest: sha256:a52d823dcd535c64eafc9df56fe41455c602032e084b8adcaa34e536451d2ab2 +generated: "2023-01-09T12:36:53.27952+04:00" diff --git a/charts/lodestar/Chart.yaml b/charts/lodestar/Chart.yaml new file mode 100644 index 000000000..a240c99a1 --- /dev/null +++ b/charts/lodestar/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: lodestar +version: 1.0.2 +kubeVersion: "^1.20.0-0" +description: Rust Ethereum 2.0 Client. 
+type: application +keywords: + - ethereum + - blockchain + - lodestar + - p2p +home: https://www.ethereum.org/ +sources: + - https://github.com/ChainSafe/lodestar +maintainers: + - name: Dmitri Tsumak + email: dmitri@stakewise.io +icon: https://raw.githubusercontent.com/ethereum/ethereum-org/master/public/images/logos/ETHEREUM-ICON_Black.png +appVersion: v1.12.0 + +dependencies: +- name: common + repository: https://charts.stakewise.io/ + version: 1.x.x diff --git a/charts/lodestar/templates/_helpers.yaml b/charts/lodestar/templates/_helpers.yaml new file mode 100644 index 000000000..5379e51e7 --- /dev/null +++ b/charts/lodestar/templates/_helpers.yaml @@ -0,0 +1,3 @@ +{{- define "lodestar.p2pPort" -}} +{{- printf "9000" -}} +{{- end -}} diff --git a/charts/lodestar/templates/clusterrole.yaml b/charts/lodestar/templates/clusterrole.yaml new file mode 100644 index 000000000..6d7136bd1 --- /dev/null +++ b/charts/lodestar/templates/clusterrole.yaml @@ -0,0 +1,10 @@ +{{- if or .Values.global.rbac.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "common.names.clusterRoleName" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +rules: +{{- toYaml .Values.rbac.clusterRules | nindent 0 }} +{{- end }} diff --git a/charts/lodestar/templates/clusterrolebinding.yaml b/charts/lodestar/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..87d636dac --- /dev/null +++ b/charts/lodestar/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if or .Values.global.rbac.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "common.names.clusterRoleName" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "common.names.clusterRoleName" . 
}} +subjects: + - kind: ServiceAccount + name: {{ include "common.names.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/lodestar/templates/configmap.yaml b/charts/lodestar/templates/configmap.yaml new file mode 100644 index 000000000..e6da57c0a --- /dev/null +++ b/charts/lodestar/templates/configmap.yaml @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +data: + execution-endpoints.txt: |- +{{- range $i, $e := .Values.global.executionEndpoints }} + {{ . }} +{{- end }} diff --git a/charts/lodestar/templates/pdb.yaml b/charts/lodestar/templates/pdb.yaml new file mode 100644 index 000000000..4ef63f0ff --- /dev/null +++ b/charts/lodestar/templates/pdb.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +spec: +{{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 6 }} +{{- end }} diff --git a/charts/lodestar/templates/prometheusrules.yaml b/charts/lodestar/templates/prometheusrules.yaml new file mode 100644 index 000000000..f07045f43 --- /dev/null +++ b/charts/lodestar/templates/prometheusrules.yaml @@ -0,0 +1,51 @@ +{{- if and (or .Values.global.metrics.enabled .Values.metrics.enabled) + (or .Values.global.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml .Values.metrics.prometheusRule.additionalLabels | nindent 4 }} + {{- end }} +spec: + groups: + {{- with .Values.metrics.prometheusRule.rules }} + - name: {{ include "common.names.fullname" $ }} + rules: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.default }} + - name: {{ include "common.names.fullname" $ }}-default + rules: + - alert: LodestarBeaconNodeDown + expr: up{job='{{ include "common.names.fullname" . }}'} == 0 + for: 1m + labels: + severity: critical + annotations: + summary: Lodestar beacon node is down + description: Check {{ printf "{{ $labels.pod }}" }} beacon node in namespace {{ printf "{{ $labels.namespace }}" }} + - alert: LodestarBeaconNodeIsNotConnectedToEth1Node + expr: sync_eth1_connected{job='{{ include "common.names.fullname" . 
}}'} == 0 + for: 1m + labels: + severity: critical + annotations: + summary: Lodestar beacon node is not connected to eth1 node + description: Check {{ printf "{{ $labels.pod }}" }} beacon node in namespace {{ printf "{{ $labels.namespace }}" }} + - alert: LodestarBeaconNodeIsOutOfSync + expr: sync_eth2_synced{job='{{ include "common.names.fullname" . }}'} == 0 + for: 3m + labels: + severity: critical + annotations: + summary: Lodestar beacon node is out of sync + description: Check {{ printf "{{ $labels.pod }}" }} beacon node in namespace {{ printf "{{ $labels.namespace }}" }} + {{- end }} +{{- end }} diff --git a/charts/lodestar/templates/role.yaml b/charts/lodestar/templates/role.yaml new file mode 100644 index 000000000..dfbc0fe48 --- /dev/null +++ b/charts/lodestar/templates/role.yaml @@ -0,0 +1,10 @@ +{{- if or .Values.global.rbac.create .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +rules: +{{- toYaml .Values.rbac.rules | nindent 0 }} +{{- end }} diff --git a/charts/lodestar/templates/rolebinding.yaml b/charts/lodestar/templates/rolebinding.yaml new file mode 100644 index 000000000..1fdb617ba --- /dev/null +++ b/charts/lodestar/templates/rolebinding.yaml @@ -0,0 +1,15 @@ +{{- if or .Values.global.rbac.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "common.names.serviceAccountName" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "common.names.serviceAccountName" . }} +subjects: + - kind: ServiceAccount + name: {{ include "common.names.serviceAccountName" . 
}} +{{- end }} diff --git a/charts/lodestar/templates/secret.yaml b/charts/lodestar/templates/secret.yaml new file mode 100644 index 000000000..5a3f66bb5 --- /dev/null +++ b/charts/lodestar/templates/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +type: Opaque +data: + jwtsecret: {{ .Values.global.JWTSecret | b64enc | quote }} diff --git a/charts/lodestar/templates/service-p2p.yaml b/charts/lodestar/templates/service-p2p.yaml new file mode 100644 index 000000000..21a9c9987 --- /dev/null +++ b/charts/lodestar/templates/service-p2p.yaml @@ -0,0 +1,42 @@ +{{- if .Values.p2pNodePort.enabled -}} +{{- range $i, $e := until (len .Values.global.executionEndpoints) }} +{{- $port := add $.Values.p2pNodePort.startAt $i -}} +{{- if hasKey $.Values.p2pNodePort.replicaToNodePort ($i | toString) -}} + {{ $port = index $.Values.p2pNodePort.replicaToNodePort ($i | toString) }} +{{- end }} + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" $ }}-{{ $i }} + labels: + {{- include "common.labels.standard" $ | nindent 4 }} + pod: "{{ include "common.names.fullname" $ }}-{{ $i }}" + type: p2p + {{- with $.Values.p2pNodePort.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ $.Values.p2pNodePort.type }} + externalTrafficPolicy: Local + ports: + - name: p2p-tcp + port: {{ include "lodestar.p2pPort" $ }} + protocol: TCP + targetPort: p2p-tcp + nodePort: {{ $port }} + {{- if eq $.Values.p2pNodePort.type "NodePort" }} + - name: p2p-udp + port: {{ include "lodestar.p2pPort" $ }} + protocol: UDP + targetPort: p2p-udp + nodePort: {{ $port }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" $ | nindent 4 }} + statefulset.kubernetes.io/pod-name: "{{ include "common.names.fullname" $ }}-{{ $i }}" + +{{- end }} +{{- end }} diff --git a/charts/lodestar/templates/service.yaml b/charts/lodestar/templates/service.yaml new file mode 100644 index 000000000..8ce26a7b6 --- /dev/null +++ b/charts/lodestar/templates/service.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- if and .Values.global.metrics.enabled .Values.metrics.svcAnnotations }} + annotations: + {{ toYaml .Values.metrics.svcAnnotations | nindent 4 | trim }} +{{- end }} +spec: + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} +{{- if .Values.sessionAffinity.enabled }} + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: {{ .Values.sessionAffinity.timeoutSeconds }} +{{- end }} + type: ClusterIP +{{- if .Values.svcHeadless }} + clusterIP: None +{{- end }} + ports: + - name: {{ .Values.rest.portName }} + port: {{ .Values.rest.port }} + targetPort: {{ .Values.rest.portName }} + {{- if .Values.global.metrics.enabled }} + - name: metrics + port: {{ .Values.metrics.port }} + targetPort: metrics + {{- end }} diff --git a/charts/lodestar/templates/serviceaccount.yaml b/charts/lodestar/templates/serviceaccount.yaml new file mode 100644 index 000000000..cb2edd11f --- /dev/null +++ b/charts/lodestar/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if or .Values.global.serviceAccount.create .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common.names.serviceAccountName" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/lodestar/templates/servicemonitor.yaml b/charts/lodestar/templates/servicemonitor.yaml new file mode 100644 index 000000000..9ecffc6ca --- /dev/null +++ b/charts/lodestar/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and (or .Values.global.metrics.enabled .Values.metrics.enabled) + (or .Values.global.metrics.serviceMonitor.enabled .Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: metrics + path: /metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/lodestar/templates/statefulset.yaml b/charts/lodestar/templates/statefulset.yaml new file mode 100644 index 000000000..9aeae392a --- /dev/null +++ b/charts/lodestar/templates/statefulset.yaml @@ -0,0 +1,250 @@ +--- +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.statefulset" . | nindent 4 }} +spec: + replicas: {{ len .Values.global.executionEndpoints }} + podManagementPolicy: "Parallel" + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + serviceName: {{ include "common.names.fullname" . }} + template: + metadata: + labels: + {{- include "common.labels.matchLabels" . | nindent 8 }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with (concat .Values.imagePullSecrets .Values.global.imagePullSecrets) }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{ toYaml . | nindent 8 | trim }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{ toYaml . | nindent 8 | trim }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{ toYaml . | nindent 8 | trim }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: + {{ toYaml . | nindent 8 | trim }} + {{- end }} + serviceAccountName: {{ include "common.names.serviceAccountName" . }} + priorityClassName: {{ .Values.priorityClassName | quote }} + initContainers: + - name: init + image: "{{ .Values.initImage.registry }}/{{ .Values.initImage.repository }}:{{ .Values.initImage.tag }}" + imagePullPolicy: {{ .Values.initImage.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - sh + - -c + - > + echo "Namespace: ${POD_NAMESPACE} Pod: ${POD_NAME}"; + {{- if .Values.p2pNodePort.enabled }} + {{- if eq .Values.p2pNodePort.type "LoadBalancer" }} + until [ -n "$(kubectl -n ${POD_NAMESPACE} get svc/${POD_NAME} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" ]; do echo "Waiting for load balancer gets an IP" && sleep 10; done; + export EXTERNAL_PORT=$(kubectl -n ${POD_NAMESPACE} get services -l "pod in (${POD_NAME}), type in (p2p)" -o jsonpath='{.items[0].spec.ports[0].nodePort}'); + export EXTERNAL_IP=$(kubectl -n ${POD_NAMESPACE} get svc/${POD_NAME} -o jsonpath='{.status.loadBalancer.ingress[0].ip}'); + {{- else }} + export 
EXTERNAL_PORT=$(kubectl get services -l "pod in (${POD_NAME}), type in (p2p)" -o jsonpath='{.items[0].spec.ports[0].nodePort}'); + export EXTERNAL_IP=$(kubectl get nodes "${NODE_NAME}" -o jsonpath='{.status.addresses[?(@.type=="ExternalIP")].address}'); + {{- end }} + echo "EXTERNAL_PORT=$EXTERNAL_PORT" > /env/init-nodeport; + echo "EXTERNAL_IP=$EXTERNAL_IP" >> /env/init-nodeport; + cat /env/init-nodeport; + {{- end }} + {{- if and .Values.persistence.enabled .Values.initChownData }} + mkdir -p /data && chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsUser }} /data; + {{- end }} + INDEX=$((${HOSTNAME##*-}+1)); + EE=$(sed "${INDEX}q;d" /configs/execution-endpoints.txt); + echo ${EE} > /data/ee.txt; + echo "Pod will connect to the ${EE} endpoint"; + volumeMounts: + - name: env-nodeport + mountPath: /env + - name: data + mountPath: /data + - name: configs + mountPath: /configs + containers: + - name: {{ .Chart.Name }} + image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - sh + - -ac + - > + {{- if .Values.p2pNodePort.enabled }} + . /env/init-nodeport; + {{- end }} + EE=`cat /data/ee.txt`; + echo "Pod will connect to the ${EE} endpoint"; + exec node /usr/app/packages/cli/bin/lodestar + beacon + {{- if .Values.rest.enabled }} + --rest + --rest.port={{ .Values.rest.port }} + --rest.address={{ .Values.rest.address }} + --rest.cors={{ .Values.rest.cors }} + {{- end}} + --eth1 + --execution.urls=${EE} + --jwt-secret=/secret/jwtsecret + --dataDir=/data + --network={{ .Values.global.network }} + {{- if .Values.checkpointSyncUrl }} + --checkpointSyncUrl={{ .Values.checkpointSyncUrl }} + {{- end}} + --targetPeers={{ .Values.targetPeers }} + --discoveryPort={{ include "lodestar.p2pPort" . 
}} + {{- if .Values.p2pNodePort.enabled }} + --enr.ip=$EXTERNAL_IP + --enr.tcp=$EXTERNAL_PORT + --enr.udp=$EXTERNAL_PORT + {{- else }} + --enr.ip=$(POD_IP) + --enr.tcp={{ include "lodestar.p2pPort" . }} + --enr.udp={{ include "lodestar.p2pPort" . }} + {{- end }} + {{- if .Values.global.metrics.enabled }} + {{- range .Values.metrics.flags }} + {{ . }} + {{- end }} + {{- end }} + {{- range .Values.extraFlags }} + {{ . }} + {{- end }} + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + ports: + - containerPort: {{ .Values.rest.port }} + protocol: TCP + name: {{ .Values.rest.portName }} + {{- if .Values.global.metrics.enabled }} + - containerPort: {{ .Values.metrics.port }} + name: metrics + protocol: TCP + {{- end }} + {{- if .Values.p2pNodePort.enabled }} + - name: p2p-tcp + containerPort: {{ include "lodestar.p2pPort" . }} + protocol: TCP + - name: p2p-udp + containerPort: {{ include "lodestar.p2pPort" . }} + protocol: UDP + {{- end }} + volumeMounts: + - name: data + mountPath: /data + - name: jwtsecret + mountPath: /secret + readOnly: true + - name: env-nodeport + mountPath: /env + {{- with .Values.resources }} + resources: + {{ toYaml . 
| nindent 12 | trim }} + {{- end }} + - name: sidecar + image: "{{ .Values.sidecar.registry }}/{{ .Values.sidecar.repository }}:{{ .Values.sidecar.tag }}" + imagePullPolicy: {{ .Values.sidecar.pullPolicy }} + env: + - name: SERVER_BINDADDR + value: "{{ .Values.sidecar.bindAddr }}:{{ .Values.sidecar.bindPort }}" + - name: CLIENT_PORT + value: {{ .Values.rest.port | quote }} + ports: + - containerPort: {{ .Values.sidecar.bindPort }} + name: sidecar + protocol: TCP + {{- if or .Values.global.livenessProbe.enabled .Values.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + httpGet: + path: {{ .Values.livenessProbe.httpGet.path }} + port: {{ .Values.livenessProbe.httpGet.port }} + scheme: {{ .Values.livenessProbe.httpGet.scheme }} + {{- end }} + {{- if or .Values.global.readinessProbe.enabled .Values.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + httpGet: + path: {{ .Values.readinessProbe.httpGet.path }} + port: {{ .Values.readinessProbe.httpGet.port }} + scheme: {{ .Values.readinessProbe.httpGet.scheme }} + {{- end }} + volumes: + - name: jwtsecret + secret: + secretName: {{ include "common.names.fullname" . }} + - name: env-nodeport + emptyDir: {} + - name: configs + configMap: + name: {{ include "common.names.fullname" . 
}} + {{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} +{{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + labels: + {{- include "common.labels.statefulset" . | nindent 10 }} + {{- with .Values.persistence.annotations }} + annotations: + {{ toYaml . | nindent 10 | trim }} + {{- end }} + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} +{{- end }} diff --git a/charts/lodestar/templates/tests/test-connection.yaml b/charts/lodestar/templates/tests/test-connection.yaml new file mode 100644 index 000000000..7957672d5 --- /dev/null +++ b/charts/lodestar/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "common.names.fullname" . }}-test-connection" + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "common.names.fullname" . }}:{{ .Values.rest.port }}'] + restartPolicy: Never diff --git a/charts/lodestar/templates/validate.yaml b/charts/lodestar/templates/validate.yaml new file mode 100644 index 000000000..75d40a4fd --- /dev/null +++ b/charts/lodestar/templates/validate.yaml @@ -0,0 +1,8 @@ +{{- if not .Values.global.JWTSecret }} +{{- fail ".Values.global.JWTSecret is required" }} +{{- end }} + +{{- $endpoints := uniq .Values.global.executionEndpoints -}} +{{- if lt (len $endpoints) (len .Values.global.executionEndpoints)}} +{{- fail ".Values.global.executionEndpoints must only contain unique values, since each consensus client must be connected to a unique execution client."
}} +{{- end }} diff --git a/charts/lodestar/templates/vpa.yaml b/charts/lodestar/templates/vpa.yaml new file mode 100644 index 000000000..8b579cbcb --- /dev/null +++ b/charts/lodestar/templates/vpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.verticalAutoscaler.enabled }} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: StatefulSet + name: {{ include "common.names.fullname" . }} + updatePolicy: + updateMode: {{ .Values.verticalAutoscaler.updateMode | default "Off" | quote }} + {{- if .Values.verticalAutoscaler.containerPolicies }} + resourcePolicy: + containerPolicies: + {{ tpl .Values.verticalAutoscaler.containerPolicies . | nindent 6 | trim }} + {{- end }} +{{- end }} diff --git a/charts/lodestar/values.yaml b/charts/lodestar/values.yaml new file mode 100644 index 000000000..ddfcd1943 --- /dev/null +++ b/charts/lodestar/values.yaml @@ -0,0 +1,397 @@ +# Default values for Lodestar. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + ## Eth2 network ID + ## + network: mainnet + + ## JSON Web Token (JWT) authentication is used to secure the communication + ## between the beacon node and execution client. You can generate a JWT using + ## a command line tool, for example: + ## openssl rand -hex 32 > token.txt + ## + JWTSecret: "" + + ## Server endpoints for an execution layer jwt authenticated HTTP JSON-RPC connection. + ## Uses the same endpoint to populate the deposit cache. + ## A separate Statefulset will be created for each specified address + ## + ## !!!!! WARNING !!!!! 
+ ## NEVER CHANGE THE ORDER OF ENDPOINTS AS THIS MAY BREAK + ## THE CONSENSUS AND EXECUTION CLIENTS CONNECTIVITY + executionEndpoints: [] + + ## Credentials to fetch images from private registry + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + + ## Service account + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## Additional settings could be made in non-global section. + ## + serviceAccount: + # Specifies whether a service account should be created + create: true + + ## RBAC configuration. + ## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ + ## Additional settings could be made in non-global section. + ## + rbac: + ## Specifies whether RBAC resources are to be created + ## + create: true + + ## Monitoring + ## Additional settings could be made in non-global section. + ## + metrics: + ## Whether to enable metrics collection or not + ## + enabled: true + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator + ## + enabled: false + + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator + ## + enabled: false + + ## Configure liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## NB! readinessProbe and livenessProbe must be disabled before fully synced + ## Additional settings could be made in non-global section.
+ ## + livenessProbe: + enabled: true + + readinessProbe: + enabled: true + +## Init image is used to chown data volume, initialise genesis, etc. +## +initImage: + registry: "docker.io" + repository: "bitnami/kubectl" + tag: "1.24" + pullPolicy: IfNotPresent + +## Sidecar image is used to perform Liveness/Readiness probes. +## +sidecar: + registry: "europe-west4-docker.pkg.dev" + repository: "stakewiselabs/public/ethnode-sidecar" + tag: "v1.0.6" + pullPolicy: IfNotPresent + bindAddr: "0.0.0.0" + bindPort: 3000 + +## Configuration for Lodestar +## ref: https://chainsafe.github.io/lodestar/ +## + +## Lodestar image version +## ref: https://hub.docker.com/r/chainsafe/lodestar +image: + registry: "docker.io" + repository: "chainsafe/lodestar" + tag: "v1.12.0" + pullPolicy: IfNotPresent + +## Credentials to fetch images from private registry +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +## +imagePullSecrets: [] + +## Provide a name in place of Lodestar for `app:` labels +## +nameOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Rest API Settings +## +rest: + # Enables Beacon Rest API + enabled: true + # Port of the REST server + port: "5052" + # Port name for the respective k8s service + portName: "http" + # Listening address of the REST server + address: "0.0.0.0" + # Access-Control-Allow-Origin response HTTP header + cors: "*" + +## If set, Lodestar will perform a checkpoint sync. For more information see here: https://lodestar-book.sigmaprime.io/checkpoint-sync.html +## Respective public checkpoint sync endpoints can be found here: https://eth-clients.github.io/checkpoint-sync-endpoints/ +## It is not recommended to blindly trust any public beacon node. 
+## Therefore please verify that you are on the correct chain: https://notes.ethereum.org/@launchpad/checkpoint-sync#1-Obtaining-finalized-checkpoint-amp-state-root +## +checkpointSyncUrl: "" + +## The target number of peers. +## +targetPeers: 80 + +## Extra flags for lodestar beacon chain node +## +extraFlags: [] + +## Extra annotations for StatefulSet pods +podAnnotations: {} + +## Service account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +## Additional labels for all resources +## +additionalLabels: + client-type: "consensus" + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + fsGroup: 1001 + runAsUser: 1001 + +## RBAC configuration. +## ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ +## +rbac: + # The name of the cluster role to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + ## Required ClusterRole rules + ## + clusterRules: + ## Required to obtain the nodes external IP + ## + - apiGroups: [""] + resources: + - "nodes" + verbs: + - "get" + - "list" + - "watch" + ## Required Role rules + ## + rules: + ## Required to get information about the serices nodePort. 
+ ## + - apiGroups: [""] + resources: + - "services" + verbs: + - "get" + - "list" + - "watch" + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +## Example: +## affinity: +## podAntiAffinity: +## requiredDuringSchedulingIgnoredDuringExecution: +## - labelSelector: +## matchExpressions: +## - key: app.kubernetes.io/name +## operator: In +## values: +## - lodestar +## topologyKey: kubernetes.io/hostname +## +affinity: {} + +## Used to assign priority to pods +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Enable pod disruption budget +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb +## +podDisruptionBudget: + enabled: true + maxUnavailable: 1 + +## Vertical Pod Autoscaler config +## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler +## +verticalAutoscaler: + # If true a VPA object will be created for the StatefulSet + enabled: false + updateMode: "Off" + containerPolicies: {} + +## Configure resource requests and limits.
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: {} + +## Configure liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## +livenessProbe: + initialDelaySeconds: 900 + timeoutSeconds: 3 + periodSeconds: 30 + failureThreshold: 3 + successThreshold: 1 + httpGet: + path: /eth2/liveness + port: sidecar + scheme: HTTP + +readinessProbe: + initialDelaySeconds: 300 + timeoutSeconds: 3 + periodSeconds: 30 + failureThreshold: 30 + successThreshold: 2 + httpGet: + path: /eth2/readiness + port: sidecar + scheme: HTTP + +## Defines whether the service must be headless +## +svcHeadless: true + +## Configure session affinity for validator clients to hit the same beacon node +## for the period specified in `timeoutSeconds` +## ref: https://kubernetes.io/docs/concepts/services-networking/service/#proxy-mode-userspace +## +sessionAffinity: + # Whether to enable session affinity or not + enabled: false + # The session duration in seconds + timeoutSeconds: 86400 + +## When p2pNodePort is enabled, your P2P port will be exposed via service type NodePort/LoadBalancer. +## This will generate a service for each replica, with a port binding via NodePort/LoadBalancer. +## This is useful if you want to expose and announce your node to the Internet. 
+ ## +p2pNodePort: + ## @param p2pNodePort.enabled Expose P2P port via NodePort + ## + enabled: false + ## @param p2pNodePort.annotations + ## + annotations: {} + ## @param p2pNodePort.type + ## Options: NodePort, LoadBalancer + type: NodePort + ## @param p2pNodePort.startAt The ports allocation will start from this value + ## + startAt: 31300 + ## @param p2pNodePort.replicaToNodePort Overwrite a port for specific replicas + ## @default -- See `values.yaml` for example + replicaToNodePort: {} + # "0": 32345 + # "3": 32348 + +## Monitoring +## +metrics: + # Prometheus exporter port + port: 8008 + + # Extra flags to pass for collecting metrics + flags: + - "--metrics" + - "--metrics.port=8008" + - "--metrics.address=0.0.0.0" + + ## Prometheus Service Monitor + ## ref: https://github.com/coreos/prometheus-operator + ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + serviceMonitor: + ## The namespace in which the ServiceMonitor will be created + ## + namespace: "" + ## The interval at which metrics should be scraped + ## + interval: 30s + ## The timeout after which the scrape is ended + ## + scrapeTimeout: "" + ## Metrics RelabelConfigs to apply to samples before scraping. + ## + relabelings: [] + ## Metrics RelabelConfigs to apply to samples before ingestion.
+ ## + metricRelabelings: [] + ## Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus + ## + additionalLabels: {} + ## Custom PrometheusRule to be defined + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + ## Create a default set of Alerts + ## + default: true + ## The namespace in which the prometheusRule will be created + ## + namespace: "" + ## Additional labels for the prometheusRule + ## + additionalLabels: {} + ## Custom Prometheus rules + ## + rules: [] + +## If false, data ownership will not be reset at startup +## This allows the geth node to be run with an arbitrary user +## +initChownData: true + +## Whether or not to allocate persistent volume disk for the data directory. +## In case of pod failure, the pod data directory will still persist. +## +persistence: + enabled: true + storageClassName: "" + accessModes: + - ReadWriteOnce + size: 250Gi + annotations: {}