diff --git a/.gitignore b/.gitignore index 934735c..d4fed95 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,6 @@ gremlin/ *.bak values-*.yaml .dump/ -opentelemetry-demo opentelemetry/ nohup.out istio-*/ diff --git a/bootstrap.sh b/bootstrap.sh index b904345..3190a50 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -32,8 +32,13 @@ CLUSTER_NAME=${3:-"sre-playground"} KIND_CONFIG_FILE=`mktemp` OVERRIDE_CONFIG_FILE=`mktemp` +PLAYGROUND_CONFIG_FILE=`mktemp` + CPU_ARCH="$(uname -m)" -PLAYGROUND_CONFIG_FILE="kind/bootstrap-config.yaml" +PLAYGROUND_CONFIG_FILE_URI="kind/bootstrap-config.yaml" + +# read the raw bootstrap config file and resolve the YAML aliases +yq 'explode(.)' $PLAYGROUND_CONFIG_FILE_URI > $PLAYGROUND_CONFIG_FILE # reading kind cluster common configuration yq .common-config.$CPU_ARCH $PLAYGROUND_CONFIG_FILE > $KIND_CONFIG_FILE @@ -64,7 +69,8 @@ echo "`date` >>>>> METALLB_IP_RANGE:$METALLB_IP_RANGE" # install cluster yq .override-config.$KUBEPROXY_OPTS.$CNI.cni-patch $PLAYGROUND_CONFIG_FILE > $OVERRIDE_CONFIG_FILE -echo "`date` >>>>> OVERRIDE_CONFIG_FILE:$OVERRIDE_CONFIG_FILE" + +# install the cluster helm dependency update sreplayground-cluster helm upgrade --install sreplayground-cluster sreplayground-cluster \ --dependency-update \ @@ -87,7 +93,7 @@ helm upgrade --install sreplayground-platform sreplayground-platform \ --create-namespace \ --dependency-update \ --wait - + # install app kubectl create namespace app --dry-run=client -o yaml | kubectl apply -f - diff --git a/kind/bootstrap-config.yaml b/kind/bootstrap-config.yaml index 2f99ee7..220aec2 100644 --- a/kind/bootstrap-config.yaml +++ b/kind/bootstrap-config.yaml @@ -1,19 +1,10 @@ common-config: - arm64: + common: &common kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane image: kindest/node:v1.26.0 - extraPortMappings: - - containerPort: 80 - hostPort: 80 - protocol: TCP - listenAddress: "127.0.0.1" - - containerPort: 443 - hostPort: 443 - protocol: TCP - listenAddress: "127.0.0.1" extraMounts: - hostPath: kind/tracing containerPath: /etc/kubernetes/tracing @@ -24,10 +15,6 @@ common-config: apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration apiServer: - apiServer: - dns: - servers: - - 10.96.0.10 extraArgs: tracing-config-file: /etc/kubernetes/tracing/kube-apiserver-tracing-config.yaml extraVolumes: @@ -78,7 +65,8 @@ common-config: featureGates: APIServerTracing: true KubeletTracing: true - + x86_64: *common + arm64: *common override-config: with-kubeproxy: kindnet: diff --git a/requirements.sh b/requirements.sh old mode 100644 new mode 100755 index a21fb03..e8c3516 --- a/requirements.sh +++ b/requirements.sh @@ -3,7 +3,7 @@ set -eo pipefail # install netcat, apache,git and kubectl -sudo apt update && sudo apt install -y netcat apache2 kubectl git yq +sudo apt update && sudo apt install -y netcat apache2 kubectl git # install helm curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null @@ -13,8 +13,8 @@ sudo apt-get update sudo apt-get install helm # install kind -curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64 -chmod +x ./kind +sudo curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.17.0/kind-linux-amd64 +sudo chmod +x ./kind sudo mv ./kind /usr/local/bin/kind @@ -24,6 +24,8 @@ sudo mv ./kind /usr/local/bin/kind eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" sudo apt-get install build-essential +sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 +sudo chmod a+x /usr/local/bin/yq
# install docker diff --git a/sreplayground-app/charts/opentelemetry-demo/.helmignore b/sreplayground-app/charts/opentelemetry-demo/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/sreplayground-app/charts/opentelemetry-demo/Chart.lock b/sreplayground-app/charts/opentelemetry-demo/Chart.lock new file mode 100644 index 0000000..02a14bd --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/Chart.lock @@ -0,0 +1,15 @@ +dependencies: +- name: opentelemetry-collector + repository: https://open-telemetry.github.io/opentelemetry-helm-charts + version: 0.49.1 +- name: jaeger + repository: https://jaegertracing.github.io/helm-charts + version: 0.67.6 +- name: prometheus + repository: https://prometheus-community.github.io/helm-charts + version: 19.7.2 +- name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.51.2 +digest: sha256:70149fdf53acfc2b727edc8017e5908aa14854231c02a4f8ea1a9b4d2445511b +generated: "2023-03-01T21:49:19.776835-05:00" diff --git a/sreplayground-app/charts/opentelemetry-demo/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/Chart.yaml new file mode 100644 index 0000000..0f81bc7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/Chart.yaml @@ -0,0 +1,31 @@ +apiVersion: v2 +appVersion: 1.3.1 +dependencies: +- condition: observability.otelcol.enabled + name: opentelemetry-collector + repository: https://open-telemetry.github.io/opentelemetry-helm-charts + version: 0.49.1 +- condition: observability.jaeger.enabled + name: jaeger + repository: https://jaegertracing.github.io/helm-charts + version: 0.67.6 +- condition: observability.prometheus.enabled + name: prometheus + repository: https://prometheus-community.github.io/helm-charts + version: 19.7.2 +- condition: observability.grafana.enabled + name: grafana + repository: https://grafana.github.io/helm-charts + version: 6.51.2 +description: opentelemetry demo helm chart +home: https://opentelemetry.io/ +icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png +maintainers: +- name: dmitryax +- name: puckpuck +- name: tylerhelmuth +name: opentelemetry-demo +sources: +- https://github.com/open-telemetry/opentelemetry-demo +type: application +version: 0.19.6 diff --git a/sreplayground-app/charts/opentelemetry-demo/README.md b/sreplayground-app/charts/opentelemetry-demo/README.md new file mode 100644 index 0000000..878e158 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/README.md @@ -0,0 +1,189 @@ +# OpenTelemetry Demo Helm Chart + +This Helm chart installs [OpenTelemetry Demo](https://github.com/open-telemetry/opentelemetry-demo) +in a Kubernetes cluster. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.9+ + +Since the OpenTelemetry demo does not build images targeting the arm64 architecture, **the chart is not supported in clusters running on +arm64 architectures**, such as kind/minikube running on Apple Silicon.
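+ +To verify the CPU architecture of an existing cluster's nodes before installing, one quick check (output varies by cluster) is: + +```console +kubectl get nodes -o jsonpath='{.items[*].status.nodeInfo.architecture}' +```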
+ +## Installing the Chart + +Add OpenTelemetry Helm repository: + +```console +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +``` + +To install the chart with the release name my-otel-demo, run the following command: + +```console +helm install my-otel-demo open-telemetry/opentelemetry-demo +``` + +## Upgrading + +See [UPGRADING.md](UPGRADING.md). + +## Chart Parameters + +Chart parameters are separated into 4 general sections: +* Default - Used to specify defaults applied to all demo components +* Components - Used to configure the individual components (microservices) for +the demo +* Observability - Used to enable/disable dependencies +* Sub-charts - Configuration for all sub-charts + +### Default parameters (applied to all demo components) + +| Property | Description | Default | +|----------------------------------------|-------------------------------------------------------------------------------------------|------------------------------------------------------| +| `default.env` | Environment variables added to all components | Array of several OpenTelemetry environment variables | +| `default.envOverrides` | Used to override individual environment variables without re-specifying the entire array. | `[]` | +| `default.image.repository` | Demo components image name | `otel/demo` | +| `default.image.tag` | Demo components image tag (leave blank to use app version) | `nil` | +| `default.image.pullPolicy` | Demo components image pull policy | `IfNotPresent` | +| `default.image.pullSecrets` | Demo components image pull secrets | `[]` | +| `default.schedulingRules.nodeSelector` | Node labels for pod assignment | `{}` | +| `default.schedulingRules.affinity` | Map of node/pod affinities | `{}` | +| `default.schedulingRules.tolerations` | Tolerations for pod assignment | `[]` | +| `default.securityContext` | Demo components container security context | `{}` | +| `serviceAccount.annotations` | Annotations for the serviceAccount | `{}` | +| `serviceAccount.create` | Whether to create a serviceAccount or use an existing one | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use for demo components | `""` | + +### Component parameters + +The OpenTelemetry demo contains several components (microservices). Each +component is configured with a common set of parameters. All components are +defined within `components.[NAME]` where `[NAME]` is the name of the demo +component.
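+ +For example, a single component can be disabled at install time through its `enabled` flag; the component name below is illustrative, see the chart's values.yaml for the actual component names: + +```console +helm install my-otel-demo open-telemetry/opentelemetry-demo \ +  --set components.adService.enabled=false +```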
+ +> **Note** +> The following parameters require a `components.[NAME].` prefix where `[NAME]` +> is the name of the demo component. + + +| Parameter | Description | Default | +|--------------------------------------|------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `enabled` | Is this component enabled | `true` | +| `useDefault.env` | Use the default environment variables in this component | `true` | +| `imageOverride.repository` | Name of image for this component | Defaults to the overall default image repository | +| `imageOverride.tag` | Tag of the image for this component | Defaults to the overall default image tag | +| `imageOverride.pullPolicy` | Image pull policy for this component | `IfNotPresent` | +| `imageOverride.pullSecrets` | Image pull secrets for this component | `[]` | +| `service.type` | Service type used for this component | `ClusterIP` | +| `service.port` | Service port used for this component | `nil` | +| `service.nodePort` | Service node port used for this component | `nil` | +| `service.annotations` | Annotations to add to the component's service | `{}` | +| `ports` | Array of ports to open for deployment and service of this component | `[]` | +| `env` | Array of environment variables added to this component | Each component will have its own set of environment variables | +| `envOverrides` | Used to override individual environment variables without re-specifying the entire array | `[]` | +| `resources` | CPU/Memory resource requests/limits | Each component will have a default memory limit set | +| `schedulingRules.nodeSelector` | Node labels for pod assignment | `{}` | +| `schedulingRules.affinity` | Map of node/pod affinities | `{}` | +| `schedulingRules.tolerations` | Tolerations for pod assignment | `[]` | +| `securityContext` | Container security context to define user ID (UID), group ID (GID) and other security policies | `{}` | +| `podAnnotations` | Pod annotations for this component | `{}` | +| `ingress.enabled` | Enable the creation of Ingress rules | `false` | +| `ingress.annotations` | Annotations to add to the ingress rule | `{}` | +| `ingress.ingressClassName` | Ingress class to use. If not specified default Ingress class will be used. | `nil` | +| `ingress.hosts` | Array of Hosts to use for the ingress rule. | `[]` | +| `ingress.hosts[].paths` | Array of paths / routes to use for the ingress rule host. | `[]` | +| `ingress.hosts[].paths[].path` | Actual path route to use | `nil` | +| `ingress.hosts[].paths[].pathType` | Path type to use for the given path. Typically this is `Prefix`. | `nil` | +| `ingress.hosts[].paths[].port` | Port to use for the given path | `nil` | +| `ingress.additionalIngresses` | Array of additional ingress rules to add. This is handy if you need differently annotated ingress rules. | `[]` | +| `ingress.additionalIngresses[].name` | Each additional ingress rule needs to have a unique name | `nil` | +| `command` | Command & arguments to pass to the container being spun up for this service | `[]` | +| `configuration` | Configuration for the container being spun up; will create a ConfigMap, Volume and VolumeMount | `{}` | + +### Observability parameters + +| Parameter | Description | Default | +|------------------------------------|-----------------------------------------------|---------| +| `observability.otelcol.enabled` | Enables the OpenTelemetry Collector sub-chart | `true` | +| `observability.jaeger.enabled` | Enables the Jaeger sub-chart | `true` | +| `observability.prometheus.enabled` | Enables the Prometheus sub-chart | `true` | +| `observability.grafana.enabled` | Enables the Grafana sub-chart | `true` | + +### Sub-charts + +The OpenTelemetry Demo Helm chart depends on 4 sub-charts: +* OpenTelemetry Collector +* Jaeger +* Prometheus +* Grafana + +Parameters for each sub-chart can be specified within that sub-chart's +respective top level. This chart will override some of the dependent sub-chart +parameters by default. The overridden parameters are specified below. + +#### OpenTelemetry Collector + +> **Note** +> The following parameters have an `opentelemetry-collector.` prefix. + +| Parameter | Description | Default | +|------------------|----------------------------------------------------|----------------------------------------------------------| +| `nameOverride` | Name that will be used by the sub-chart release | `otelcol` | +| `mode` | The Deployment or Daemonset mode | `deployment` | +| `resources` | CPU/Memory resource requests/limits | 100Mi memory limit | +| `service.type` | Service Type to use | `ClusterIP` | +| `ports` | Ports to enable for the collector pod and service | `metrics` is enabled and `prometheus` is defined/enabled | +| `podAnnotations` | Pod annotations | Annotations leveraged by Prometheus scrape | +| `config` | OpenTelemetry Collector configuration | Configuration required for demo | + +#### Jaeger + +> **Note** +> The following parameters have a `jaeger.` prefix. + +| Parameter | Description | Default | +|--------------------------------|----------------------------------------------------|-----------------------------------------------------------------------| +| `provisionDataStore.cassandra` | Provision a cassandra data store | `false` (required for AllInOne mode) | +| `allInOne.enabled` | Enable All in One In-Memory Configuration | `true` | +| `allInOne.args` | Command arguments to pass to All in One deployment | `["--memory.max-traces", "10000", "--query.base-path", "/jaeger/ui"]` | +| `allInOne.resources` | CPU/Memory resource requests/limits for All in One | 275Mi memory limit | +| `storage.type` | Storage type to use | `none` (required for AllInOne mode) | +| `agent.enabled` | Enable Jaeger agent | `false` (required for AllInOne mode) | +| `collector.enabled` | Enable Jaeger Collector | `false` (required for AllInOne mode) | +| `query.enabled` | Enable Jaeger Query | `false` (required for AllInOne mode) | + +#### Prometheus + +> **Note** +> The following parameters have a `prometheus.` prefix.
+ +| Parameter | Description | Default | +|--------------------------------------|------------------------------------------------|-----------------------------------------------------------| +| `alertmanager.enabled` | Install the alertmanager | `false` | +| `configmapReload.prometheus.enabled` | Install the configmap-reload container | `false` | +| `kube-state-metrics.enabled` | Install the kube-state-metrics sub-chart | `false` | +| `prometheus-node-exporter.enabled` | Install the Prometheus Node Exporter sub-chart | `false` | +| `prometheus-pushgateway.enabled` | Install the Prometheus Push Gateway sub-chart | `false` | +| `server.extraFlags` | Additional flags to add to Prometheus server | `["enable-feature=exemplar-storage"]` | +| `server.persistentVolume.enabled` | Enable persistent storage for Prometheus data | `false` | +| `server.global.scrape_interval` | How frequently to scrape targets by default | `5s` | +| `server.global.scrape_timeout` | How long until a scrape request times out | `3s` | +| `server.global.evaluation_interval` | How frequently to evaluate rules | `30s` | +| `service.servicePort` | Service port used | `9090` | +| `serverFiles.prometheus.yml` | Prometheus configuration file | Scrape config to get metrics from OpenTelemetry collector | + +#### Grafana + +> **Note** +> The following parameters have a `grafana.` prefix. + +| Parameter | Description | Default | +|-----------------------|----------------------------------------------------|----------------------------------------------------------------------| +| `grafana.ini` | Grafana's primary configuration | Enables anonymous login, and proxy through the frontendProxy service | +| `adminPassword` | Password used by `admin` user | `admin` | +| `rbac.pspEnabled` | Enable PodSecurityPolicy resources | `false` | +| `datasources` | Configure grafana datasources (passed through tpl) | Prometheus and Jaeger data sources | +| `dashboardProviders` | Configure grafana dashboard providers | Defines a `default` provider based on a file path | +| `dashboardConfigMaps` | ConfigMaps reference that contains dashboards | Dashboard config map deployed with this Helm chart | diff --git a/sreplayground-app/charts/opentelemetry-demo/UPGRADING.md b/sreplayground-app/charts/opentelemetry-demo/UPGRADING.md new file mode 100644 index 0000000..8350c0c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/UPGRADING.md @@ -0,0 +1,15 @@ +# Upgrade guidelines + +## To 0.18 + +The `serviceType` and `servicePort` parameters have been moved under a `service` +parameter with names of `type` and `port` respectively. If you had changes to +these parameters for any demo component, you will need to update your changes +to work with the new structure for the `service` parameter. + +## To 0.13 + +Jaeger was moved to a Helm sub-chart instead of a local chart deployment. If you +had changes specified to the `observability.jaeger` parameter, those changes +will need to be re-implemented as sub-chart parameters under the top level +`jaeger` parameter instead. diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/.helmignore new file mode 100644 index 0000000..8cade13 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/Chart.yaml new file mode 100644 index 0000000..8a2dc33 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v2 +appVersion: 9.3.8 +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: grafana +sources: +- https://github.com/grafana/grafana +type: application +version: 6.51.2 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/README.md new file mode 100644 index 0000000..cd8d316 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/README.md @@ -0,0 +1,651 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. 
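+ +For the label change noted in the 5.0.0 section above, the upgrade command therefore needs the extra flag (release name illustrative): + +```console +helm upgrade --force my-release grafana/grafana +```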
+ +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | +| `image.sha` | Image sha (optional) | `` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.appProtocol` | Adds the appProtocol field to the service | `` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations (can be templated) | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `headlessService` | Create a headless service | `false` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. 
| `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | +| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional) | `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envRenderSecret` | Sensitive environment variables passed to pods and stored as secret | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables.
| `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `createConfigmap` | Enable creating the grafana configmap | `true` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | +| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.22.0` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.securityContext` | Sidecar securityContext | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | +| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | +| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | +| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | +| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. 
| `both` | +| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | +| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` | +| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | +| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. 
| `[]` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | +| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | +| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | +| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | +| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). 
| `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.labels` | ServiceAccount labels | `{}` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `args` | Define additional args if command is used | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. | `v1.4.1` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | +| `imageRenderer.service.portName` | image-renderer service port name | `http` | +| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | +| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | +| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | +| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | +| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | +| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | +| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | +| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | +| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's definition version below 1.13 doesn't have the enableServiceLinks option | `false` | + +### Example ingress with path + +With grafana 6.3 and above + +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true + hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +Volume can be type persistentVolumeClaim or hostPath but not both at the same time. +If neither the existingClaim nor the hostPath argument is given then the type is emptyDir. + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + loki-dashboard-quick-search: + gnetId: 12019 + revision: 2 + datasource: + - name: DS_PROMETHEUS + value: Prometheus + - name: DS_LOKI + value: Loki + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be stored on a server that does not return JSON directly and instead returns a Base64-encoded file (e.g. Gerrit). +A new parameter has been added to the url use case: if you specify a b64content value equal to true after the url entry, Base64 decoding is applied before saving the file to disk. +If this entry is not set or equals false, no decoding is applied to the file before saving it to disk. + +### Gerrit use case + +The Gerrit API for downloading files has the following schema: where {project-name} and +{file-id} usually have '/' in their values, they MUST be replaced by %2F; so if project-name is user/repo, branch-id is master and file-id equals dir1/dir2/dashboard, +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana.
Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. 
+ is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. + settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## Provision alert rules, contact points, notification policies and notification templates + +There are two methods to provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +There are two possibilities: + +* Inlining the file contents as described in the example `values.yaml` and the official [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). +* Importing a file using a relative path starting from the chart root directory. + +### Important notes on file provisioning + +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other. +* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. 
+ +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance + +### High Availability for unified alerting + +If you want to run Grafana in a high availability cluster you need to enable +the headless service by setting `headlessService: true` in your `values.yaml` +file. + +As next step you have to setup the `grafana.ini` in your `values.yaml` in a way +that it will make use of the headless service to obtain all the IPs of the +cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. + +```yaml +grafana.ini: + ... + unified_alerting: + enabled: true + ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + + alerting: + enabled: false +``` diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/default-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-affinity-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-affinity-values.yaml new file mode 100644 index 0000000..f5b9b53 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-affinity-values.yaml @@ -0,0 +1,16 @@ +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: kubernetes.io/hostname diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-json-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-json-values.yaml new file mode 100644 index 0000000..e0c4e41 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-values.yaml new file mode 100644 index 0000000..7b662c5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-extraconfigmapmounts-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-extraconfigmapmounts-values.yaml new file mode 100644 index 0000000..5cc44a0 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-extraconfigmapmounts-values.yaml @@ -0,0 +1,7 @@ +extraConfigmapMounts: + - name: '{{ include "grafana.fullname" . }}' + configMap: '{{ include "grafana.fullname" . 
}}'
+    mountPath: /var/lib/grafana/dashboards/test-dashboard.json
+    # This is not a realistic test; it only checks that extraConfigmapMounts is not empty and points to an existing ConfigMap.
+    subPath: grafana.ini
+    readOnly: true
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-image-renderer-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-image-renderer-values.yaml
new file mode 100644
index 0000000..32f3074
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-image-renderer-values.yaml
@@ -0,0 +1,19 @@
+podLabels:
+  customLabelA: Aaaaa
+imageRenderer:
+  enabled: true
+  env:
+    RENDERING_ARGS: --disable-gpu,--window-size=1280x758
+    RENDERING_MODE: clustered
+  podLabels:
+    customLabelB: Bbbbb
+  networkPolicy:
+    limitIngress: true
+    limitEgress: true
+  resources:
+    limits:
+      cpu: 1000m
+      memory: 1000Mi
+    requests:
+      cpu: 500m
+      memory: 50Mi
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-persistence.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-persistence.yaml
new file mode 100644
index 0000000..b92ca02
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/ci/with-persistence.yaml
@@ -0,0 +1,3 @@
+persistence:
+  type: pvc
+  enabled: true
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/dashboards/custom-dashboard.json b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/dashboards/custom-dashboard.json
new file mode 100644
index 0000000..9e26dfe
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/dashboards/custom-dashboard.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/NOTES.txt
new file mode 100644
index 0000000..f399f43
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/NOTES.txt
@@ -0,0 +1,54 @@
+1. Get your '{{ .Values.adminUser }}' user password by running:
+
+   kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
+
+2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster:
+
+   {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local
+{{ if .Values.ingress.enabled }}
+   If you bind Grafana to port 80, update the following values in values.yaml and reinstall, or Grafana will crash on startup:
+   ```
+   securityContext:
+     runAsUser: 0
+     runAsGroup: 0
+     fsGroup: 0
+
+   command:
+   - "setcap"
+   - "'cap_net_bind_service=+ep'"
+   - "/usr/sbin/grafana-server &&"
+   - "sh"
+   - "/run.sh"
+   ```
+   For details, refer to https://grafana.com/docs/installation/configuration/#http-port.
+
+   From outside the cluster, the server URL(s) are:
+{{- range .Values.ingress.hosts }}
+   http://{{ . }}
+{{- end }}
+{{- else }}
+   Get the Grafana URL to visit by running these commands in the same shell:
+{{- if contains "NodePort" .Values.service.type }}
+   export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }})
+   export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+         You can watch its status by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   http://$SERVICE_IP:{{ .Values.service.port -}}
+{{- else if contains "ClusterIP" .Values.service.type }}
+   export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+   kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000
+{{- end }}
+{{- end }}
+
+3. Log in with the password from step 1 and the username: {{ .Values.adminUser }}
+
+{{- if not .Values.persistence.enabled }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Grafana pod is terminated.                            #####
+#################################################################################
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_helpers.tpl
new file mode 100644
index 0000000..8307e10
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_helpers.tpl
@@ -0,0 +1,201 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "grafana.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "grafana.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "grafana.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create the name of the service account
+*/}}
+{{- define "grafana.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{- define "grafana.serviceAccountNameTest" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (print (include "grafana.fullname" .)
"-test") .Values.serviceAccount.nameTest }} +{{- else }} +{{- default "default" .Values.serviceAccount.nameTest }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.extraLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} +{{- if $secret }} +{{- index $secret "data" "admin-password" }} +{{- else }} +{{- (randAlphaNum 40) | b64enc | quote }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" }} +{{- else }} +{{- print "rbac.authorization.k8s.io/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "grafana.hpa.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. 
+*/}} +{{- define "grafana.podDisruptionBudget.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} +{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "grafana.imagePullSecrets" -}} +{{- $root := .root }} +{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} +{{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml (dict "name" (tpl .name $root)) | trim }} +{{- else }} +- name: {{ tpl . $root }} +{{- end }} +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_pod.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_pod.tpl new file mode 100644 index 0000000..68a9b4b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/_pod.tpl @@ -0,0 +1,1159 @@ +{{- define "grafana.pod" -}} +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- $root := . -}} +{{- with .Values.schedulerName }} +schedulerName: "{{ . }}" +{{- end }} +serviceAccountName: {{ include "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- if .Values.initChownData.image.sha }} + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + {{- with .Values.initChownData.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + command: + - chown + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} + - /var/lib/grafana + {{- with .Values.initChownData.resources }} + resources: + {{- toYaml . 
| nindent 6 }} + {{- end }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + {{- with .Values.downloadDashboards.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + env: + {{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- with .Values.downloadDashboards.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl . $root }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} + - name: {{ include "grafana.name" . }}-init-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . 
}}" + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end }} +{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} + - name: {{ include "grafana.name" . }}-init-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- with .Values.extraInitContainers }} + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} +{{- end }} +{{- if not .Values.enableKubeBackwardCompatibility }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +{{- end }} +containers: +{{- if .Values.sidecar.alerts.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-alerts + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.alerts.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.alerts.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.alerts.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.alerts.watchServerTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.alerts.watchClientTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" +{{- end}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ include "grafana.name" . }}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.dashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- with .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.dashboards.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.dashboards.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.dashboards.watchServerTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.dashboards.watchClientTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- with .Values.sidecar.dashboards.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ include "grafana.name" . }}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.datasources.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. 
| join ",") $root }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.datasources.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.datasources.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.datasources.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.datasources.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.datasources.watchServerTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.datasources.watchClientTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ include "grafana.name" . }}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.notifiers.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . 
}} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.notifiers.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.notifiers.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.notifiers.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.notifiers.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.notifiers.watchServerTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.notifiers.watchClientTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.sidecar.plugins.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-plugins + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.plugins.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.plugins.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.plugins.label }}" + {{- if .Values.sidecar.plugins.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.plugins.labelValue }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/plugins" + - name: RESOURCE + value: {{ quote .Values.sidecar.plugins.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.plugins.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.plugins.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.plugins.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.plugins.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.plugins.watchServerTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.plugins.watchClientTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + mountPath: {{ tpl .mountPath $root }} + subPath: {{ (tpl .subPath $root) | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- with .Values.dashboards }} + {{- range $provider, $dashboards := . }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardsConfigMaps }} + {{- range (keys . | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . }}" + {{- end }} + {{- end }} + {{- with .Values.datasources }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.notifiers }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.alerting }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.dashboardProviders }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" + subPath: {{ . 
| quote }} + {{- end }} + {{- end }} + {{- with .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- end}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml + {{- end}} + {{- end}} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" + {{- end}} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" + {{- end}} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" + {{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.podPortName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ include "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{- end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" + {{- end }} + {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ include "grafana.fullname" . }}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycleHooks }} + lifecycle: + {{- tpl (toYaml .) $root | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.extraContainers }} + {{- tpl . $ | nindent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ include "grafana.fullname" . }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ include "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} + {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . 
}} + {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} + {{/* nothing */}} + {{- else }} + - name: storage + {{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory + {{- with .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ . }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + emptyDir: + {{- with .Values.sidecar.alerts.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: + {{- with .Values.sidecar.dashboards.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ include "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: + {{- with .Values.sidecar.datasources.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + emptyDir: + {{- with .Values.sidecar.plugins.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: + {{- with .Values.sidecar.notifiers.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- range .Values.extraSecretMounts }} + {{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else if .projected }} + - name: {{ .name }} + projected: + {{- toYaml .projected | nindent 6 }} + {{- else if .csi }} + - name: {{ .name }} + csi: + {{- toYaml .csi | nindent 6 }} + {{- end }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else if .csi }} + csi: + {{- toYaml .data | nindent 6 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} + {{- end }} + {{- with .Values.extraContainerVolumes }} + {{- tpl (toYaml .) $root | nindent 2 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrole.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrole.yaml new file mode 100644 index 0000000..3396713 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . 
}}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.rbac.extraClusterRoleRules (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled)) }} +rules: + {{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end}} + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrolebinding.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..48411fe --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +roleRef: + kind: ClusterRole + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }}-clusterrole + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap-dashboard-provider.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 0000000..1f706a8 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-config-dashboards + namespace: {{ include "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end }} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap.yaml new file mode 100644 index 0000000..00ab74e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/configmap.yaml @@ -0,0 +1,134 @@ +{{- if .Values.createConfigmap }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + {{- with .Values.plugins }} + plugins: {{ join "," . }} + {{- end }} + grafana.ini: | + {{- range $elem, $elemVal := index .Values "grafana.ini" }} + {{- if not (kindIs "map" $elemVal) }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := index .Values "grafana.ini" }} + {{- if kindIs "map" $value }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.datasources }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + + {{- range $key, $value := .Values.notifiers }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + + {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.dashboardProviders }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + {{ $dashboardProviders := .Values.dashboardProviders }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + {{- if $value.bearerToken }} + -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Basic: {{ $value.basic }}" \ + {{- end }} + {{- if $value.gitlabToken }} + -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- $dpPath := "" -}} + {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} + {{- if eq $kd.name $provider }} + {{- $dpPath = $kd.options.path }} + {{- end }} + {{- end }} + {{- if $value.url }} + "{{ $value.url }}" \ + {{- else }} + 
"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ + {{- end }} + {{- if $value.datasource }} + {{- if kindIs "string" $value.datasource }} + | sed '/-- .* --/! s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ + {{- end }} + {{- if kindIs "slice" $value.datasource }} + {{- range $value.datasource }} + | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ + {{- end }} + {{- end }} + {{- end }} + {{- if $value.b64content }} + | base64 -d \ + {{- end }} + > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/dashboards-json-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 0000000..df0ed0d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,35 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ include "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} + {{- print $key | nindent 2 }}.json: + {{- if hasKey $value "json" }} + |- + {{- $value.json | nindent 6 }} + {{- end }} + {{- if hasKey $value "file" }} + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/deployment.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/deployment.yaml new file mode 100644 index 0000000..96eac4d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/deployment.yaml @@ -0,0 +1,50 @@ +{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + {{- with .Values.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/extra-manifests.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/extra-manifests.yaml new file mode 100644 index 0000000..a9bb3b6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraObjects }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/headless-service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/headless-service.yaml new file mode 100644 index 0000000..3028589 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-headless + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP + ports: + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/hpa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/hpa.yaml new file mode 100644 index 0000000..46bbcb4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }} + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + {{- if has .Values.persistence.type $sts }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ include "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-deployment.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 0000000..0c3d30c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,125 @@ +{{ if .Values.imageRenderer.enabled }} +{{- $root := . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} + replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + + {{- with .Values.imageRenderer.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.imageRenderer.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imageRenderer.schedulerName }} + schedulerName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range . }} + - name: {{ tpl . 
$root }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.imageRenderer.service.portName }} + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- with .Values.imageRenderer.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-hpa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 0000000..b0f0059 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-network-policy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 0000000..d1a0eb3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} +{{- end }} + +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-egress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.targetPort }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 14 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-service.yaml new file mode 100644 index 0000000..f8da127 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + {{- with .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + {{- with .Values.imageRenderer.appProtocol }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 0000000..5d9f09d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/ingress.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/ingress.yaml new file mode 100644 index 0000000..063cdfa --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- with $ingressPath }} + path: {{ . }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/networkpolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/networkpolicy.yaml new file mode 100644 index 0000000..ea4578b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/networkpolicy.yaml @@ -0,0 +1,52 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + policyTypes: + {{- if .Values.networkPolicy.ingress }} + - Ingress + {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + - Egress + {{- end }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + + {{- if .Values.networkPolicy.egress.enabled }} + egress: + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- end }} + {{- if .Values.networkPolicy.ingress }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "grafana.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.explicitNamespacesSelector }} + - namespaceSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "grafana.labels" . | nindent 14 }} + role: read + {{- end }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/poddisruptionbudget.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..0525121 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/podsecuritypolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..eed7af9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/pvc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/pvc.yaml new file mode 100644 index 0000000..eb8f87f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/pvc.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.extraPvcLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- with .Values.persistence.storageClassName }} + storageClassName: {{ . }} + {{- end }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/role.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/role.yaml new file mode 100644 index 0000000..ffdb16f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: {{ include "grafana.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} +rules: + {{- if .Values.rbac.pspEnabled }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}] + {{- end }} + {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- with .Values.rbac.extraRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/rolebinding.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/rolebinding.yaml new file mode 100644 index 0000000..cc07bd9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "grafana.rbac.apiVersion" . 
}} +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret-env.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret-env.yaml new file mode 100644 index 0000000..c765567 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }}-env + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret.yaml new file mode 100644 index 0000000..5cbd527 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/secret.yaml @@ -0,0 +1,26 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ include "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/service.yaml new file mode 100644 index 0000000..43d360b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/service.yaml @@ -0,0 +1,55 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . }} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.extraExposePorts }} + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/serviceaccount.yaml new file mode 100644 index 0000000..784e71b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +{{- $root := . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/servicemonitor.yaml new file mode 100644 index 0000000..a4e9f00 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ tpl .Values.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . 
| nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/statefulset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/statefulset.yaml new file mode 100644 index 0000000..acfab4d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/statefulset.yaml @@ -0,0 +1,55 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ include "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + {{- if .Values.persistence.enabled}} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..01c96c9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . 
}} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ include "grafana.fullname" . }}/api/health" + + code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100644 index 0000000..1821772 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }}-test + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-role.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-role.yaml new file mode 100644 index 0000000..cb4c782 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}-test] +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-rolebinding.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-rolebinding.yaml new file mode 100644 index 0000000..f40d791 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . 
| nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "grafana.fullname" . }}-test
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "grafana.serviceAccountNameTest" . }}
+    namespace: {{ include "grafana.namespace" . }}
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-serviceaccount.yaml
new file mode 100644
index 0000000..38fba35
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test-serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    {{- include "grafana.labels" . | nindent 4 }}
+  name: {{ include "grafana.serviceAccountNameTest" . }}
+  namespace: {{ include "grafana.namespace" . }}
+  annotations:
+    "helm.sh/hook": test-success
+    "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test.yaml
new file mode 100644
index 0000000..9fb8842
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/templates/tests/test.yaml
@@ -0,0 +1,49 @@
+{{- if .Values.testFramework.enabled }}
+{{- $root := . }}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ include "grafana.fullname" . }}-test
+  labels:
+    {{- include "grafana.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test-success
+    "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded"
+  namespace: {{ include "grafana.namespace" . }}
+spec:
+  serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }}
+  {{- with .Values.testFramework.securityContext }}
+  securityContext:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }}
+  imagePullSecrets:
+    {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }}
+  {{- end }}
+  {{- with .Values.nodeSelector }}
+  nodeSelector:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  {{- with .Values.affinity }}
+  affinity:
+    {{- tpl (toYaml .) $root | nindent 4 }}
+  {{- end }}
+  {{- with .Values.tolerations }}
+  tolerations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+  containers:
+    - name: {{ .Release.Name }}-test
+      image: "{{ .Values.testFramework.image }}:{{ .Values.testFramework.tag }}"
+      imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy }}"
+      command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"]
+      volumeMounts:
+        - mountPath: /tests
+          name: tests
+          readOnly: true
+  volumes:
+    - name: tests
+      configMap:
+        name: {{ include "grafana.fullname" . }}-test
+  restartPolicy: Never
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/grafana/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/values.yaml
new file mode 100644
index 0000000..7ea0007
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/grafana/values.yaml
@@ -0,0 +1,1196 @@
+global:
+  # To help compatibility with other charts which use global.imagePullSecrets.
+  # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style).
+  # Can be templated.
+  # global:
+  #   imagePullSecrets:
+  #   - name: pullSecret1
+  #   - name: pullSecret2
+  # or
+  # global:
+  #   imagePullSecrets:
+  #   - pullSecret1
+  #   - pullSecret2
+  imagePullSecrets: []
+
+rbac:
+  create: true
+  ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true)
+  # useExistingRole: name-of-some-(cluster)role
+  pspEnabled: true
+  pspUseAppArmor: true
+  namespaced: false
+  extraRoleRules: []
+  # - apiGroups: []
+  #   resources: []
+  #   verbs: []
+  extraClusterRoleRules: []
+  # - apiGroups: []
+  #   resources: []
+  #   verbs: []
+serviceAccount:
+  create: true
+  name:
+  nameTest:
+  ## ServiceAccount labels.
+  labels: {}
+  ## Service account annotations. Can be templated.
+  # annotations:
+  #   eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here
+  autoMount: true
+
+replicas: 1
+
+## Create a headless service for the deployment
+headlessService: false
+
+## Create HorizontalPodAutoscaler object for deployment type
+##
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 5
+  targetCPU: "60"
+  targetMemory: ""
+  behavior: {}
+
+## See `kubectl explain poddisruptionbudget.spec` for more
+## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+podDisruptionBudget: {}
+# minAvailable: 1
+# maxUnavailable: 1
+
+## See `kubectl explain deployment.spec.strategy` for more
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+deploymentStrategy:
+  type: RollingUpdate
+
+readinessProbe:
+  httpGet:
+    path: /api/health
+    port: 3000
+
+livenessProbe:
+  httpGet:
+    path: /api/health
+    port: 3000
+  initialDelaySeconds: 60
+  timeoutSeconds: 30
+  failureThreshold: 10
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName: "default-scheduler"
+
+image:
+  repository: grafana/grafana
+  # Overrides the Grafana image tag whose default is the chart appVersion
+  tag: ""
+  sha: ""
+  pullPolicy: IfNotPresent
+
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Can be templated.
+  ##
+  pullSecrets: []
+  # - myRegistryKeySecretName
+
+testFramework:
+  enabled: true
+  image: "bats/bats"
+  tag: "v1.4.1"
+  imagePullPolicy: IfNotPresent
+  securityContext: {}
+
+securityContext:
+  runAsUser: 472
+  runAsGroup: 472
+  fsGroup: 472
+
+containerSecurityContext: {}
+
+# Enable creating the grafana configmap
+createConfigmap: true
+
+# Extra configmaps to mount in grafana pods
+# Values are templated.
+extraConfigmapMounts: []
+  # - name: certs-configmap
+  #   mountPath: /etc/grafana/ssl/
+  #   subPath: certificates.crt # (optional)
+  #   configMap: certs-configmap
+  #   readOnly: true
+
+
+extraEmptyDirMounts: []
+  # - name: provisioning-notifiers
+  #   mountPath: /etc/grafana/provisioning/notifiers
+
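+## As a usage sketch (hypothetical values, not defaults): enabling the HPA
+## template shipped with this chart via the `autoscaling` block above could
+## look like this, scaling between 2 and 5 replicas at 60% average CPU:
+# autoscaling:
+#   enabled: true
+#   minReplicas: 2
+#   maxReplicas: 5
+#   targetCPU: "60"
+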
+# Apply extra labels to common labels.
+extraLabels: {}
+
+## Assign a PriorityClassName to pods if set
+# priorityClassName:
+
+downloadDashboardsImage:
+  repository: curlimages/curl
+  tag: 7.85.0
+  sha: ""
+  pullPolicy: IfNotPresent
+
+downloadDashboards:
+  env: {}
+  envFromSecret: ""
+  resources: {}
+  securityContext: {}
+  envValueFrom: {}
+  # ENV_NAME:
+  #   configMapKeyRef:
+  #     name: configmap-name
+  #     key: value_key
+
+## Pod Annotations
+# podAnnotations: {}
+
+## Pod Labels
+# podLabels: {}
+
+podPortName: grafana
+gossipPortName: gossip
+## Deployment annotations
+# annotations: {}
+
+## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service),
+## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
+## ref: http://kubernetes.io/docs/user-guide/services/
+##
+service:
+  enabled: true
+  type: ClusterIP
+  port: 80
+  targetPort: 3000
+  # targetPort: 4181 To be used with a proxy extraContainer
+  ## Service annotations. Can be templated.
+  annotations: {}
+  labels: {}
+  portName: service
+  # Adds the appProtocol field to the service. This allows it to work with istio protocol selection. Ex: "http" or "tcp"
+  appProtocol: ""
+
+serviceMonitor:
+  ## If true, a ServiceMonitor CRD is created for a prometheus operator
+  ## https://github.com/coreos/prometheus-operator
+  ##
+  enabled: false
+  path: /metrics
+  # namespace: monitoring  (defaults to use the namespace this chart is deployed to)
+  labels: {}
+  interval: 1m
+  scheme: http
+  tlsConfig: {}
+  scrapeTimeout: 30s
+  relabelings: []
+  targetLabels: []
+
+extraExposePorts: []
+  # - name: keycloak
+  #   port: 8080
+  #   targetPort: 8080
+  #   type: ClusterIP
+
+# overrides pod.spec.hostAliases in the grafana deployment's pods
+hostAliases: []
+  # - ip: "1.2.3.4"
+  #   hostnames:
+  #     - "my.host.com"
+
+ingress:
+  enabled: false
+  # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
+  # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
+  # ingressClassName: nginx
+  # Values can be templated
+  annotations: {}
+  # kubernetes.io/ingress.class: nginx
+  # kubernetes.io/tls-acme: "true"
+  labels: {}
+  path: /
+
+  # pathType is only for k8s >= 1.18
+  pathType: Prefix
+
+  hosts:
+    - chart-example.local
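+  ## As a sketch: the ingress template renders each host through `tpl`, so
+  ## entries may reference other values. Hypothetical example, assuming you
+  ## define a `domain` value yourself:
+  # hosts:
+  #   - "grafana.{{ .Values.domain }}"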
+  ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services.
+  extraPaths: []
+  # - path: /*
+  #   backend:
+  #     serviceName: ssl-redirect
+  #     servicePort: use-annotation
+  ## Or for k8s > 1.19
+  # - path: /*
+  #   pathType: Prefix
+  #   backend:
+  #     service:
+  #       name: ssl-redirect
+  #       port:
+  #         name: use-annotation
+
+  tls: []
+  # - secretName: chart-example-tls
+  #   hosts:
+  #     - chart-example.local
+
+resources: {}
+#  limits:
+#    cpu: 100m
+#    memory: 128Mi
+#  requests:
+#    cpu: 100m
+#    memory: 128Mi
+
+## Node labels for pod assignment
+## ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+
+## Tolerations for pod assignment
+## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+
+## Affinity for pod assignment (evaluated as template)
+## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+##
+affinity: {}
+
+## Topology Spread Constraints
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+##
+topologySpreadConstraints: []
+
+## Additional init containers (evaluated as template)
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+##
+extraInitContainers: []
+
+## Enable and specify a container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod
+extraContainers: ""
+# extraContainers: |
+# - name: proxy
+#   image: quay.io/gambol99/keycloak-proxy:latest
+#   args:
+#   - -provider=github
+#   - -client-id=
+#   - -client-secret=
+#   - -github-org=
+#   - -email-domain=*
+#   - -cookie-secret=
+#   - -http-address=http://0.0.0.0:4181
+#   - -upstream-url=http://127.0.0.1:3000
+#   ports:
+#     - name: proxy-web
+#       containerPort: 4181
+
+## Volumes that can be used in init containers that will not be mounted to deployment pods
+extraContainerVolumes: []
+#  - name: volume-from-secret
+#    secret:
+#      secretName: secret-to-mount
+#  - name: empty-dir-volume
+#    emptyDir: {}
+
+## Enable persistence using Persistent Volume Claims
+## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+##
+persistence:
+  type: pvc
+  enabled: false
+  # storageClassName: default
+  accessModes:
+    - ReadWriteOnce
+  size: 10Gi
+  # annotations: {}
+  finalizers:
+    - kubernetes.io/pvc-protection
+  # selectorLabels: {}
+  ## Sub-directory of the PV to mount. Can be templated.
+  # subPath: ""
+  ## Name of an existing PVC. Can be templated.
+  # existingClaim:
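+  ## A minimal sketch for turning persistence on with the defaults above
+  ## (hypothetical; relies on the cluster's default StorageClass):
+  # enabled: true
+  # size: 10Gi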
+  extraPvcLabels: {}
+
+  ## If persistence is not enabled, this allows mounting the
+  ## local storage in-memory to improve performance
+  ##
+  inMemory:
+    enabled: false
+    ## The maximum usage on memory medium EmptyDir would be
+    ## the minimum value between the SizeLimit specified
+    ## here and the sum of memory limits of all containers in a pod
+    ##
+    # sizeLimit: 300Mi
+
+initChownData:
+  ## If false, data ownership will not be reset at startup
+  ## This allows the grafana-server to be run with an arbitrary user
+  ##
+  enabled: true
+
+  ## initChownData container image
+  ##
+  image:
+    repository: busybox
+    tag: "1.31.1"
+    sha: ""
+    pullPolicy: IfNotPresent
+
+  ## initChownData resource requests and limits
+  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources: {}
+  #  limits:
+  #    cpu: 100m
+  #    memory: 128Mi
+  #  requests:
+  #    cpu: 100m
+  #    memory: 128Mi
+  securityContext:
+    runAsNonRoot: false
+    runAsUser: 0
+
+
+# Administrator credentials when not using an existing secret (see below)
+adminUser: admin
+# adminPassword: strongpassword
+
+# Use an existing secret for the admin user.
+admin:
+  ## Name of the secret. Can be templated.
+  existingSecret: ""
+  userKey: admin-user
+  passwordKey: admin-password
+
+## Define command to be executed at startup by grafana container
+## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/)
+## Default is "run.sh" as defined in grafana's Dockerfile
+# command:
+# - "sh"
+# - "/run.sh"
+
+## Optionally define args if command is used
+## Needed if using `hashicorp/envconsul` to manage secrets
+## By default no arguments are set
+# args:
+# - "-secret"
+# - "secret/grafana"
+# - "./grafana"
+
+## Extra environment variables that will be passed onto deployment pods
+##
+## to provide grafana with access to CloudWatch on AWS EKS:
+## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later)
+## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the
+##    same oidc eks provider as noted before (same as the existing line)
+##    also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name
+##
+##  "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana",
+##
+## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess
+## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name)
+##
+## env:
+##   AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
+##   AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+##   AWS_REGION: us-east-1
+##
+## 5. uncomment the EKS section in extraSecretMounts: below
+## 6. uncomment the annotation section in the serviceAccount: above
+##    make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn
+
+env: {}
+
+## "valueFrom" environment variable references that will be added to deployment pods. Name is templated.
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core
+## Renders in container spec as:
+##   env:
+##     ...
+##     - name: <name>
+##       valueFrom: <valueFrom-spec>
+##
+envValueFrom: {}
+  #  ENV_NAME:
+  #    configMapKeyRef:
+  #      name: configmap-name
+  #      key: value_key
+
+## The name of a secret in the same kubernetes namespace which contains values to be added to the environment
+## This can be useful for auth tokens, etc. Value is templated.
+envFromSecret: ""
+
+## Sensitive environment variables that will be rendered as a new secret object
+## This can be useful for auth tokens, etc
+envRenderSecret: {}
+
+## The names of secrets in the same kubernetes namespace which contain values to be added to the environment
+## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key.
+## Name is templated.
+envFromSecrets: []
+## - name: secret-name
+##   optional: true
+
+## The names of configmaps in the same kubernetes namespace which contain values to be added to the environment
+## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key.
+## Name is templated.
+## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core
+envFromConfigMaps: []
+## - name: configmap-name
+##   optional: true
+
+# Inject Kubernetes services as environment variables.
+# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables
+enableServiceLinks: true
+
+## Additional grafana server secret mounts
+# Defines additional mounts with secrets. Secrets must be manually created in the namespace.
+extraSecretMounts: []
+#   - name: secret-files
+#     mountPath: /etc/secrets
+#     secretName: grafana-secret-files
+#     readOnly: true
+#     subPath: ""
+#
+#   for AWS EKS (cloudwatch) use the following (see also instruction in env: above)
+#   - name: aws-iam-token
+#     mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
+#     readOnly: true
+#     projected:
+#       defaultMode: 420
+#       sources:
+#         - serviceAccountToken:
+#             audience: sts.amazonaws.com
+#             expirationSeconds: 86400
+#             path: token
+#
+#   for CSI e.g. Azure Key Vault use the following
+#   - name: secrets-store-inline
+#     mountPath: /run/secrets
+#     readOnly: true
+#     csi:
+#       driver: secrets-store.csi.k8s.io
+#       readOnly: true
+#       volumeAttributes:
+#         secretProviderClass: "akv-grafana-spc"
+#       nodePublishSecretRef:       # Only required when using service principal mode
+#         name: grafana-akv-creds   # Only required when using service principal mode
+
+## Additional grafana server volume mounts
+# Defines additional volume mounts.
+extraVolumeMounts: []
+#   - name: extra-volume-0
+#     mountPath: /mnt/volume0
+#     readOnly: true
+#     existingClaim: volume-claim
+#   - name: extra-volume-1
+#     mountPath: /mnt/volume1
+#     readOnly: true
+#     hostPath: /usr/shared/
+#   - name: grafana-secrets
+#     csi: true
+#     data:
+#       driver: secrets-store.csi.k8s.io
+#       readOnly: true
+#       volumeAttributes:
+#         secretProviderClass: "grafana-env-spc"
+
+## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request
+lifecycleHooks: {}
+  # postStart:
+  #   exec:
+  #     command: []
+
+## Pass the plugins you want installed as a list.
+##
+plugins: []
+  # - digrich-bubblechart-panel
+  # - grafana-clock-panel
+
+## Configure grafana datasources
+## ref: http://docs.grafana.org/administration/provisioning/#datasources
+##
+datasources: {}
+#  datasources.yaml:
+#    apiVersion: 1
+#    datasources:
+#    - name: Prometheus
+#      type: prometheus
+#      url: http://prometheus-prometheus-server
+#      access: proxy
+#      isDefault: true
+#    - name: CloudWatch
+#      type: cloudwatch
+#      access: proxy
+#      uid: cloudwatch
+#      editable: false
+#      jsonData:
+#        authType: default
+#        defaultRegion: us-east-1
+
+## Configure grafana alerting (can be templated)
+## ref: http://docs.grafana.org/administration/provisioning/#alerting
+##
+alerting: {}
+  # rules.yaml:
+  #   apiVersion: 1
+  #   groups:
+  #     - orgId: 1
+  #       name: '{{ .Chart.Name }}_my_rule_group'
+  #       folder: my_first_folder
+  #       interval: 60s
+  #       rules:
+  #         - uid: my_id_1
+  #           title: my_first_rule
+  #           condition: A
+  #           data:
+  #             - refId: A
+  #               datasourceUid: '-100'
+  #               model:
+  #                 conditions:
+  #                   - evaluator:
+  #                       params:
+  #                         - 3
+  #                       type: gt
+  #                     operator:
+  #                       type: and
+  #                     query:
+  #                       params:
+  #                         - A
+  #                     reducer:
+  #                       type: last
+  #                     type: query
+  #                 datasource:
+  #                   type: __expr__
+  #                   uid: '-100'
+  #                 expression: 1==0
+  #                 intervalMs: 1000
+  #                 maxDataPoints: 43200
+  #                 refId: A
+  #                 type: math
+  #           dashboardUid: my_dashboard
+  #           panelId: 123
+  #           noDataState: Alerting
+  #           for: 60s
+  #           annotations:
+  #             some_key: some_value
+  #           labels:
+  #             team: sre_team_1
+  # contactpoints.yaml:
+  #   apiVersion: 1
+  #   contactPoints:
+  #     - orgId: 1
+  #       name: cp_1
+  #       receivers:
+  #         - uid: first_uid
+  #           type: pagerduty
+  #           settings:
+  #             integrationKey: XXX
+  #             severity: critical
+  #             class: ping failure
+  #             component: Grafana
+  #             group: app-stack
+  #             summary: |
+  #               {{ `{{ include "default.message" . }}` }}
+
+## Configure notifiers
+## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels
+##
+notifiers: {}
+#  notifiers.yaml:
+#    notifiers:
+#    - name: email-notifier
+#      type: email
+#      uid: email1
+#      # either:
+#      org_id: 1
+#      # or
+#      org_name: Main Org.
+#      is_default: true
+#      settings:
+#        addresses: an_email_address@example.com
+#    delete_notifiers:
+
+## Configure grafana dashboard providers
+## ref: http://docs.grafana.org/administration/provisioning/#dashboards
+##
+## `path` must be /var/lib/grafana/dashboards/<provider_name>
+##
+dashboardProviders: {}
+#  dashboardproviders.yaml:
+#    apiVersion: 1
+#    providers:
+#    - name: 'default'
+#      orgId: 1
+#      folder: ''
+#      type: file
+#      disableDeletion: false
+#      editable: true
+#      options:
+#        path: /var/lib/grafana/dashboards/default
+
+## Configure grafana dashboards to import
+## NOTE: To use dashboards you must also enable/configure dashboardProviders
+## ref: https://grafana.com/dashboards
+##
+## dashboards per provider, use provider name as key.
+##
+dashboards: {}
+  # default:
+  #   some-dashboard:
+  #     json: |
+  #       $RAW_JSON
+  #   custom-dashboard:
+  #     file: dashboards/custom-dashboard.json
+  #   prometheus-stats:
+  #     gnetId: 2
+  #     revision: 2
+  #     datasource: Prometheus
+  #   local-dashboard:
+  #     url: https://example.com/repository/test.json
+  #     token: ''
+  #   local-dashboard-base64:
+  #     url: https://example.com/repository/test-b64.json
+  #     token: ''
+  #     b64content: true
+  #   local-dashboard-gitlab:
+  #     url: https://example.com/repository/test-gitlab.json
+  #     gitlabToken: ''
+  #   local-dashboard-bitbucket:
+  #     url: https://example.com/repository/test-bitbucket.json
+  #     bearerToken: ''
+  #   local-dashboard-azure:
+  #     url: https://example.com/repository/test-azure.json
+  #     basic: ''
+
+## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value.
+## A provider's dashboards must be defined either in external ConfigMaps or in values.yaml, but not in both.
+## ConfigMap data example:
+##
+## data:
+##   example-dashboard.json: |
+##     RAW_JSON
+##
+dashboardsConfigMaps: {}
+#  default: ""
+
+## Grafana's primary configuration
+## NOTE: values in map will be converted to ini format
+## ref: http://docs.grafana.org/installation/configuration/
+##
+grafana.ini:
+  paths:
+    data: /var/lib/grafana/
+    logs: /var/log/grafana
+    plugins: /var/lib/grafana/plugins
+    provisioning: /etc/grafana/provisioning
+  analytics:
+    check_for_updates: true
+  log:
+    mode: console
+  grafana_net:
+    url: https://grafana.net
+  server:
+    domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}"
+## grafana Authentication can be enabled with the following values on grafana.ini
+  # server:
+    # The full public facing url you use in browser, used for redirects and emails
+    # root_url:
+  # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana
+  # auth.github:
+    # enabled: false
+    # allow_sign_up: false
+    # scopes: user:email,read:org
+    # auth_url: https://github.com/login/oauth/authorize
+    # token_url: https://github.com/login/oauth/access_token
+    # api_url: https://api.github.com/user
+    # team_ids:
+    # allowed_organizations:
+    # client_id:
+    # client_secret:
+## LDAP Authentication can be enabled with the following values on grafana.ini
+## NOTE: Grafana will fail to start if the value for ldap.toml is invalid
+  # auth.ldap:
+    # enabled: true
+    # allow_sign_up: true
+    # config_file: /etc/grafana/ldap.toml
+
+## Grafana's LDAP configuration
+## Templated by the template in _helpers.tpl
+## NOTE: To enable, grafana.ini must be configured with auth.ldap.enabled
+## ref: http://docs.grafana.org/installation/configuration/#auth-ldap
+## ref: http://docs.grafana.org/installation/ldap/#configuration
+ldap:
+  enabled: false
+  # `existingSecret` is a reference to an existing secret containing the ldap configuration
+  # for Grafana in a key `ldap-toml`.
+  existingSecret: ""
+  # `config` is the content of `ldap.toml` that will be stored in the created secret
+  config: ""
+  # config: |-
+  #   verbose_logging = true
+
+  #   [[servers]]
+  #   host = "my-ldap-server"
+  #   port = 636
+  #   use_ssl = true
+  #   start_tls = false
+  #   ssl_skip_verify = false
+  #   bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
+
+## Grafana's SMTP configuration
+## NOTE: To enable, grafana.ini must be configured with smtp.enabled
+## ref: http://docs.grafana.org/installation/configuration/#smtp
+smtp:
+  # `existingSecret` is a reference to an existing secret containing the smtp configuration
+  # for Grafana.
+  existingSecret: ""
+  userKey: "user"
+  passwordKey: "password"
+
+## Sidecars that collect the configmaps with the specified label and store the included files into the respective folders
+## Requires at least Grafana 5 to work and can't be used together with the dashboardProviders, datasources and dashboards parameters
+sidecar:
+  image:
+    repository: quay.io/kiwigrid/k8s-sidecar
+    tag: 1.22.0
+    sha: ""
+  imagePullPolicy: IfNotPresent
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
+  securityContext: {}
+  # skipTlsVerify Set to true to skip tls verification for kube api calls
+  # skipTlsVerify: true
+  enableUniqueFilenames: false
+  readinessProbe: {}
+  livenessProbe: {}
+  # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO
+  # logLevel: INFO
+  alerts:
+    enabled: false
+    # Additional environment variables for the alerts sidecar
+    env: {}
+    # Do not reprocess already processed unchanged resources on k8s API reconnect.
+    # ignoreAlreadyProcessed: true
+    # label that the configmaps with alerts are marked with
+    label: grafana_alert
+    # value of label that the configmaps with alerts are set to
+    labelValue: ""
+    # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+    # logLevel: INFO
+    # If specified, the sidecar will search for alert config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+    # watchClientTimeout: 60
+    #
+    # Endpoint to send request to reload alerts
+    reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload"
+    # Absolute path to shell script to execute after an alert got reloaded
+    script: null
+    skipReload: false
+    # Deploy the alert sidecar as an initContainer in addition to a container.
+    # Sets the size limit of the alert sidecar emptyDir volume
+    sizeLimit: {}
+  dashboards:
+    enabled: false
+    # Additional environment variables for the dashboards sidecar
+    env: {}
+    # Do not reprocess already processed unchanged resources on k8s API reconnect.
+    # ignoreAlreadyProcessed: true
+    SCProvider: true
+    # label that the configmaps with dashboards are marked with
+    label: grafana_dashboard
+    # value of label that the configmaps with dashboards are set to
+    labelValue: ""
+    # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+    # logLevel: INFO
+    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
+    folder: /tmp/dashboards
+    # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
+    defaultFolderName: null
+    # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces.
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # If specified, the sidecar will look for an annotation with this name to create folders and put graphs there.
+    # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
+    folderAnnotation: null
+    # Endpoint to send request to reload dashboards
+    reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload"
+    # Absolute path to shell script to execute after a configmap got reloaded
+    script: null
+    skipReload: false
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+    # watchClientTimeout: 60
+    #
+    # provider configuration that lets grafana manage the dashboards
+    provider:
+      # name of the provider, should be unique
+      name: sidecarProvider
+      # orgid as configured in grafana
+      orgid: 1
+      # folder in which the dashboards should be imported in grafana
+      folder: ''
+      # type of the provider
+      type: file
+      # disableDelete to activate an import-only behaviour
+      disableDelete: false
+      # allow updating provisioned dashboards from the UI
+      allowUiUpdates: false
+      # allow Grafana to replicate dashboard structure from filesystem
+      foldersFromFilesStructure: false
+    # Additional dashboard sidecar volume mounts
+    extraMounts: []
+    # Sets the size limit of the dashboard sidecar emptyDir volume
+    sizeLimit: {}
+  datasources:
+    enabled: false
+    # Additional environment variables for the datasources sidecar
+    env: {}
+    # Do not reprocess already processed unchanged resources on k8s API reconnect.
+    # ignoreAlreadyProcessed: true
+    # label that the configmaps with datasources are marked with
+    label: grafana_datasource
+    # value of label that the configmaps with datasources are set to
+    labelValue: ""
+    # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+    # logLevel: INFO
+    # If specified, the sidecar will search for datasource config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+    # watchClientTimeout: 60
+    #
+    # Endpoint to send request to reload datasources
+    reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
+    # Absolute path to shell script to execute after a datasource got reloaded
+    script: null
+    skipReload: false
+    # Deploy the datasource sidecar as an initContainer in addition to a container.
+    # This is needed if skipReload is true, to load any datasources defined at startup time.
+    initDatasources: false
+    # Sets the size limit of the datasource sidecar emptyDir volume
+    sizeLimit: {}
+  plugins:
+    enabled: false
+    # Additional environment variables for the plugins sidecar
+    env: {}
+    # Do not reprocess already processed unchanged resources on k8s API reconnect.
+    # ignoreAlreadyProcessed: true
+    # label that the configmaps with plugins are marked with
+    label: grafana_plugin
+    # value of label that the configmaps with plugins are set to
+    labelValue: ""
+    # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+    # logLevel: INFO
+    # If specified, the sidecar will search for plugin config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+    # watchClientTimeout: 60
+    #
+    # Endpoint to send request to reload plugins
+    reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
+    # Absolute path to shell script to execute after a plugin got reloaded
+    script: null
+    skipReload: false
+    # Deploy the plugin sidecar as an initContainer in addition to a container.
+    # This is needed if skipReload is true, to load any plugins defined at startup time.
+    initPlugins: false
+    # Sets the size limit of the plugin sidecar emptyDir volume
+    sizeLimit: {}
+  notifiers:
+    enabled: false
+    # Additional environment variables for the notifiers sidecar
+    env: {}
+    # Do not reprocess already processed unchanged resources on k8s API reconnect.
+    # ignoreAlreadyProcessed: true
+    # label that the configmaps with notifiers are marked with
+    label: grafana_notifier
+    # value of label that the configmaps with notifiers are set to
+    labelValue: ""
+    # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+    # logLevel: INFO
+    # If specified, the sidecar will search for notifier config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+    watchMethod: WATCH
+    # search in configmap, secret or both
+    resource: both
+    # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+    # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+    # watchServerTimeout: 3600
+    #
+    # watchClientTimeout: is a client-side timeout, configuring your local socket.
+    # If you have a network outage dropping all packets with no RST/FIN,
+    # this is how long your client waits before realizing & dropping the connection.
+    # defaults to 66sec (sic!)
+    # watchClientTimeout: 60
+    #
+    # Endpoint to send request to reload notifiers
+    reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
+    # Absolute path to shell script to execute after a notifier got reloaded
+    script: null
+    skipReload: false
+    # Deploy the notifier sidecar as an initContainer in addition to a container.
+    # This is needed if skipReload is true, to load any notifiers defined at startup time.
+    initNotifiers: false
+    # Sets the size limit of the notifier sidecar emptyDir volume
+    sizeLimit: {}
+
+## Override the deployment namespace
+##
+namespaceOverride: ""
+
+## Number of old ReplicaSets to retain
+##
+revisionHistoryLimit: 10
+
+## Add a separate remote image renderer deployment/service
+imageRenderer:
+  deploymentStrategy: {}
+  # Enable the image-renderer deployment & service
+  enabled: false
+  replicas: 1
+  autoscaling:
+    enabled: false
+    minReplicas: 1
+    maxReplicas: 5
+    targetCPU: "60"
+    targetMemory: ""
+    behavior: {}
+  image:
+    # image-renderer Image repository
+    repository: grafana/grafana-image-renderer
+    # image-renderer Image tag
+    tag: latest
+    # image-renderer Image sha (optional)
+    sha: ""
+    # image-renderer ImagePullPolicy
+    pullPolicy: Always
+  # extra environment variables
+  env:
+    HTTP_HOST: "0.0.0.0"
+    # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758
+    # RENDERING_MODE: clustered
+    # IGNORE_HTTPS_ERRORS: true
+  # image-renderer deployment serviceAccount
+  serviceAccountName: ""
+  # image-renderer deployment securityContext
+  securityContext: {}
+  # image-renderer deployment container securityContext
+  containerSecurityContext:
+    capabilities:
+      drop: ['ALL']
+    allowPrivilegeEscalation: false
+    readOnlyRootFilesystem: true
+  # image-renderer deployment Host Aliases
+  hostAliases: []
+  # image-renderer deployment priority class
+  priorityClassName: ''
+  service:
+    # Enable the image-renderer service
+    enabled: true
+    # image-renderer service port name
+    portName: 'http'
+    # image-renderer service port used by both service and deployment
+    port: 8081
+    targetPort: 8081
+    # Adds the appProtocol field to the image-renderer service. This allows working with istio protocol selection. Ex: "http" or "tcp"
+    appProtocol: ""
+  serviceMonitor:
+    ## If true, a ServiceMonitor CRD is created for a prometheus operator
+    ## https://github.com/coreos/prometheus-operator
+    ##
+    enabled: false
+    path: /metrics
+    #  namespace: monitoring  (defaults to use the namespace this chart is deployed to)
+    labels: {}
+    interval: 1m
+    scheme: http
+    tlsConfig: {}
+    scrapeTimeout: 30s
+    relabelings: []
+    # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels
+    targetLabels: []
+      # - targetLabel1
+      # - targetLabel2
+  # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana
+  grafanaProtocol: http
+  # In case a sub_path is used this needs to be added to the image renderer callback
+  grafanaSubPath: ""
+  # name of the image-renderer port on the pod
+  podPortName: http
+  # number of image-renderer replica sets to keep
+  revisionHistoryLimit: 10
+  networkPolicy:
+    # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods
+    limitIngress: true
+    # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods
+    limitEgress: false
+    # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled)
+    extraIngressSelectors: []
+  resources: {}
+#   limits:
+#     cpu: 100m
+#     memory: 100Mi
+#   requests:
+#     cpu: 50m
+#     memory: 50Mi
+  ## Node labels for pod assignment
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  #
+  nodeSelector: {}
+
+  ## Tolerations for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+  ##
+  tolerations: []
+
+  ## Affinity for pod assignment (evaluated as template)
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ##
+  affinity: {}
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName: "default-scheduler"
+
+networkPolicy:
+  ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
+  ##
+  enabled: false
+  ## @param networkPolicy.ingress When true enables the creation of an ingress network policy
+  ##
+  ingress: true
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the grafana port defined.
+  ## When true, grafana will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+  ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed
+  ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+  ## and that match the other criteria (i.e. carry the right label) can reach grafana.
+  ## But sometimes we want grafana to be accessible to clients from other namespaces; in this case, we can use this
+  ## LabelSelector to select these namespaces. Note that the networkPolicy's namespace should also be explicitly added.
+  ##
+  ## Example:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##    - {key: role, operator: In, values: [frontend]}
+  ##
+  explicitNamespacesSelector: {}
+  ##
+  ##
+  ##
+  ##
+  ##
+  ##
+  egress:
+    ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be
+    ## created allowing grafana to connect to external data sources from kubernetes cluster.
+    enabled: false
+    ##
+    ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress
+    ports: []
+    ## Add ports to the egress by specifying - port: <port number>
+    ## E.X.
+    ## ports:
+    ##  - port: 80
+    ##  - port: 443
+  ##
+  ##
+  ##
+  ##
+  ##
+  ##
+
+# Enable backward compatibility with Kubernetes versions below 1.13, which don't have the enableServiceLinks option
+enableKubeBackwardCompatibility: false
+useStatefulSet: false
+# Create dynamic manifests via values:
+extraObjects: []
+  # - apiVersion: "kubernetes-client.io/v1"
+  #   kind: ExternalSecret
+  #   metadata:
+  #     name: grafana-secrets
+  #   spec:
+  #     backendType: gcpSecretsManager
+  #     data:
+  #       - key: grafana-admin-password
+  #         name: adminPassword
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/.helmignore
new file mode 100644
index 0000000..f0c1319
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/.helmignore
@@ -0,0 +1,21 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.lock b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.lock
new file mode 100644
index 0000000..97ba1e2
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.lock
@@ -0,0 +1,15 @@
+dependencies:
+- name: cassandra
+  repository: https://charts.helm.sh/incubator
+  version: 0.15.3
+- name: elasticsearch
+  repository: https://helm.elastic.co
+  version: 7.17.3
+- name: kafka
+  repository: https://charts.bitnami.com/bitnami
+  version: 19.1.5
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.16.0
+digest: sha256:60d70b3966a1bb5b87d6679139138b5a79871c486a771953d527e083ea44d9df
+generated: "2023-03-01T13:41:33.280002191Z"
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.yaml
new file mode 100644
index 0000000..0438664
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/Chart.yaml
@@ -0,0 +1,43 @@
+apiVersion: v2
+appVersion: 1.39.0
+dependencies:
+- condition: provisionDataStore.cassandra
+  name: cassandra
+  repository: https://charts.helm.sh/incubator
+  version: 0.15.3
+- condition: provisionDataStore.elasticsearch
+  name: elasticsearch
+  repository: https://helm.elastic.co
+  version: ^7.11.1
+- condition: provisionDataStore.kafka
+  name: kafka
+  repository: https://charts.bitnami.com/bitnami
+  version: ^19.1.5
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.16.0
+description: A Jaeger Helm chart for Kubernetes
+home: https://jaegertracing.io
+icon: https://camo.githubusercontent.com/afa87494e0753b4b1f5719a2f35aa5263859dffb/687474703a2f2f6a61656765722e72656164746865646f63732e696f2f656e2f6c61746573742f696d616765732f6a61656765722d766563746f722e737667
+keywords:
+- jaeger
+- opentracing
+- tracing
+- instrumentation
+kubeVersion: '>= 1.21-0'
+maintainers:
+- email: david.vonthenen@dell.com
+  name: dvonthenen
+- email: ankit.mehta@appian.com
+  name: mehta-ankit
+- email: michael.lorant@fairfaxmedia.com.au
+  name: mikelorant
+- email: naseem@transit.app
+  name: naseemkullah
+- email: me@pavelnikolov.net
+  name: pavelnikolov
+name: jaeger
+sources:
+- https://hub.docker.com/u/jaegertracing/
+type: application
+version: 0.67.6
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/README.md
new file mode 100644
index 0000000..dfc2d0b
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/README.md
@@ -0,0 +1,400 @@
+# Jaeger
+
+[Jaeger](https://www.jaegertracing.io/) is a distributed tracing system.
+
+## Introduction
+
+This chart adds all components required to run Jaeger as described in the [jaeger-kubernetes](https://github.com/jaegertracing/jaeger-kubernetes) GitHub page for a production-like deployment. The chart default will deploy a new Cassandra cluster (using the [cassandra chart](https://github.com/kubernetes/charts/tree/master/incubator/cassandra)), but also supports using an existing Cassandra cluster, deploying a new ElasticSearch cluster (using the [elasticsearch chart](https://github.com/elastic/helm-charts/tree/master/elasticsearch)), or connecting to an existing ElasticSearch cluster. Once the storage backend is available, the chart will deploy jaeger-agent as a DaemonSet and deploy the jaeger-collector and jaeger-query components as Deployments.
+
+## Installing the Chart
+
+Add the Jaeger Tracing Helm repository:
+
+```bash
+helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
+```
+
+To install a release named `jaeger`:
+
+```bash
+helm install jaeger jaegertracing/jaeger
+```
+
+By default, the chart deploys the following:
+
+- Jaeger Agent DaemonSet
+- Jaeger Collector Deployment
+- Jaeger Query (UI) Deployment
+- Cassandra StatefulSet (subject to change!)
+
+![Jaeger with Default
+components](https://www.jaegertracing.io/img/architecture-v1.png)
+
+## Configuration
+
+See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](https://github.com/jaegertracing/helm-charts/blob/master/charts/jaeger/values.yaml), or run these configuration commands:
+
+```console
+$ helm show values jaegertracing/jaeger
+```
+
+You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options.
+
+### Dependencies
+
+If installing with a dependency such as Cassandra, Elasticsearch and/or Kafka, their values can be shown by running:
+
+```console
+helm repo add elastic https://helm.elastic.co
+helm show values elastic/elasticsearch
+```
+
+```console
+helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
+helm show values incubator/cassandra
+```
+
+```console
+helm repo add bitnami https://charts.bitnami.com/bitnami
+helm show values bitnami/kafka
+```
+
+Please note, any dependency values must be nested within the key named after the
+chart, i.e. `elasticsearch`, `cassandra` and/or `kafka`.
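+
+For example, a minimal sketch of overriding a nested value (using the `config.cluster_size` parameter documented in the bundled cassandra chart's README below; the file name `jaeger-values.yaml` is just the convention used elsewhere in this README):
+
+```yaml
+# jaeger-values.yaml -- passed via `helm install jaeger jaegertracing/jaeger --values jaeger-values.yaml`
+cassandra:            # top-level key matches the dependency chart name
+  config:
+    cluster_size: 5   # forwarded to the cassandra subchart, not interpreted by the jaeger chart itself
+```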
+
+## Storage
+
+As per Jaeger documentation, for large scale production deployment the Jaeger
+team [recommends Elasticsearch backend over Cassandra](https://www.jaegertracing.io/docs/latest/faq/#what-is-the-recommended-storage-backend),
+as such the default backend may change in the future and **it is highly
+recommended to explicitly configure storage**.
+
+If you are just starting out with a testing/demo setup, you can also use in-memory storage for a
+fast and easy setup experience using the [Jaeger All in One executable](https://www.jaegertracing.io/docs/1.29/getting-started/#all-in-one).
+
+### Elasticsearch configuration
+
+#### Elasticsearch Rollover
+
+If using the [Elasticsearch
+Rollover](https://www.jaegertracing.io/docs/latest/deployment/#elasticsearch-rollover)
+feature, Elasticsearch must already be present and must therefore be deployed
+separately from this chart; otherwise the rollover init hook won't be able to
+complete successfully.
+
+#### Installing the Chart using a New ElasticSearch Cluster
+
+To install the chart with the release name `jaeger` using a new ElasticSearch cluster instead of Cassandra (default), run the following command:
+
+```console
+helm install jaeger jaegertracing/jaeger \
+  --set provisionDataStore.cassandra=false \
+  --set provisionDataStore.elasticsearch=true \
+  --set storage.type=elasticsearch
+```
+
+#### Installing the Chart using an Existing Elasticsearch Cluster
+
+A release can be configured as follows to use an existing ElasticSearch cluster as the storage backend:
+
+```console
+helm install jaeger jaegertracing/jaeger \
+  --set provisionDataStore.cassandra=false \
+  --set storage.type=elasticsearch \
+  --set storage.elasticsearch.host=<host> \
+  --set storage.elasticsearch.port=<port> \
+  --set storage.elasticsearch.user=<user> \
+  --set storage.elasticsearch.password=<password>
+```
+
+#### Installing the Chart using an Existing ElasticSearch Cluster with TLS
+
+If you already have an existing running ElasticSearch cluster with TLS, you can configure the chart as follows to use it as your backing store:
+
+Content of the `jaeger-values.yaml` file:
+
+```YAML
+storage:
+  type: elasticsearch
+  elasticsearch:
+    host: <host>
+    port: <port>
+    scheme: https
+    user: <user>
+    password: <password>
+provisionDataStore:
+  cassandra: false
+  elasticsearch: false
+query:
+  cmdlineParams:
+    es.tls.ca: "/tls/es.pem"
+  extraConfigmapMounts:
+    - name: jaeger-tls
+      mountPath: /tls
+      subPath: ""
+      configMap: jaeger-tls
+      readOnly: true
+collector:
+  cmdlineParams:
+    es.tls.ca: "/tls/es.pem"
+  extraConfigmapMounts:
+    - name: jaeger-tls
+      mountPath: /tls
+      subPath: ""
+      configMap: jaeger-tls
+      readOnly: true
+spark:
+  enabled: true
+  cmdlineParams:
+    java.opts: "-Djavax.net.ssl.trustStore=/tls/trust.store -Djavax.net.ssl.trustStorePassword=changeit"
+  extraConfigmapMounts:
+    - name: jaeger-tls
+      mountPath: /tls
+      subPath: ""
+      configMap: jaeger-tls
+      readOnly: true
+
+```
+
+Generate configmap jaeger-tls:
+
+```console
+keytool -import -trustcacerts -keystore trust.store -storepass changeit -alias es-root -file es.pem
+kubectl create configmap jaeger-tls --from-file=trust.store --from-file=es.pem
+```
+
+```console
+helm install jaeger jaegertracing/jaeger --values jaeger-values.yaml
+```
+
+### Cassandra configuration
+
+#### Installing the Chart using an Existing Cassandra Cluster
+
+If you already have an existing running Cassandra cluster, you can configure the chart as follows to use it as your backing store (make sure you replace `<host>`, `<port>`, etc with your values):
+
+```console
+helm install jaeger jaegertracing/jaeger \
+  --set provisionDataStore.cassandra=false \
+  --set storage.cassandra.host=<host> \
+  --set storage.cassandra.port=<port> \
+  --set storage.cassandra.user=<user> \
+  --set storage.cassandra.password=<password>
+```
+
+#### Installing the Chart using an Existing Cassandra Cluster with TLS
+
+If you already have an existing running Cassandra cluster with TLS, you can configure the chart as follows to use it as your backing store:
+
+Content of the `values.yaml` file:
+
+```YAML
+storage:
+  type: cassandra
+  cassandra:
+    host: <host>
+    port: <port>
+    user: <user>
+    password: <password>
+    tls:
+      enabled: true
+      secretName: cassandra-tls-secret
+
+provisionDataStore:
+  cassandra: false
+```
+
+Content of the `jaeger-tls-cassandra-secret.yaml` file:
+
+```YAML
+apiVersion: v1
+kind: Secret
+metadata:
+  name: cassandra-tls-secret
+data:
+  commonName: <common-name>
+  ca-cert.pem: |
+    -----BEGIN CERTIFICATE-----
+    <ca-cert>
+    -----END CERTIFICATE-----
+  client-cert.pem: |
+    -----BEGIN CERTIFICATE-----
+    <client-cert>
+    -----END CERTIFICATE-----
+  client-key.pem: |
+    -----BEGIN RSA PRIVATE KEY-----
+    <client-key>
+    -----END RSA PRIVATE KEY-----
+  cqlshrc: |
+    [ssl]
+    certfile = ~/.cassandra/ca-cert.pem
+    userkey = ~/.cassandra/client-key.pem
+    usercert = ~/.cassandra/client-cert.pem
+
+```
+
+```console
+kubectl apply -f jaeger-tls-cassandra-secret.yaml
+helm install jaeger jaegertracing/jaeger --values values.yaml
+```
+
+### Ingester Configuration
+
+#### Installing the Chart with Ingester enabled
+
+The architecture illustrated below can be achieved by enabling the ingester component. When enabled, Cassandra or Elasticsearch (depending on the configured values) now becomes the ingester's storage backend, whereas Kafka becomes the storage backend of the collector service.
+
+![Jaeger with Ingester](https://www.jaegertracing.io/img/architecture-v2.png)
+
+#### Installing the Chart with Ingester enabled using a New Kafka Cluster
+
+To provision a new Kafka cluster along with jaeger-ingester:
+
+```console
+helm install jaeger jaegertracing/jaeger \
+  --set provisionDataStore.kafka=true \
+  --set ingester.enabled=true
+```
+
+#### Installing the Chart with Ingester using an existing Kafka Cluster
+
+You can also use an existing Kafka cluster with jaeger:
+
+```console
+helm install jaeger jaegertracing/jaeger \
+  --set ingester.enabled=true \
+  --set storage.kafka.brokers={<broker1>,<broker2>} \
+  --set storage.kafka.topic=<topic>
+```
+
+### Other Storage Configuration
+
+If you are using grpc-plugin based storage, you can set environment
+variables that are needed by the plugin.
+
+As an example, if using the [jaeger-mongodb](https://github.com/mongodb-labs/jaeger-mongodb)
+plugin you can set the `MONGO_URL` as follows...
+
+```YAML
+storage:
+  type: grpc-plugin
+  grpcPlugin:
+    extraEnv:
+      - name: MONGO_URL
+        valueFrom:
+          secretKeyRef:
+            key: MONGO_URL
+            name: jaeger-secrets
+```
+
+### All in One In-Memory Configuration
+
+#### Installing the Chart using the All in One executable and in-memory storage
+
+To install the chart with the release name `jaeger` using in-memory storage and the All in One
+executable, configure the chart as follows:
+
+Content of the `values.yaml` file:
+
+```yaml
+provisionDataStore:
+  cassandra: false
+allInOne:
+  enabled: true
+storage:
+  type: none
+agent:
+  enabled: false
+collector:
+  enabled: false
+query:
+  enabled: false
+```
+
+It's possible to specify resources and extra environment variables for the all in one deployment:
+
+```yaml
+allInOne:
+  extraEnv:
+    - name: QUERY_BASE_PATH
+      value: /jaeger
+  resources:
+    limits:
+      cpu: 500m
+      memory: 512Mi
+    requests:
+      cpu: 256m
+      memory: 128Mi
+```
+
+```bash
+helm install jaeger jaegertracing/jaeger --values values.yaml
+```
+
+## oAuth2 Sidecar
+
+If extra protection of the Jaeger UI is needed, then the oAuth2 sidecar can be enabled in the Jaeger Query. The oAuth2
+sidecar acts as a security proxy in front of the Jaeger Query service and enforces user authentication before reaching
+the Jaeger UI. This method can work with any valid provider including Keycloak, Azure, Google, GitHub, and more.
+
+Official docs [here](https://oauth2-proxy.github.io/oauth2-proxy/docs/behaviour)
+
+Content of the `jaeger-values.yaml` file:
+
+```YAML
+query:
+  enabled: true
+  oAuthSidecar:
+    enabled: true
+    image: quay.io/oauth2-proxy/oauth2-proxy:v7.3.0
+    pullPolicy: IfNotPresent
+    containerPort: 4180
+    args:
+      - --config
+      - /etc/oauth2-proxy/oauth2-proxy.cfg
+      - --client-secret
+      - "$(client-secret)"
+    extraEnv:
+      - name: client-secret
+        valueFrom:
+          secretKeyRef:
+            name: client-secret
+            key: client-secret-key
+    extraConfigmapMounts: []
+    extraSecretMounts: []
+    config: |-
+      provider = "oidc"
+      https_address = ":4180"
+      upstreams = ["http://localhost:16686"]
+      redirect_url = "https://jaeger-svc-domain/oauth2/callback"
+      client_id = "jaeger-query"
+      oidc_issuer_url = "https://keycloak-svc-domain/auth/realms/Default"
+      cookie_secure = "true"
+      cookie_secret = ""
+      email_domains = "*"
+      oidc_groups_claim = "groups"
+      user_id_claim = "preferred_username"
+      skip_provider_button = "true"
+```
+
+## Installing extra kubernetes objects
+
+If additional kubernetes objects need to be installed alongside this chart, set the `extraObjects` array to contain
+the yaml describing these objects. The values in the array are treated as a template to allow the use of variable
+substitution and function calls as in the example below.
+
+Content of the `jaeger-values.yaml` file:
+
+```YAML
+extraObjects:
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: {{ .Release.Name }}-someRoleBinding
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: Role
+      name: someRole
+    subjects:
+      - kind: ServiceAccount
+        name: "{{ include \"jaeger.esLookback.serviceAccountName\" . }}"
+```
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/.helmignore
new file mode 100644
index 0000000..5e03def
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/.helmignore
@@ -0,0 +1,17 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/Chart.yaml
new file mode 100644
index 0000000..ccf9eaf
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/Chart.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+appVersion: 3.11.6
+description: Apache Cassandra is a free and open-source distributed database management
+  system designed to handle large amounts of data across many commodity servers, providing
+  high availability with no single point of failure.
+home: http://cassandra.apache.org
+icon: https://upload.wikimedia.org/wikipedia/commons/thumb/5/5e/Cassandra_logo.svg/330px-Cassandra_logo.svg.png
+keywords:
+- cassandra
+- database
+- nosql
+maintainers:
+- email: goonohc@gmail.com
+  name: KongZ
+- email: maor.friedman@redhat.com
+  name: maorfr
+name: cassandra
+version: 0.15.3
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/README.md
new file mode 100644
index 0000000..5b6842c
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/README.md
@@ -0,0 +1,223 @@
+# ⚠️ Repo Archive Notice
+
+As of Nov 13, 2020, charts in this repo will no longer be updated.
+For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/).
+
+# Cassandra
+A Cassandra Chart for Kubernetes
+
+## Install Chart
+To install the Cassandra Chart into your Kubernetes cluster (this Chart requires a persistent volume by default, so you may need to create a storage class before installing the chart; to create one, see the [Persist data](#persist_data) section):
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" incubator/cassandra
+```
+
+After installation succeeds, you can get the status of the Chart:
+
+```bash
+helm status "cassandra"
+```
+
+If you want to delete your Chart, use this command:
+```bash
+helm delete --purge "cassandra"
+```
+
+## Upgrading
+
+To upgrade your Cassandra release, simply run:
+
+```bash
+helm upgrade "cassandra" incubator/cassandra
+```
+
+### 0.12.0
+
+This version fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+Until this version, in order to upgrade, you have to delete the Cassandra StatefulSet before upgrading:
+```bash
+$ kubectl delete statefulset --cascade=false my-cassandra-release
+```
+
+
+## Persist data
+You need to create a `StorageClass` before you are able to persist data in a persistent volume.
+To create a `StorageClass` on Google Cloud, run the following:
+
+```bash
+kubectl create -f sample/create-storage-gce.yaml
+```
+
+And set the following values in `values.yaml`:
+
+```yaml
+persistence:
+  enabled: true
+```
+
+If you want to create a `StorageClass` on another platform, please see the documentation here: [https://kubernetes.io/docs/user-guide/persistent-volumes/](https://kubernetes.io/docs/user-guide/persistent-volumes/)
+
+When running a cluster without persistence, the termination of a pod will first initiate a decommissioning of that pod.
+Depending on the amount of data stored inside the cluster this may take a while. In order to complete a graceful
+termination, pods need to be given more time for it. Set the following values in `values.yaml`:
+
+```yaml
+podSettings:
+  terminationGracePeriodSeconds: 1800
+```
+
+## Install Chart with specific cluster size
+By default, this Chart will create a Cassandra cluster with 3 nodes. If you want to change the cluster size during installation, you can use the `--set config.cluster_size={value}` argument, or edit `values.yaml`.
+
+For example, to set the cluster size to 5:
+
+```bash
+helm install --namespace "cassandra" -n "cassandra" --set config.cluster_size=5 incubator/cassandra/
+```
+
+## Install Chart with specific resource size
+By default, this Chart will create a Cassandra cluster with 2 vCPU and 4Gi of memory, which is suitable for a development environment.
+If you want to use this Chart for production, I would recommend updating the CPU to 4 vCPU and the memory to 16Gi. Also increase the size of `max_heap_size` and `heap_new_size`.
+To update the settings, edit `values.yaml`.
+
+## Install Chart with specific node
+Sometimes you may need to deploy your cassandra to specific nodes to allocate resources. You can use the node selector by setting `nodes.enabled=true` in `values.yaml`.
+For example, say you have 6 vms in your node pools and you want to deploy cassandra to nodes labeled as `cloud.google.com/gke-nodepool: pool-db`.
+
+Set the following values in `values.yaml`:
+
+```yaml
+nodes:
+  enabled: true
+  selector:
+    nodeSelector:
+      cloud.google.com/gke-nodepool: pool-db
+```
+
+## Configuration
+
+The following table lists the configurable parameters of the Cassandra chart and their default values.
+
+| Parameter                  | Description                                   | Default                                                     |
+| -----------------------    | --------------------------------------------- | ---------------------------------------------------------- |
+| `image.repo`               | `cassandra` image repository                  | `cassandra`                                                 |
+| `image.tag`                | `cassandra` image tag                         | `3.11.5`                                                    |
+| `image.pullPolicy`         | Image pull policy                             | `Always` if `imageTag` is `latest`, else `IfNotPresent`     |
+| `image.pullSecrets`        | Image pull secrets                            | `nil`                                                       |
+| `config.cluster_domain`    | The name of the cluster domain.               | `cluster.local`                                             |
+| `config.cluster_name`      | The name of the cluster.                      | `cassandra`                                                 |
+| `config.cluster_size`      | The number of nodes in the cluster.           | `3`                                                         |
+| `config.seed_size`         | The number of seed nodes used to bootstrap new clients joining the cluster. | `2`                          |
+| `config.seeds`             | The comma-separated list of seed nodes.       | Automatically generated according to `.Release.Name` and `config.seed_size` |
+| `config.num_tokens`        | The number of tokens (vnodes) assigned to each node. | `256`                                                |
+| `config.dc_name`           | Datacenter name.                              | `DC1`                                                       |
+| `config.rack_name`         | Rack name.                                    | `RAC1`                                                      |
+| `config.endpoint_snitch`   | The snitch used to locate nodes and route requests. | `SimpleSnitch`                                        |
+| `config.max_heap_size`     | JVM maximum heap size.                        | `2048M`                                                     |
+| `config.heap_new_size`     | JVM new generation heap size.                 | `512M`                                                      |
+| `config.ports.cql`         | CQL native transport port.                    | `9042`                                                      |
+| `config.ports.thrift`      | Thrift transport port.                        | `9160`                                                      |
+| `config.ports.agent`       | The port of the JVM Agent (if any)            | `nil`                                                       |
+| `config.start_rpc`         | Whether to start the Thrift RPC server.       | `false`                                                     |
+| `configOverrides`          | Overrides config files in /etc/cassandra dir  | `{}`                                                        |
+| `commandOverrides`         | Overrides default docker command              | `[]`                                                        |
+| `argsOverrides`            | Overrides default docker args                 | `[]`                                                        |
+| `env`                      | Custom env variables                          | `{}`                                                        |
+| `schedulerName`            | Name of k8s scheduler (other than the default) | `nil`                                                      |
+| `persistence.enabled`      | Use a PVC to persist data                     | `true`                                                      |
+| `persistence.storageClass` | Storage class of backing PVC                  | `nil` (uses alpha storage class annotation)                 |
+| `persistence.accessMode`   | Use volume as ReadOnly or ReadWrite           | `ReadWriteOnce`                                             |
+| `persistence.size`         | Size of data volume                           | `10Gi`                                                      |
+| `resources`                | CPU/Memory resource requests/limits           | Memory: `4Gi`, CPU: `2`                                     |
+| `service.type`             | k8s service type exposing ports, e.g. `NodePort`| `ClusterIP`                                               |
+| `service.annotations`      | Annotations to apply to cassandra service     | `""`                                                        |
+| `podManagementPolicy`      | podManagementPolicy of the StatefulSet        | `OrderedReady`                                              |
+| `podDisruptionBudget`      | Pod disruption budget                         | `{}`                                                        |
+| `podAnnotations`           | pod annotations for the StatefulSet           | `{}`                                                        |
+| `updateStrategy.type`      | UpdateStrategy of the StatefulSet             | `OnDelete`                                                  |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `90`                                                     |
+| `livenessProbe.periodSeconds` | How often to perform the probe             | `30`                                                        |
+| `livenessProbe.timeoutSeconds` | When the probe times out                  | `5`                                                         |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1`      |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3`        |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `90`                                                   |
+| `readinessProbe.periodSeconds` | How often to perform the probe            | `30`                                                        |
+| `readinessProbe.timeoutSeconds` | When the probe times out                 | `5`                                                         |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1`     |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3`       |
+| `readinessProbe.address`   | Address to use for checking node has joined the cluster and is ready. | `${POD_IP}`                        |
+| `rbac.create`              | Specifies whether RBAC resources should be created | `true`                                                 |
+| `serviceAccount.create`    | Specifies whether a ServiceAccount should be created | `true`                                               |
+| `serviceAccount.name`      | The name of the ServiceAccount to use         |                                                             |
+| `backup.enabled`           | Enable backup on chart installation           | `false`                                                     |
+| `backup.schedule`          | Keyspaces to backup, each with cron time      |                                                             |
+| `backup.annotations`       | Backup pod annotations                        | iam.amazonaws.com/role: `cain`                              |
+| `backup.image.repository`  | Backup image repository                       | `maorfr/cain`                                               |
+| `backup.image.tag`         | Backup image tag                              | `0.6.0`                                                     |
+| `backup.extraArgs`         | Additional arguments for cain                 | `[]`                                                        |
+| `backup.env`               | Backup environment variables                  | AWS_REGION: `us-east-1`                                     |
+| `backup.resources`         | Backup CPU/Memory resource requests/limits    | Memory: `1Gi`, CPU: `1`                                     |
+| `backup.destination`       | Destination to store backup artifacts         | `s3://bucket/cassandra`                                     |
+| `backup.google.serviceAccountSecret` | Secret containing credentials if GCS is used as destination |                                         |
+| `exporter.enabled`         | Enable Cassandra exporter                     | `false`                                                     |
+| `exporter.servicemonitor.enabled` | Enable ServiceMonitor for exporter     | `true`                                                      |
+| `exporter.servicemonitor.additionalLabels` | Additional labels for Service Monitor | `{}`                                               |
+| `exporter.image.repo`      | Exporter image repository                     | `criteord/cassandra_exporter`                               |
+| `exporter.image.tag`       | Exporter image tag                            | `2.0.2`                                                     |
+| `exporter.port`            | Exporter port                                 | `5556`                                                      |
+| `exporter.jvmOpts`         | Exporter additional JVM options               |                                                             |
+| `exporter.resources`       | Exporter CPU/Memory resource requests/limits  | `{}`                                                        |
+| `extraContainers`          | Sidecar containers for the pods               | `[]`                                                        |
+| `extraVolumes`             | Additional volumes for the pods               | `[]`                                                        |
+| `extraVolumeMounts`        | Extra volume mounts for the pods              | `[]`                                                        |
+| `affinity`                 | Kubernetes node affinity                      | `{}`                                                        |
+| `tolerations`              | Kubernetes node tolerations                   | `[]`                                                        |
+
+
+## Scale cassandra
+When you want to change the cluster size of your Cassandra cluster, you can use the helm upgrade command:
+ +```bash +helm upgrade --set config.cluster_size=5 cassandra incubator/cassandra +``` + +## Get cassandra status +You can check the status of your Cassandra cluster by running the command + +```bash +kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') nodetool status +``` + +Output +```bash +Datacenter: asia-east1 +====================== +Status=Up/Down +|/ State=Normal/Leaving/Joining/Moving +-- Address Load Tokens Owns (effective) Host ID Rack +UN 10.8.1.11 108.45 KiB 256 66.1% 410cc9da-8993-4dc2-9026-1dd381874c54 a +UN 10.8.4.12 84.08 KiB 256 68.7% 96e159e1-ef94-406e-a0be-e58fbd32a830 c +UN 10.8.3.6 103.07 KiB 256 65.2% 1a42b953-8728-4139-b070-b855b8fff326 b +``` + +## Benchmark +You can use the [cassandra-stress](https://docs.datastax.com/en/cassandra/3.0/cassandra/tools/toolsCStress.html) tool to benchmark the cluster with the following command + +```bash +kubectl exec -it --namespace cassandra $(kubectl get pods --namespace cassandra -l app=cassandra-cassandra -o jsonpath='{.items[0].metadata.name}') cassandra-stress +``` + +Example `cassandra-stress` arguments + - Run a mixed read/write workload with a 9:1 read-to-write ratio + - Operate on a total of 1 million keys with uniform distribution + - Use QUORUM for read/write + - Run with 50 threads + - Write results to an HTML graph + - Use NetworkTopologyStrategy with a replication factor of 2 + +```bash +cassandra-stress mixed ratio\(write=1,read=9\) n=1000000 cl=QUORUM -pop dist=UNIFORM\(1..1000000\) -mode native cql3 -rate threads=50 -log file=~/mixed_autorate_r9w1_1M.log -graph file=test2.html title=test revision=test2 -schema "replication(strategy=NetworkTopologyStrategy, factor=2)" +``` diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/sample/create-storage-gce.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/sample/create-storage-gce.yaml new file mode 100644 index 0000000..2467b95 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/sample/create-storage-gce.yaml @@ -0,0 +1,7 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: generic +provisioner: kubernetes.io/gce-pd +parameters: + type: pd-ssd diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/NOTES.txt new file mode 100644 index 0000000..9ecb004 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/NOTES.txt @@ -0,0 +1,35 @@ +Cassandra CQL can be accessed via port {{ .Values.config.ports.cql }} on the following DNS name from within your cluster: +Cassandra Thrift can be accessed via port {{ .Values.config.ports.thrift }} on the following DNS name from within your cluster: + +If you want to connect to the remote instance with your local Cassandra CQL CLI, forward the API port to localhost:9042 by running the following: +- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . 
}},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') 9042:{{ .Values.config.ports.cql }} + +If you want to connect to Cassandra with CQL, run the following: +{{- if contains "NodePort" .Values.service.type }} +- export CQL_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "cassandra.fullname" . }}) +- export CQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") +- cqlsh $CQL_HOST $CQL_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "cassandra.fullname" . }}' +- export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "cassandra.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +- echo cqlsh $SERVICE_IP +{{- else if contains "ClusterIP" .Values.service.type }} +- kubectl port-forward --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "cassandra.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 9042:{{ .Values.config.ports.cql }} + echo cqlsh 127.0.0.1 9042 +{{- end }} + +You can also see the cluster status by running the following: +- kubectl exec -it --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{.items[0].metadata.name}') nodetool status + +To tail the logs for the Cassandra pod run the following: +- kubectl logs -f --namespace {{ .Release.Namespace }} $(kubectl get pods --namespace {{ .Release.Namespace }} -l app={{ template "cassandra.name" . }},release={{ .Release.Name }} -o jsonpath='{ .items[0].metadata.name }') + +{{- if not .Values.persistence.enabled }} + +Note that the cluster is running with node-local storage instead of PersistentVolumes. In order to prevent data loss, +pods will be decommissioned upon termination. Decommissioning may take some time, so you might also want to adjust the +pod termination grace period, which is currently set to {{ .Values.podSettings.terminationGracePeriodSeconds }} seconds. + +{{- end}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/_helpers.tpl new file mode 100644 index 0000000..b870420 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/_helpers.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "cassandra.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If the release name contains the chart name it will be used as the full name. 
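+For example (hypothetical names): a release "mydb" with this chart renders "mydb-cassandra", while a release "cassandra-prod" already contains the chart name and renders as "cassandra-prod".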
+*/}} +{{- define "cassandra.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "cassandra.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "cassandra.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "cassandra.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/cronjob.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/cronjob.yaml new file mode 100644 index 0000000..3ee3210 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/cronjob.yaml @@ -0,0 +1,90 @@ +{{- if .Values.backup.enabled }} +{{- $release := .Release }} +{{- $values := .Values }} +{{- $backup := $values.backup }} +{{- range $index, $schedule := $backup.schedule }} +--- +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: {{ template "cassandra.fullname" $ }}-backup-{{ $schedule.keyspace | replace "_" "-" }} + labels: + app: {{ template "cassandra.name" $ }}-cain + chart: {{ template "cassandra.chart" $ }} + release: "{{ $release.Name }}" + heritage: "{{ $release.Service }}" +spec: + schedule: {{ $schedule.cron | quote }} + concurrencyPolicy: Forbid + startingDeadlineSeconds: 120 + jobTemplate: + spec: + template: + metadata: + annotations: + {{ toYaml $backup.annotations }} + spec: + restartPolicy: OnFailure + serviceAccountName: {{ template "cassandra.serviceAccountName" $ }} + containers: + - name: cassandra-backup + image: "{{ $backup.image.repository }}:{{ $backup.image.tag }}" + command: ["cain"] + args: + - backup + - --namespace + - {{ $release.Namespace }} + - --selector + - release={{ $release.Name }},app={{ template "cassandra.name" $ }} + - --keyspace + - {{ $schedule.keyspace }} + - --dst + - {{ $backup.destination }} + {{- with $backup.extraArgs }} +{{ toYaml . | indent 12 }} + {{- end }} + env: +{{- if $backup.google.serviceAccountSecret }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/secrets/google/credentials.json" +{{- end }} + {{- with $backup.env }} +{{ toYaml . | indent 12 }} + {{- end }} + {{- with $backup.resources }} + resources: +{{ toYaml . 
| indent 14 }} + {{- end }} +{{- if $backup.google.serviceAccountSecret }} + volumeMounts: + - name: google-service-account + mountPath: /etc/secrets/google/ +{{- end }} +{{- if $backup.google.serviceAccountSecret }} + volumes: + - name: google-service-account + secret: + secretName: {{ $backup.google.serviceAccountSecret | quote }} +{{- end }} + affinity: + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - {{ template "cassandra.fullname" $ }} + - key: release + operator: In + values: + - {{ $release.Name }} + topologyKey: "kubernetes.io/hostname" + {{- with $values.tolerations }} + tolerations: +{{ toYaml . | indent 12 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/rbac.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/rbac.yaml new file mode 100644 index 0000000..12b0f27 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/backup/rbac.yaml @@ -0,0 +1,50 @@ +{{- if .Values.backup.enabled }} +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "cassandra.serviceAccountName" . }} + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +--- +{{- end }} +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "cassandra.fullname" . }}-backup + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +rules: +- apiGroups: [""] + resources: ["pods", "pods/log"] + verbs: ["get", "list"] +- apiGroups: [""] + resources: ["pods/exec"] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "cassandra.fullname" . }}-backup + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "cassandra.fullname" . }}-backup +subjects: +- kind: ServiceAccount + name: {{ template "cassandra.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/configmap.yaml new file mode 100644 index 0000000..4e5ab76 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.configOverrides }} +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "cassandra.name" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "cassandra.name" . 
}} + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: +{{ toYaml .Values.configOverrides | indent 2 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/pdb.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/pdb.yaml new file mode 100644 index 0000000..2e539bd --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/pdb.yaml @@ -0,0 +1,17 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + app: {{ template "cassandra.name" . }} + chart: {{ .Chart.Name }}-{{ .Chart.Version }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + name: {{ template "cassandra.fullname" . }} +spec: + selector: + matchLabels: + app: {{ template "cassandra.name" . }} + release: {{ .Release.Name }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/service.yaml new file mode 100644 index 0000000..da0a84e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/service.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "cassandra.fullname" . }} + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + type: {{ .Values.service.type }} + ports: + {{- if .Values.exporter.enabled }} + - name: metrics + port: 5556 + targetPort: {{ .Values.exporter.port }} + {{- end }} + - name: intra + port: 7000 + targetPort: 7000 + - name: tls + port: 7001 + targetPort: 7001 + - name: jmx + port: 7199 + targetPort: 7199 + - name: cql + port: {{ default 9042 .Values.config.ports.cql }} + targetPort: {{ default 9042 .Values.config.ports.cql }} + - name: thrift + port: {{ default 9160 .Values.config.ports.thrift }} + targetPort: {{ default 9160 .Values.config.ports.thrift }} + {{- if .Values.config.ports.agent }} + - name: agent + port: {{ .Values.config.ports.agent }} + targetPort: {{ .Values.config.ports.agent }} + {{- end }} + selector: + app: {{ template "cassandra.name" . }} + release: {{ .Release.Name }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/servicemonitor.yaml new file mode 100644 index 0000000..cf9408b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/servicemonitor.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.exporter.enabled .Values.exporter.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "cassandra.fullname" . }} + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.exporter.serviceMonitor.additionalLabels }} +{{ toYaml .Values.exporter.serviceMonitor.additionalLabels | indent 4 }} + {{- end }} +spec: + jobLabel: {{ template "cassandra.name" . }} + endpoints: + - port: metrics + interval: 10s + selector: + matchLabels: + app: {{ template "cassandra.name" . }} + namespaceSelector: + any: true +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/statefulset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/statefulset.yaml new file mode 100644 index 0000000..adaab35 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/templates/statefulset.yaml @@ -0,0 +1,229 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "cassandra.fullname" . }} + labels: + app: {{ template "cassandra.name" . }} + chart: {{ template "cassandra.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + selector: + matchLabels: + app: {{ template "cassandra.name" . }} + release: {{ .Release.Name }} + serviceName: {{ template "cassandra.fullname" . }} + replicas: {{ .Values.config.cluster_size }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + template: + metadata: + labels: + app: {{ template "cassandra.name" . }} + release: {{ .Release.Name }} +{{- if .Values.podLabels }} +{{ toYaml .Values.podLabels | indent 8 }} +{{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} +{{- if .Values.selector }} +{{ toYaml .Values.selector | indent 6 }} +{{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} +{{- if .Values.tolerations }} + tolerations: +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} +{{- if .Values.configOverrides }} + initContainers: + - name: config-copier + image: busybox + command: [ 'sh', '-c', 'cp /configmap-files/* /cassandra-configs/ && chown 999:999 /cassandra-configs/*'] + volumeMounts: +{{- range $key, $value := .Values.configOverrides }} + - name: cassandra-config-{{ $key | replace "." "-" | replace "_" "--" }} + mountPath: /configmap-files/{{ $key }} + subPath: {{ $key }} +{{- end }} + - name: cassandra-configs + mountPath: /cassandra-configs/ +{{- end }} + containers: +{{- if .Values.extraContainers }} +{{ tpl (toYaml .Values.extraContainers) . 
| indent 6}} +{{- end }} +{{- if .Values.exporter.enabled }} + - name: cassandra-exporter + image: "{{ .Values.exporter.image.repo }}:{{ .Values.exporter.image.tag }}" + resources: +{{ toYaml .Values.exporter.resources | indent 10 }} + env: + - name: CASSANDRA_EXPORTER_CONFIG_listenPort + value: {{ .Values.exporter.port | quote }} + - name: JVM_OPTS + value: {{ .Values.exporter.jvmOpts | quote }} + ports: + - name: metrics + containerPort: {{ .Values.exporter.port }} + protocol: TCP + - name: jmx + containerPort: 5555 + livenessProbe: + tcpSocket: + port: {{ .Values.exporter.port }} + readinessProbe: + httpGet: + path: /metrics + port: {{ .Values.exporter.port }} + initialDelaySeconds: 20 + timeoutSeconds: 45 +{{- end }} + - name: {{ template "cassandra.fullname" . }} + image: "{{ .Values.image.repo }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} +{{- if .Values.commandOverrides }} + command: {{ .Values.commandOverrides }} +{{- end }} +{{- if .Values.argsOverrides }} + args: {{ .Values.argsOverrides }} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + {{- $seed_size := default 1 .Values.config.seed_size | int -}} + {{- $global := . }} + - name: CASSANDRA_SEEDS + {{- if .Values.hostNetwork }} + value: {{ required "You must fill \".Values.config.seeds\" with list of Cassandra seeds when hostNetwork is set to true" .Values.config.seeds | quote }} + {{- else }} + value: "{{- range $i, $e := until $seed_size }}{{ template "cassandra.fullname" $global }}-{{ $i }}.{{ template "cassandra.fullname" $global }}.{{ $global.Release.Namespace }}.svc.{{ $global.Values.config.cluster_domain }}{{- if (lt ( add1 $i ) $seed_size ) }},{{- end }}{{- end }}" + {{- end }} + - name: MAX_HEAP_SIZE + value: {{ default "8192M" .Values.config.max_heap_size | quote }} + - name: HEAP_NEWSIZE + value: {{ default "200M" .Values.config.heap_new_size | quote }} + - name: CASSANDRA_ENDPOINT_SNITCH + value: {{ default "SimpleSnitch" .Values.config.endpoint_snitch | quote }} + - name: CASSANDRA_CLUSTER_NAME + value: {{ default "Cassandra" .Values.config.cluster_name | quote }} + - name: CASSANDRA_DC + value: {{ default "DC1" .Values.config.dc_name | quote }} + - name: CASSANDRA_RACK + value: {{ default "RAC1" .Values.config.rack_name | quote }} + - name: CASSANDRA_START_RPC + value: {{ default "false" .Values.config.start_rpc | quote }} + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- range $key, $value := .Values.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + livenessProbe: + exec: + command: [ "/bin/sh", "-c", "nodetool status" ] + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + readinessProbe: + exec: + command: [ "/bin/sh", "-c", "nodetool status | grep -E \"^UN\\s+{{ .Values.readinessProbe.address }}\"" ] + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + ports: + - name: intra + containerPort: 7000 + - name: tls + containerPort: 7001 + - name: jmx + 
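+          # 7199 is Cassandra's standard JMX port; it is fixed in this template rather than driven by values.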
containerPort: 7199 + - name: cql + containerPort: {{ default 9042 .Values.config.ports.cql }} + - name: thrift + containerPort: {{ default 9160 .Values.config.ports.thrift }} + {{- if .Values.config.ports.agent }} + - name: agent + containerPort: {{ .Values.config.ports.agent }} + {{- end }} + volumeMounts: + - name: data + mountPath: /var/lib/cassandra + {{- if .Values.configOverrides }} + - name: cassandra-configs + mountPath: /etc/cassandra + {{- end }} + {{- if .Values.extraVolumeMounts }} +{{ toYaml .Values.extraVolumeMounts | indent 8 }} + {{- end }} + {{- if not .Values.persistence.enabled }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "exec nodetool decommission"] + {{- end }} + terminationGracePeriodSeconds: {{ default 30 .Values.podSettings.terminationGracePeriodSeconds }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + - name: {{ .Values.image.pullSecrets }} + {{- end }} +{{- if or .Values.extraVolumes ( or .Values.configOverrides (not .Values.persistence.enabled) ) }} + volumes: +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 6 }} +{{- end }} +{{- range $key, $value := .Values.configOverrides }} + - configMap: + name: {{ template "cassandra.name" $ }} + name: cassandra-config-{{ $key | replace "." "-" | replace "_" "--" }} +{{- end }} +{{- if .Values.configOverrides }} + - name: cassandra-configs + emptyDir: {} +{{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + labels: + app: {{ template "cassandra.name" . }} + release: {{ .Release.Name }} + spec: + accessModes: + - {{ .Values.persistence.accessMode | quote }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClass }} + {{- if (eq "-" .Values.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/values.yaml new file mode 100644 index 0000000..c993e77 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/cassandra/values.yaml @@ -0,0 +1,254 @@ +## Cassandra image version +## ref: https://hub.docker.com/r/library/cassandra/ +image: + repo: cassandra + tag: 3.11.6 + pullPolicy: IfNotPresent + ## Specify ImagePullSecrets for Pods + ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + # pullSecrets: myregistrykey + +## Specify a service type +## ref: http://kubernetes.io/docs/user-guide/services/ +service: + type: ClusterIP + annotations: "" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Persist data to a persistent volume +persistence: + enabled: true + ## cassandra data Persistent Volume Storage Class + ## If defined, storageClassName: <storageClass> + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessMode: ReadWriteOnce + size: 10Gi + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## Minimum memory for development is 4GB and 2 CPU cores +## Minimum memory for production is 8GB and 4 CPU cores +## ref: http://docs.datastax.com/en/archived/cassandra/2.0/cassandra/architecture/architecturePlanningHardware_c.html +resources: {} + # requests: + # memory: 4Gi + # cpu: 2 + # limits: + # memory: 4Gi + # cpu: 2 + +## Change cassandra configuration parameters below: +## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/configuration/configCassandra_yaml.html +## Recommended max heap size is 1/2 of system memory +## Recommended heap new size is 1/4 of max heap size +## ref: http://docs.datastax.com/en/cassandra/3.0/cassandra/operations/opsTuneJVM.html +config: + cluster_domain: cluster.local + cluster_name: cassandra + cluster_size: 3 + seed_size: 2 + num_tokens: 256 + # If you want Cassandra to use this datacenter and rack name, + # you need to set endpoint_snitch to GossipingPropertyFileSnitch. + # Otherwise, these values are ignored and datacenter1 and rack1 + # are used. + dc_name: DC1 + rack_name: RAC1 + endpoint_snitch: SimpleSnitch + max_heap_size: 2048M + heap_new_size: 512M + start_rpc: false + ports: + cql: 9042 + thrift: 9160 + # If a JVM Agent is in place + # agent: 61621 + +## Cassandra config files overrides +configOverrides: {} + +## Cassandra docker command overrides +commandOverrides: [] + +## Cassandra docker args overrides +argsOverrides: [] + +## Custom env variables. +## ref: https://hub.docker.com/_/cassandra/ +env: {} + +## Liveness and Readiness probe values. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +livenessProbe: + initialDelaySeconds: 90 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 +readinessProbe: + initialDelaySeconds: 90 + periodSeconds: 30 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + address: "${POD_IP}" + +## Configure node selector. Edit code below for adding selector to pods +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# selector: + # nodeSelector: + # cloud.google.com/gke-nodepool: pool-db + +## Additional pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Additional pod labels +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Additional pod-level settings +podSettings: + # Change this to give pods more time to properly leave the cluster when not using persistent storage. 
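+  # For example (illustrative only): a node holding tens of gigabytes may need several minutes, e.g. 1800 seconds, for "nodetool decommission" to finish streaming its data away.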
terminationGracePeriodSeconds: 30 + +## Pod disruption budget +podDisruptionBudget: {} + # maxUnavailable: 1 + # minAvailable: 2 + +podManagementPolicy: OrderedReady +updateStrategy: + type: OnDelete + +## Pod Security Context +securityContext: + enabled: false + fsGroup: 999 + runAsUser: 999 + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +affinity: {} + +## Node tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# Use host network for Cassandra pods +# You must pass a seed list via the config.seeds property if set to true +hostNetwork: false + +## Backup cronjob configuration +## Ref: https://github.com/maorfr/cain +backup: + enabled: false + + # Schedule to run jobs. Must be in cron time format + # Ref: https://crontab.guru/ + schedule: + - keyspace: keyspace1 + cron: "0 7 * * *" + - keyspace: keyspace2 + cron: "30 7 * * *" + + annotations: + # Example for authorization to AWS S3 using kube2iam + # Can also be done using environment variables + iam.amazonaws.com/role: cain + + image: + repository: maorfr/cain + tag: 0.6.0 + + # Additional arguments for cain + # Ref: https://github.com/maorfr/cain#usage + extraArgs: [] + + # Add additional environment variables + env: + # Example environment variable required for AWS credentials chain + - name: AWS_REGION + value: us-east-1 + + resources: + requests: + memory: 1Gi + cpu: 1 + limits: + memory: 1Gi + cpu: 1 + + # Name of the secret containing the credentials of the service account used by GOOGLE_APPLICATION_CREDENTIALS, as a credentials.json file + # google: + # serviceAccountSecret: + + # Destination to store the backup artifacts + # Supported cloud storage services: AWS S3, Minio S3, Azure Blob Storage, Google Cloud Storage + # Additional support can be added. 
Visit this repository for details # Ref: https://github.com/maorfr/skbn + destination: s3://bucket/cassandra + +## Cassandra exporter configuration +## ref: https://github.com/criteo/cassandra_exporter +exporter: + enabled: false + serviceMonitor: + enabled: false + additionalLabels: {} + # prometheus: default + image: + repo: criteord/cassandra_exporter + tag: 2.0.2 + port: 5556 + jvmOpts: "" + resources: {} + # limits: + # cpu: 1 + # memory: 1Gi + # requests: + # cpu: 1 + # memory: 1Gi + +extraVolumes: [] +extraVolumeMounts: [] +# extraVolumes and extraVolumeMounts allow you to mount other volumes +# Example Use Case: mount ssl certificates +# extraVolumes: +# - name: cas-certs +# secret: +# defaultMode: 420 +# secretName: cas-certs +# extraVolumeMounts: +# - name: cas-certs +# mountPath: /certs +# readOnly: true + +extraContainers: [] +## Additional containers to be added +# extraContainers: +# - name: cassandra-sidecar +# image: cassandra-sidecar:latest +# volumeMounts: +# - name: some-mount +# mountPath: /some/path diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/Chart.yaml new file mode 100644 index 0000000..bd152e3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.16.0 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 1.16.0 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/README.md new file mode 100644 index 0000000..3b5e09c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/README.md @@ -0,0 +1,350 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. 
| `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | 
+|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context | +| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. | +| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is frequently the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty, we will see some alerts: + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project); this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_affinities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..189ea40 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_affinities.tpl @@ -0,0 +1,102 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include 
"common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_capabilities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_errors.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading with empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - List - Required. List of validation strings to be returned; if it is empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release."
-}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_images.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_images.tpl new file mode 100644 index 0000000..42ffbc7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_images.tpl @@ -0,0 +1,75 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_ingress.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..8caf73a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_labels.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_names.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_secrets.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..a53fb44 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_secrets.tpl @@ -0,0 +1,140 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. 
Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
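+
+A minimal sketch of a typical call site, e.g. skipping generation when a
+previously created Secret is present (the secret name is hypothetical):
+{{- if not (include "common.secrets.exists" (dict "secret" "my-release-secret" "context" $)) }}
+  # ...render a freshly generated password here...
+{{- end }}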
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_storage.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_tplvalues.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_utils.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_warnings.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_cassandra.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mariadb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mongodb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mysql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_postgresql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
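+Note that, as with the other *.values.enabled helpers in this directory, the
+result is rendered as the string "true" or "false", so callers compare it with
+eq, for example (sketch): (eq (include "common.postgresql.values.enabled" .) "true")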
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_redis.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}}
+
+  {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }}
+  {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }}
+
+  {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }}
+  {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }}
+
+  {{- if and (or (not $existingSecretValue) (eq $existingSecretValue "\"\"")) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}}
+    {{- if eq $useAuth "true" -}}
+      {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}}
+      {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}}
+    {{- end -}}
+
+    {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled redis.
+
+Usage:
+{{ include "common.redis.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.redis.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right prefix path for the values
+
+Usage:
+{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false
+*/}}
+{{- define "common.redis.values.keys.prefix" -}}
+  {{- if .subchart -}}redis.{{- else -}}{{- end -}}
+{{- end -}}
+
+{{/*
+Checks whether the redis chart includes the standardized values structure (version >= 14)
+
+Usage:
+{{ include "common.redis.values.standarized.version" (dict "context" $) }}
+*/}}
+{{- define "common.redis.values.standarized.version" -}}
+
+  {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}}
+  {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }}
+
+  {{- if $standarizedAuthValues -}}
+    {{- true -}}
+  {{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_validations.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_validations.tpl
new file mode 100644
index 0000000..9a814cf
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/templates/validations/_validations.tpl
@@ -0,0 +1,46 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate values must not be empty.
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n        %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n    '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
+## @skip exampleValue +## +exampleValue: common-chart diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/.helmignore new file mode 100644 index 0000000..e12c0b4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/.helmignore @@ -0,0 +1,2 @@ +tests/ +.pytest_cache/ diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Chart.yaml new file mode 100644 index 0000000..0b417cb --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +appVersion: 7.17.3 +description: Official Elastic helm chart for Elasticsearch +home: https://github.com/elastic/helm-charts +icon: https://helm.elastic.co/icons/elasticsearch.png +maintainers: +- email: helm-charts@elastic.co + name: Elastic +name: elasticsearch +sources: +- https://github.com/elastic/elasticsearch +version: 7.17.3 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Makefile new file mode 100644 index 0000000..22218a1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/Makefile @@ -0,0 +1 @@ +include ../helpers/common.mk diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/README.md new file mode 100644 index 0000000..6e4fd02 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/README.md @@ -0,0 +1,459 @@ +# Elasticsearch Helm Chart + +[![Build Status](https://img.shields.io/jenkins/s/https/devops-ci.elastic.co/job/elastic+helm-charts+master.svg)](https://devops-ci.elastic.co/job/elastic+helm-charts+master/) [![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/elastic)](https://artifacthub.io/packages/search?repo=elastic) + +This Helm chart is a lightweight way to configure and run our official +[Elasticsearch Docker image][]. 
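+
+To preview what the chart renders for a given set of values without installing
+anything, `helm template` works well (a quick sketch; it assumes the `elastic`
+repo from the Installing section below has already been added):
+
+```
+helm template elasticsearch elastic/elasticsearch --version 7.17.3
+```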
+
+
+
+
+
+
+- [Requirements](#requirements)
+- [Installing](#installing)
+  - [Install released version using Helm repository](#install-released-version-using-helm-repository)
+  - [Install development version from a branch](#install-development-version-from-a-branch)
+- [Upgrading](#upgrading)
+- [Usage notes](#usage-notes)
+- [Configuration](#configuration)
+  - [Deprecated](#deprecated)
+- [FAQ](#faq)
+  - [How to deploy this chart on a specific K8S distribution?](#how-to-deploy-this-chart-on-a-specific-k8s-distribution)
+  - [How to deploy dedicated nodes types?](#how-to-deploy-dedicated-nodes-types)
+  - [Clustering and Node Discovery](#clustering-and-node-discovery)
+  - [How to deploy clusters with security (authentication and TLS) enabled?](#how-to-deploy-clusters-with-security-authentication-and-tls-enabled)
+  - [How to migrate from helm/charts stable chart?](#how-to-migrate-from-helmcharts-stable-chart)
+  - [How to install plugins?](#how-to-install-plugins)
+  - [How to use the keystore?](#how-to-use-the-keystore)
+    - [Basic example](#basic-example)
+    - [Multiple keys](#multiple-keys)
+    - [Custom paths and keys](#custom-paths-and-keys)
+  - [How to enable snapshotting?](#how-to-enable-snapshotting)
+  - [How to configure templates post-deployment?](#how-to-configure-templates-post-deployment)
+- [Contributing](#contributing)
+
+
+
+
+
+## Requirements
+
+* Kubernetes >= 1.14
+* [Helm][] >= 2.17.0
+* Minimum cluster requirements include the following to run this chart with
+default settings. All of these settings are configurable.
+  * Three Kubernetes nodes to respect the default "hard" affinity settings
+  * 1GB of RAM for the JVM heap
+
+See [supported configurations][] for more details.
+
+## Installing
+
+This chart is tested with the latest 7.17.3 version.
+
+### Install released version using Helm repository
+
+* Add the Elastic Helm charts repo:
+`helm repo add elastic https://helm.elastic.co`
+
+* Install it:
+  - with Helm 3: `helm install elasticsearch --version <version> elastic/elasticsearch`
+  - with Helm 2 (deprecated): `helm install --name elasticsearch --version <version> elastic/elasticsearch`
+
+### Install development version from a branch
+
+* Clone the git repo: `git clone git@github.com:elastic/helm-charts.git`
+
+* Check out the branch: `git checkout 7.17`
+
+* Install it:
+  - with Helm 3: `helm install elasticsearch ./helm-charts/elasticsearch --set imageTag=7.17.3`
+  - with Helm 2 (deprecated): `helm install --name elasticsearch ./helm-charts/elasticsearch --set imageTag=7.17.3`
+
+
+## Upgrading
+
+Please always check [CHANGELOG.md][] and [BREAKING_CHANGES.md][] before
+upgrading to a new chart version.
+
+
+## Usage notes
+
+* This repo includes a number of [examples][] which can be used as a reference.
+They are also used in the automated testing of this chart.
+* Automated testing of this chart is currently only run against GKE (Google
+Kubernetes Engine).
+* The chart deploys a StatefulSet and by default will do an automated rolling
+update of your cluster. It does this by waiting for the cluster health to become
+green after each instance is updated. If you prefer to update manually, you can
+set the `OnDelete` [updateStrategy][].
+* It is important to verify the JVM heap size in `esJavaOpts` and to set the
+CPU/Memory `resources` to something suitable for your cluster.
+* To simplify the chart and its maintenance, each set of node groups is deployed
+as a separate Helm release. Take a look at the [multi][] example to get an idea of
Without doing this it isn't possible to resize persistent +volumes in a StatefulSet. By setting it up this way it makes it possible to add +more nodes with a new storage size then drain the old ones. It also solves the +problem of allowing the user to determine which node groups to update first when +doing upgrades or changes. +* We have designed this chart to be very un-opinionated about how to configure +Elasticsearch. It exposes ways to set environment variables and mount secrets +inside of the container. Doing this makes it much easier for this chart to +support multiple versions with minimal changes. + + +## Configuration + +| Parameter | Description | Default | +|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------| +| `antiAffinityTopologyKey` | The [anti-affinity][] topology key. By default this will prevent multiple Elasticsearch nodes from running on the same Kubernetes node | `kubernetes.io/hostname` | +| `antiAffinity` | Setting this to hard enforces the [anti-affinity][] rules. If it is set to soft it will be done "best effort". Other values will be ignored | `hard` | +| `clusterHealthCheckParams` | The [Elasticsearch cluster health status params][] that will be used by readiness [probe][] command | `wait_for_status=green&timeout=1s` | +| `clusterName` | This will be used as the Elasticsearch [cluster.name][] and should be unique per cluster in the namespace | `elasticsearch` | +| `clusterDeprecationIndexing` | Enable or disable deprecation logs to be indexed (should be disabled when deploying master only node groups) | `false` | +| `enableServiceLinks` | Set to false to disabling service links, which can cause slow pod startup times when there are many services in the current namespace. | `true` | +| `envFrom` | Templatable string to be passed to the [environment from variables][] which will be appended to the `envFrom:` definition for the container | `[]` | +| `esConfig` | Allows you to add any config files in `/usr/share/elasticsearch/config/` such as `elasticsearch.yml` and `log4j2.properties`. See [values.yaml][] for an example of the formatting | `{}` | +| `esJavaOpts` | [Java options][] for Elasticsearch. This is where you could configure the [jvm heap size][] | `""` | +| `esJvmOptions` | [Java options][] for Elasticsearch. Override the default JVM options by adding custom options files . See [values.yaml][] for an example of the formatting | `{}` | +| `esMajorVersion` | Deprecated. Instead, use the version of the chart corresponding to your ES minor version. Used to set major version specific configuration. If you are using a custom image and not running the default Elasticsearch version you will need to set this to the version you are running (e.g. 
`esMajorVersion: 6`) | `""` |
+| `extraContainers` | Templatable string of additional `containers` to be passed to the `tpl` function | `""` |
+| `extraEnvs` | Extra [environment variables][] which will be appended to the `env:` definition for the container | `[]` |
+| `extraInitContainers` | Templatable string of additional `initContainers` to be passed to the `tpl` function | `""` |
+| `extraVolumeMounts` | Templatable string of additional `volumeMounts` to be passed to the `tpl` function | `""` |
+| `extraVolumes` | Templatable string of additional `volumes` to be passed to the `tpl` function | `""` |
+| `fullnameOverride` | Overrides the `clusterName` and `nodeGroup` when used in the naming of resources. This should only be used when using a single `nodeGroup`, otherwise you will have name conflicts | `""` |
+| `healthNameOverride` | Overrides the `test-elasticsearch-health` pod name | `""` |
+| `hostAliases` | Configurable [hostAliases][] | `[]` |
+| `httpPort` | The http port that Kubernetes will use for the healthchecks and the service. If you change this you will also need to set [http.port][] in `extraEnvs` | `9200` |
+| `imagePullPolicy` | The Kubernetes [imagePullPolicy][] value | `IfNotPresent` |
+| `imagePullSecrets` | Configuration for [imagePullSecrets][] so that you can use a private registry for your image | `[]` |
+| `imageTag` | The Elasticsearch Docker image tag | `7.17.3` |
+| `image` | The Elasticsearch Docker image | `docker.elastic.co/elasticsearch/elasticsearch` |
+| `ingress` | Configurable [ingress][] to expose the Elasticsearch service. See [values.yaml][] for an example | see [values.yaml][] |
+| `initResources` | Allows you to set the [resources][] for the `initContainer` in the StatefulSet | `{}` |
+| `keystore` | Allows you to map Kubernetes secrets into the keystore. See the [config example][] and [how to use the keystore][] | `[]` |
+| `labels` | Configurable [labels][] applied to all Elasticsearch pods | `{}` |
+| `lifecycle` | Allows you to add [lifecycle hooks][]. See [values.yaml][] for an example of the formatting | `{}` |
+| `masterService` | The service name used to connect to the masters. You only need to set this if your master `nodeGroup` is set to something other than `master`. See [Clustering and Node Discovery][] for more information | `""` |
+| `maxUnavailable` | The [maxUnavailable][] value for the pod disruption budget. By default this will prevent Kubernetes from having more than 1 unhealthy pod in the node group | `1` |
+| `minimumMasterNodes` | The value for [discovery.zen.minimum_master_nodes][]. Should be set to `(master_eligible_nodes / 2) + 1`. Ignored in Elasticsearch versions >= 7 | `2` |
+| `nameOverride` | Overrides the `clusterName` when used in the naming of resources | `""` |
+| `networkHost` | Value for the [network.host Elasticsearch setting][] | `0.0.0.0` |
+| `networkPolicy` | The [NetworkPolicy](https://kubernetes.io/docs/concepts/services-networking/network-policies/) to set. See [`values.yaml`](./values.yaml) for an example | `{http.enabled: false,transport.enabled: false}` |
+| `nodeAffinity` | Value for the [node affinity settings][] | `{}` |
+| `nodeGroup` | This is the name that will be used for each group of nodes in the cluster.
The name will be `clusterName-nodeGroup-X`, `nameOverride-nodeGroup-X` if a `nameOverride` is specified, and `fullnameOverride-X` if a `fullnameOverride` is specified | `master` |
+| `nodeSelector` | Configurable [nodeSelector][] so that you can target specific nodes for your Elasticsearch cluster | `{}` |
+| `persistence` | Enables a persistent volume for Elasticsearch data. Can be disabled for nodes that only have [roles][] which don't require persistent data | see [values.yaml][] |
+| `podAnnotations` | Configurable [annotations][] applied to all Elasticsearch pods | `{}` |
+| `podManagementPolicy` | By default Kubernetes [deploys StatefulSets serially][]. This deploys them in parallel so that they can discover each other | `Parallel` |
+| `podSecurityContext` | Allows you to set the [securityContext][] for the pod | see [values.yaml][] |
+| `podSecurityPolicy` | Configuration for creating a pod security policy with minimal permissions to run this Helm chart with `create: true`. Can also be used to reference an external pod security policy with `name: "externalPodSecurityPolicy"` | see [values.yaml][] |
+| `priorityClassName` | The name of the [PriorityClass][]. No default is supplied as the PriorityClass must be created first | `""` |
+| `protocol` | The protocol that will be used for the readiness [probe][]. Change this to `https` if you have `xpack.security.http.ssl.enabled` set | `http` |
+| `rbac` | Configuration for creating a role, role binding and ServiceAccount as part of this Helm chart with `create: true`. Can also be used to reference an external ServiceAccount with `serviceAccountName: "externalServiceAccountName"`, or to automount the service account token | see [values.yaml][] |
+| `readinessProbe` | Configuration fields for the readiness [probe][] | see [values.yaml][] |
+| `replicas` | Kubernetes replica count for the StatefulSet (i.e. how many pods) | `3` |
+| `resources` | Allows you to set the [resources][] for the StatefulSet | see [values.yaml][] |
+| `roles` | A hash map with the specific [roles][] for the `nodeGroup` | see [values.yaml][] |
+| `schedulerName` | Name of the [alternate scheduler][] | `""` |
+| `secretMounts` | Allows you to easily mount a secret as a file inside the StatefulSet. Useful for mounting certificates and other secrets. See [values.yaml][] for an example | `[]` |
+| `securityContext` | Allows you to set the [securityContext][] for the container | see [values.yaml][] |
+| `service.annotations` | [LoadBalancer annotations][] that Kubernetes will use for the service. This will configure the load balancer if `service.type` is `LoadBalancer` | `{}` |
+| `service.enabled` | Enable the non-headless service | `true` |
+| `service.externalTrafficPolicy` | Some cloud providers allow you to specify the [LoadBalancer externalTrafficPolicy][]. Kubernetes will use this to preserve the client source IP. This will configure the load balancer if `service.type` is `LoadBalancer` | `""` |
+| `service.httpPortName` | The name of the http port within the service | `http` |
+| `service.labelsHeadless` | Labels to be added to the headless service | `{}` |
+| `service.labels` | Labels to be added to the non-headless service | `{}` |
+| `service.loadBalancerIP` | Some cloud providers allow you to specify the [loadBalancer][] IP. If the `loadBalancerIP` field is not specified, the IP is dynamically assigned. If you specify a `loadBalancerIP` but your cloud provider does not support the feature, it is ignored.
| `""` | +| `service.loadBalancerSourceRanges` | The IP ranges that are allowed to access | `[]` | +| `service.nodePort` | Custom [nodePort][] port that can be set if you are using `service.type: nodePort` | `""` | +| `service.transportPortName` | The name of the transport port within the service | `transport` | +| `service.publishNotReadyAddresses` | Consider that all endpoints are considered "ready" even if the Pods themselves are not | `false` | +| `service.type` | Elasticsearch [Service Types][] | `ClusterIP` | +| `sysctlInitContainer` | Allows you to disable the `sysctlInitContainer` if you are setting [sysctl vm.max_map_count][] with another method | `enabled: true` | +| `sysctlVmMaxMapCount` | Sets the [sysctl vm.max_map_count][] needed for Elasticsearch | `262144` | +| `terminationGracePeriod` | The [terminationGracePeriod][] in seconds used when trying to stop the pod | `120` | +| `tests.enabled` | Enable creating test related resources when running `helm template` or `helm test` | `true` | +| `tolerations` | Configurable [tolerations][] | `[]` | +| `transportPort` | The transport port that Kubernetes will use for the service. If you change this you will also need to set [transport port configuration][] in `extraEnvs` | `9300` | +| `updateStrategy` | The [updateStrategy][] for the StatefulSet. By default Kubernetes will wait for the cluster to be green after upgrading each pod. Setting this to `OnDelete` will allow you to manually delete each pod during upgrades | `RollingUpdate` | +| `volumeClaimTemplate` | Configuration for the [volumeClaimTemplate for StatefulSets][]. You will want to adjust the storage (default `30Gi` ) and the `storageClassName` if you are using a different storage class | see [values.yaml][] | + +### Deprecated + +| Parameter | Description | Default | +|-----------|---------------------------------------------------------------------------------------------------------------|---------| +| `fsGroup` | The Group ID (GID) for [securityContext][] so that the Elasticsearch user can read from the persistent volume | `""` | + + +## FAQ + +### How to deploy this chart on a specific K8S distribution? + +This chart is designed to run on production scale Kubernetes clusters with +multiple nodes, lots of memory and persistent storage. For that reason it can be +a bit tricky to run them against local Kubernetes environments such as +[Minikube][]. + +This chart is highly tested with [GKE][], but some K8S distribution also +requires specific configurations. + +We provide examples of configuration for the following K8S providers: + +- [Docker for Mac][] +- [KIND][] +- [Minikube][] +- [MicroK8S][] +- [OpenShift][] + +### How to deploy dedicated nodes types? + +All the Elasticsearch pods deployed share the same configuration. If you need to +deploy dedicated [nodes types][] (for example dedicated master and data nodes), +you can deploy multiple releases of this chart with different configurations +while they share the same `clusterName` value. + +For each Helm release, the nodes types can then be defined using `roles` value. + +An example of Elasticsearch cluster using 2 different Helm releases for master +and data nodes can be found in [examples/multi][]. + +#### Clustering and Node Discovery + +This chart facilitates Elasticsearch node discovery and services by creating two +`Service` definitions in Kubernetes, one with the name `$clusterName-$nodeGroup` +and another named `$clusterName-$nodeGroup-headless`. 
+Only `Ready` pods are a part of the `$clusterName-$nodeGroup` service, while all
+pods (`Ready` or not) are a part of `$clusterName-$nodeGroup-headless`.
+
+If your group of master nodes has the default `nodeGroup: master` then you can
+just add new groups of nodes with a different `nodeGroup` and they will
+automatically discover the correct master. If your master nodes have a different
+`nodeGroup` name then you will need to set `masterService` to
+`$clusterName-$masterNodeGroup`.
+
+The chart value for `masterService` is used to populate
+`discovery.zen.ping.unicast.hosts`, which Elasticsearch nodes will use to
+contact master nodes and form a cluster.
+Therefore, to add a group of nodes to an existing cluster, setting
+`masterService` to the desired `Service` name of the related cluster is
+sufficient.
+
+### How to deploy clusters with security (authentication and TLS) enabled?
+
+This Helm chart can use existing [Kubernetes secrets][] to set up credentials or
+certificates, for example. These secrets should be created outside of this chart
+and accessed using [environment variables][] and volumes.
+
+An example of an Elasticsearch cluster using security can be found in
+[examples/security][].
+
+### How to migrate from helm/charts stable chart?
+
+If you currently have a cluster deployed with the [helm/charts stable][] chart
+you can follow the [migration guide][].
+
+### How to install plugins?
+
+The recommended way to install plugins into our Docker images is to create a
+[custom Docker image][].
+
+The Dockerfile would look something like:
+
+```
+ARG elasticsearch_version
+FROM docker.elastic.co/elasticsearch/elasticsearch:${elasticsearch_version}
+
+RUN bin/elasticsearch-plugin install --batch repository-gcs
+```
+
+Then update the `image` in values to point to your custom image.
+
+There are a couple of reasons we recommend this:
+
+1. Tying the availability of Elasticsearch to the download service in order to
+install plugins is not a great idea or something that we recommend, especially
+in Kubernetes where it is normal and expected for a container to be moved to
+another host at random times.
+2. Mutating the state of a running Docker image (by installing plugins) goes
+against the best practices of containers and immutable infrastructure.
+
+### How to use the keystore?
+
+#### Basic example
+
+Create the secret: the key name needs to be the keystore key path. In this
+example we will create a secret from a file and from a literal string.
+
+```
+kubectl create secret generic encryption-key --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+kubectl create secret generic slack-hook --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+```
+
+To add these secrets to the keystore:
+
+```
+keystore:
+  - secretName: encryption-key
+  - secretName: slack-hook
+```
+
+#### Multiple keys
+
+All keys in the secret will be added to the keystore.
To create the previous +example in one secret you could also do: + +``` +kubectl create secret generic keystore-secrets --from-file=xpack.watcher.encryption_key=./watcher_encryption_key --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +``` +keystore: + - secretName: keystore-secrets +``` + +#### Custom paths and keys + +If you are using these secrets for other applications (besides the Elasticsearch +keystore) then it is also possible to specify the keystore path and which keys +you want to add. Everything specified under each `keystore` item will be passed +through to the `volumeMounts` section for mounting the [secret][]. In this +example we will only add the `slack_hook` key from a secret that also has other +keys. Our secret looks like this: + +``` +kubectl create secret generic slack-secrets --from-literal=slack_channel='#general' --from-literal=slack_hook='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' +``` + +We only want to add the `slack_hook` key to the keystore at path +`xpack.notification.slack.account.monitoring.secure_url`: + +``` +keystore: + - secretName: slack-secrets + items: + - key: slack_hook + path: xpack.notification.slack.account.monitoring.secure_url +``` + +You can also take a look at the [config example][] which is used as part of the +automated testing pipeline. + +### How to enable snapshotting? + +1. Install your [snapshot plugin][] into a custom Docker image following the +[how to install plugins guide][]. +2. Add any required secrets or credentials into an Elasticsearch keystore +following the [how to use the keystore][] guide. +3. Configure the [snapshot repository][] as you normally would. +4. To automate snapshots you can use [Snapshot Lifecycle Management][] or a tool +like [curator][]. + +### How to configure templates post-deployment? + +You can use `postStart` [lifecycle hooks][] to run code triggered after a +container is created. + +Here is an example of `postStart` hook to configure templates: + +```yaml +lifecycle: + postStart: + exec: + command: + - bash + - -c + - | + #!/bin/bash + # Add a template to adjust number of shards/replicas + TEMPLATE_NAME=my_template + INDEX_PATTERN="logstash-*" + SHARD_COUNT=8 + REPLICA_COUNT=1 + ES_URL=http://localhost:9200 + while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' +``` + + +## Contributing + +Please check [CONTRIBUTING.md][] before any contribution or for any questions +about our development and testing process. 
+ +[7.17]: https://github.com/elastic/helm-charts/releases +[#63]: https://github.com/elastic/helm-charts/issues/63 +[BREAKING_CHANGES.md]: https://github.com/elastic/helm-charts/blob/master/BREAKING_CHANGES.md +[CHANGELOG.md]: https://github.com/elastic/helm-charts/blob/master/CHANGELOG.md +[CONTRIBUTING.md]: https://github.com/elastic/helm-charts/blob/master/CONTRIBUTING.md +[alternate scheduler]: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/#specify-schedulers-for-pods +[annotations]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +[anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +[cluster.name]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/important-settings.html#cluster-name +[clustering and node discovery]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/README.md#clustering-and-node-discovery +[config example]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/config/values.yaml +[curator]: https://www.elastic.co/guide/en/elasticsearch/client/curator/7.9/snapshot.html +[custom docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/docker.html#_c_customized_image +[deploys statefulsets serially]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +[discovery.zen.minimum_master_nodes]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/discovery-settings.html#minimum_master_nodes +[docker for mac]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/docker-for-mac +[elasticsearch cluster health status params]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/cluster-health.html#request-params +[elasticsearch docker image]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/docker.html +[environment variables]: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/#using-environment-variables-inside-of-your-config +[environment from variables]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables +[examples]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/ +[examples/multi]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi +[examples/security]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/security +[gke]: https://cloud.google.com/kubernetes-engine +[helm]: https://helm.sh +[helm/charts stable]: https://github.com/helm/charts/tree/master/stable/elasticsearch/ +[how to install plugins guide]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/README.md#how-to-install-plugins +[how to use the keystore]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/README.md#how-to-use-the-keystore +[http.port]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/modules-http.html#_settings +[imagePullPolicy]: https://kubernetes.io/docs/concepts/containers/images/#updating-images +[imagePullSecrets]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-pod-that-uses-your-secret +[ingress]: https://kubernetes.io/docs/concepts/services-networking/ingress/ +[java options]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/jvm-options.html +[jvm heap size]: 
https://www.elastic.co/guide/en/elasticsearch/reference/7.17/heap-size.html +[hostAliases]: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +[kind]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/kubernetes-kind +[kubernetes secrets]: https://kubernetes.io/docs/concepts/configuration/secret/ +[labels]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[lifecycle hooks]: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ +[loadBalancer annotations]: https://kubernetes.io/docs/concepts/services-networking/service/#ssl-support-on-aws +[loadBalancer externalTrafficPolicy]: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip +[loadBalancer]: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer +[maxUnavailable]: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +[migration guide]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/migration/README.md +[minikube]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/minikube +[microk8s]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/microk8s +[multi]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi/ +[network.host elasticsearch setting]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/network.host.html +[node affinity settings]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +[node-certificates]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/configuring-tls.html#node-certificates +[nodePort]: https://kubernetes.io/docs/concepts/services-networking/service/#nodeport +[nodes types]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/modules-node.html +[nodeSelector]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector +[openshift]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/openshift +[priorityClass]: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +[probe]: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ +[resources]: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +[roles]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/modules-node.html +[secret]: https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets +[securityContext]: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +[service types]: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types +[snapshot lifecycle management]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/snapshot-lifecycle-management.html +[snapshot plugin]: https://www.elastic.co/guide/en/elasticsearch/plugins/7.17/repository.html +[snapshot repository]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/modules-snapshots.html +[supported configurations]: https://github.com/elastic/helm-charts/tree/7.17/README.md#supported-configurations +[sysctl vm.max_map_count]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/vm-max-map-count.html#vm-max-map-count +[terminationGracePeriod]: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +[tolerations]: 
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+[transport port configuration]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/modules-transport.html#_transport_settings
+[updateStrategy]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/
+[values.yaml]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/values.yaml
+[volumeClaimTemplate for statefulsets]: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-storage
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/Makefile
new file mode 100644
index 0000000..9ae9c37
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/Makefile
@@ -0,0 +1,21 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-config
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+secrets:
+	kubectl delete secret elastic-config-credentials elastic-config-secret elastic-config-slack elastic-config-custom-path || true
+	kubectl create secret generic elastic-config-credentials --from-literal=password=changeme --from-literal=username=elastic
+	kubectl create secret generic elastic-config-slack --from-literal=xpack.notification.slack.account.monitoring.secure_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd'
+	kubectl create secret generic elastic-config-secret --from-file=xpack.watcher.encryption_key=./watcher_encryption_key
+	kubectl create secret generic elastic-config-custom-path --from-literal=slack_url='https://hooks.slack.com/services/asdasdasd/asdasdas/asdasd' --from-literal=thing_i_don_tcare_about=test
+
+test: secrets install goss
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/README.md
new file mode 100644
index 0000000..410d754
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/README.md
@@ -0,0 +1,35 @@
+# Config
+
+This example deploys a single-node Elasticsearch 7.17.3 cluster with
+authentication and custom [values][].
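+
+Once the pod is up (after `make install` below), a quick way to confirm that the
+secret-backed keystore entries actually landed is to list them in the running
+pod. This is a sketch; `config-master-0` assumes the chart's default
+`clusterName-nodeGroup-N` pod naming:
+
+```
+kubectl exec config-master-0 -- elasticsearch-keystore list
+```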
+
+
+## Usage
+
+* Create the required secrets: `make secrets`
+
+* Deploy the Elasticsearch chart with the example values: `make install`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/config-master 9200
+  curl -u elastic:changeme http://localhost:9200/_cat/indices
+  ```
+
+
+## Testing
+
+You can also run the [goss integration tests][] using `make test`.
+
+
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/config/test/goss.yaml
+[values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/config/values.yaml
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/test/goss.yaml
new file mode 100644
index 0000000..752db8d
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/test/goss.yaml
@@ -0,0 +1,29 @@
+http:
+  http://localhost:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    body:
+      - "green"
+      - '"number_of_nodes":1'
+      - '"number_of_data_nodes":1'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    body:
+      - '"cluster_name" : "config"'
+      - "You Know, for Search"
+
+command:
+  "elasticsearch-keystore list":
+    exit-status: 0
+    stdout:
+      - keystore.seed
+      - bootstrap.password
+      - xpack.notification.slack.account.monitoring.secure_url
+      - xpack.notification.slack.account.otheraccount.secure_url
+      - xpack.watcher.encryption_key
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/values.yaml
new file mode 100644
index 0000000..13cff2c
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/values.yaml
@@ -0,0 +1,27 @@
+---
+
+clusterName: "config"
+replicas: 1
+
+extraEnvs:
+  - name: ELASTIC_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: elastic-config-credentials
+        key: password
+
+# This is just a dummy file to make sure that
+# the keystore can be mounted at the same time
+# as a custom elasticsearch.yml
+esConfig:
+  elasticsearch.yml: |
+    xpack.security.enabled: true
+    path.data: /usr/share/elasticsearch/data
+
+keystore:
+  - secretName: elastic-config-secret
+  - secretName: elastic-config-slack
+  - secretName: elastic-config-custom-path
+    items:
+      - key: slack_url
+        path: xpack.notification.slack.account.otheraccount.secure_url
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/watcher_encryption_key b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/watcher_encryption_key
new file mode 100644
index 0000000..b5f9078
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/config/watcher_encryption_key
@@ -0,0 +1 @@
+supersecret
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/Makefile
new file mode 100644
index 0000000..389bf99
--- /dev/null
+++ 
b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/Makefile
@@ -0,0 +1,14 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-default
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install $(RELEASE) ../../
+
+test: install goss
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/README.md
new file mode 100644
index 0000000..c50ea2a
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/README.md
@@ -0,0 +1,25 @@
+# Default
+
+This example deploys a 3-node Elasticsearch 7.17.3 cluster using
+[default values][].
+
+
+## Usage
+
+* Deploy the Elasticsearch chart with the default values: `make install`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+## Testing
+
+You can also run the [goss integration tests][] using `make test`.
+
+
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/default/test/goss.yaml
+[default values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/values.yaml
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/rolling_upgrade.sh b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/rolling_upgrade.sh
new file mode 100644
index 0000000..c5a2a88
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/rolling_upgrade.sh
@@ -0,0 +1,20 @@
+#!/bin/bash -x
+
+kubectl proxy || true &
+
+make &
+PROC_ID=$!
+
+while kill -0 "$PROC_ID" >/dev/null 2>&1; do
+  echo "PROCESS IS RUNNING"
+  if curl --fail 'http://localhost:8001/api/v1/proxy/namespaces/default/services/elasticsearch-master:9200/_search' ; then
+    echo "cluster is healthy"
+  else
+    echo "cluster not healthy!"
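+    # the cluster stopped answering searches mid-upgrade; fail fast so the rollout can be investigated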
+    exit 1
+  fi
+  sleep 1
+done
+echo "PROCESS TERMINATED"
+exit 0
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/test/goss.yaml
new file mode 100644
index 0000000..db9b68b
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/default/test/goss.yaml
@@ -0,0 +1,38 @@
+kernel-param:
+  vm.max_map_count:
+    value: "262144"
+
+http:
+  http://elasticsearch-master:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    body:
+      - "green"
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    body:
+      - '"number" : "7.17.3"'
+      - '"cluster_name" : "elasticsearch"'
+      - "You Know, for Search"
+
+file:
+  /usr/share/elasticsearch/data:
+    exists: true
+    mode: "2775"
+    owner: root
+    group: elasticsearch
+    filetype: directory
+
+mount:
+  /usr/share/elasticsearch/data:
+    exists: true
+
+user:
+  elasticsearch:
+    exists: true
+    uid: 1000
+    gid: 1000
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/Makefile
new file mode 100644
index 0000000..18fd053
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/Makefile
@@ -0,0 +1,13 @@
+default: test
+
+RELEASE := helm-es-docker-for-mac
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/README.md
new file mode 100644
index 0000000..1b5f306
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/README.md
@@ -0,0 +1,23 @@
+# Docker for Mac
+
+This example deploys a 3-node Elasticsearch 7.17.3 cluster on [Docker for Mac][]
+using [custom values][].
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
+
+
+## Usage
+
+* Deploy the Elasticsearch chart with the example values: `make install`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+[custom values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/docker-for-mac/values.yaml
+[docker for mac]: https://docs.docker.com/docker-for-mac/kubernetes/
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/values.yaml
new file mode 100644
index 0000000..f7deba6
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/docker-for-mac/values.yaml
@@ -0,0 +1,23 @@
+---
+# Permit co-located instances for solitary Docker for Mac virtual machines.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "hostpath"
+  resources:
+    requests:
+      storage: 100M
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/Makefile
new file mode 100644
index 0000000..9e5602d
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/Makefile
@@ -0,0 +1,17 @@
+default: test
+
+RELEASE := helm-es-kind
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+install-local-path:
+	kubectl apply -f https://raw.githubusercontent.com/rancher/local-path-provisioner/master/deploy/local-path-storage.yaml
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values-local-path.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/README.md
new file mode 100644
index 0000000..a026c6f
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/README.md
@@ -0,0 +1,48 @@
+# KIND
+
+This example deploys a 3-node Elasticsearch 7.17.3 cluster on [Kind][]
+using [custom values][].
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
+
+Note that Kind versions < 0.7.0 are affected by a [kind issue][]: mount points
+created from PVCs are not writable by non-root users.
+[kubernetes-sigs/kind#1157][] fixed this in Kind 0.7.0.
+
+The workaround for Kind < 0.7.0 is to manually install the
+[Rancher Local Path Provisioner][] and use the `local-path` storage class for
+Elasticsearch volumes (see [Makefile][] instructions).
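+
+For reference, the override used by `make install-local-path` amounts to
+pointing the volume claims at that storage class. A minimal sketch of such a
+`values-local-path.yaml` (the example ships its own copy):
+
+```
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "local-path"
+  resources:
+    requests:
+      storage: 100M
+```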
+
+
+## Usage
+
+* For Kind >= 0.7.0: Deploy the Elasticsearch chart with the example values:
+`make install`
+* For Kind < 0.7.0: Deploy the Elasticsearch chart with the `local-path` storage
+class: `make install-local-path`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+[custom values]: https://github.com/elastic/helm-charts/blob/7.17/elasticsearch/examples/kubernetes-kind/values.yaml
+[kind]: https://kind.sigs.k8s.io/
+[kind issue]: https://github.com/kubernetes-sigs/kind/issues/830
+[kubernetes-sigs/kind#1157]: https://github.com/kubernetes-sigs/kind/pull/1157
+[rancher local path provisioner]: https://github.com/rancher/local-path-provisioner
+[Makefile]: https://github.com/elastic/helm-charts/blob/7.17/elasticsearch/examples/kubernetes-kind/Makefile#L5
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/values.yaml
new file mode 100644
index 0000000..500ad4b
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/kubernetes-kind/values.yaml
@@ -0,0 +1,23 @@
+---
+# Permit co-located instances for solitary single-node Kind clusters.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "local-path"
+  resources:
+    requests:
+      storage: 100M
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/Makefile
new file mode 100644
index 0000000..2d0012d
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/Makefile
@@ -0,0 +1,13 @@
+default: test
+
+RELEASE := helm-es-microk8s
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/README.md
new file mode 100644
index 0000000..bd611bd
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/README.md
@@ -0,0 +1,39 @@
+# MicroK8S
+
+This example deploys a 3-node Elasticsearch 7.17.3 cluster on [MicroK8S][]
+using [custom values][].
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
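+
+The addons listed under Requirements below can usually be enabled in one go
+(addon names can vary between MicroK8S releases, so treat this as a sketch):
+
+```
+microk8s enable dns helm storage
+```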
+
+
+## Requirements
+
+The following MicroK8S [addons][] need to be enabled:
+- `dns`
+- `helm`
+- `storage`
+
+
+## Usage
+
+* Deploy the Elasticsearch chart with the example values: `make install`
+
+* You can now set up a port forward to query the Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+[addons]: https://microk8s.io/docs/addons
+[custom values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/microk8s/values.yaml
+[MicroK8S]: https://microk8s.io
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/values.yaml
new file mode 100644
index 0000000..2627ecb
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/microk8s/values.yaml
@@ -0,0 +1,32 @@
+---
+# Disable privileged init Container creation.
+sysctlInitContainer:
+  enabled: false
+
+# Restrict the use of memory-mapping when sysctlInitContainer is disabled.
+esConfig:
+  elasticsearch.yml: |
+    node.store.allow_mmap: false
+
+# Permit co-located instances for solitary MicroK8S virtual machines.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "microk8s-hostpath"
+  resources:
+    requests:
+      storage: 100M
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/Makefile
new file mode 100644
index 0000000..020906f
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/Makefile
@@ -0,0 +1,11 @@
+PREFIX := helm-es-migration
+TIMEOUT := 1200s
+
+data:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
+
+master:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
+
+client:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/README.md
new file mode 100644
index 0000000..fe6333d
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/README.md
@@ -0,0 +1,167 @@
+# Migration Guide from helm/charts
+
+There are two viable options for migrating from the community Elasticsearch Helm
+chart in the [helm/charts][] repo:
+
+1. Restoring from a snapshot to a fresh cluster.
+2. Live migration by joining a new cluster to the existing cluster.
+
+## Restoring from Snapshot
+
+This is the recommended and preferred option. The downside is that it will
+involve a period of write downtime during the migration. If you have a way to
+temporarily stop writes to your cluster then this is the way to go.
This is also
+a lot simpler as it just involves launching a fresh cluster and restoring a
+snapshot following the [restoring to a different cluster guide][].
+
+## Live migration
+
+If restoring from a snapshot is not possible due to the write downtime then a
+live migration is also possible. It is very important to first test this in a
+testing environment to make sure you are comfortable with the process and fully
+understand what is happening.
+
+This process will involve joining a new set of master, data and client nodes to
+an existing cluster that has been deployed using the [helm/charts][] community
+chart. Nodes will then be replaced one by one in a controlled fashion to
+decommission the old cluster.
+
+This example will be using the default values for the existing helm/charts
+release and for the Elastic helm-charts release. If you have changed any of the
+default values then you will need to first make sure that your values are
+configured in a compatible way before starting the migration.
+
+The process will involve a re-sync and a rolling restart of all of your data
+nodes. Therefore it is important to disable shard allocation and perform a
+synced flush like you normally would during any other rolling upgrade. See the
+[rolling upgrades guide][] for more information.
+
+* The default image for this chart is
+`docker.elastic.co/elasticsearch/elasticsearch` which contains the default
+distribution of Elasticsearch with a [basic license][]. Make sure to update the
+`image` and `imageTag` values to the correct Docker image and Elasticsearch
+version that you currently have deployed.
+
+* Convert your current helm/charts configuration into something that is
+compatible with this chart.
+
+* Take a fresh snapshot of your cluster. If something goes wrong you want to be
+able to restore your data no matter what.
+
+* Check that your cluster's health is green. If not, abort and make sure your
+cluster is healthy before continuing:
+
+  ```
+  curl localhost:9200/_cluster/health
+  ```
+
+* Deploy new data nodes which will join the existing cluster. Take a look at the
+configuration in [data.yaml][]:
+
+  ```
+  make data
+  ```
+
+* Check that the new nodes have joined the cluster (run this and any other curl
+commands from within one of your pods):
+
+  ```
+  curl localhost:9200/_cat/nodes
+  ```
+
+* Check that your cluster is still green. If so, we can now start to scale down
+the existing data nodes. Assuming you have the default number of data nodes (2),
+we now want to scale them down to 1:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=1
+  ```
+
+* Wait for your cluster to become green again:
+
+  ```
+  watch 'curl -s localhost:9200/_cluster/health'
+  ```
+
+* Once the cluster is green, we can scale down again:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-data --replicas=0
+  ```
+
+* Wait for the cluster to be green again.
+* OK. We now have all data nodes running in the new cluster. Time to replace the
+masters by first scaling down the masters from 3 to 2. Between each step make
+sure to wait for the cluster to become green again, and check with
+`curl localhost:9200/_cat/nodes` that you see the correct number of master
+nodes. During this process we will always make sure to keep at least 2 master
+nodes so as not to lose quorum:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=2
+  ```
+
+* Now deploy a single new master so that we have 3 masters again.
See
+[master.yaml][] for the configuration:
+
+  ```
+  make master
+  ```
+
+* Scale down the old masters to 1:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=1
+  ```
+
+* Edit the masters in [master.yaml][] to 2 and redeploy:
+
+  ```
+  make master
+  ```
+
+* Scale down the old masters to 0:
+
+  ```
+  kubectl scale statefulsets my-release-elasticsearch-master --replicas=0
+  ```
+
+* Edit [master.yaml][] to have 3 replicas and remove the
+`discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then redeploy the
+masters. This will make sure all 3 masters are running in the new cluster and
+are pointing at each other for discovery:
+
+  ```
+  make master
+  ```
+
+* Remove the `discovery.zen.ping.unicast.hosts` entry from `extraEnvs`, then
+redeploy the data nodes to make sure they are pointing at the new masters:
+
+  ```
+  make data
+  ```
+
+* Deploy the client nodes:
+
+  ```
+  make client
+  ```
+
+* Update any processes that are talking to the existing client nodes and point
+them to the new client nodes. Once this is done you can scale down the old
+client nodes:
+
+  ```
+  kubectl scale deployment my-release-elasticsearch-client --replicas=0
+  ```
+
+* The migration should now be complete. After verifying that everything is
+working correctly you can clean up leftover resources from your old cluster.
+
+[basic license]: https://www.elastic.co/subscriptions
+[data.yaml]: https://github.com/elastic/helm-charts/blob/7.17/elasticsearch/examples/migration/data.yaml
+[helm/charts]: https://github.com/helm/charts/tree/7.17/stable/elasticsearch
+[master.yaml]: https://github.com/elastic/helm-charts/blob/7.17/elasticsearch/examples/migration/master.yaml
+[restoring to a different cluster guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/modules-snapshots.html#_restoring_to_a_different_cluster
+[rolling upgrades guide]: https://www.elastic.co/guide/en/elasticsearch/reference/6.8/rolling-upgrades.html
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/client.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/client.yaml
new file mode 100644
index 0000000..30ee700
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/client.yaml
@@ -0,0 +1,23 @@
+---
+
+replicas: 2
+
+clusterName: "elasticsearch"
+nodeGroup: "client"
+
+esMajorVersion: 6
+
+roles:
+  master: "false"
+  ingest: "false"
+  data: "false"
+
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "standard"
+  resources:
+    requests:
+      storage: 1Gi # Currently needed until PVCs are made optional
+
+persistence:
+  enabled: false
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/data.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/data.yaml
new file mode 100644
index 0000000..eedcbb0
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/data.yaml
@@ -0,0 +1,17 @@
+---
+
+replicas: 2
+
+esMajorVersion: 6
+
+extraEnvs:
+  - name: discovery.zen.ping.unicast.hosts
+    value: "my-release-elasticsearch-discovery"
+
+clusterName: "elasticsearch"
+nodeGroup: "data"
+
+roles:
+  master: "false"
+  ingest: "false"
+  data: "true"
diff --git 
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/master.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/master.yaml
new file mode 100644
index 0000000..3e3a2f1
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/migration/master.yaml
@@ -0,0 +1,26 @@
+---
+
+# Start with 1 replica; this is edited during the migration so the old and new
+# clusters can be scaled up/down one at a time whilst always keeping 3 masters running
+replicas: 1
+
+esMajorVersion: 6
+
+extraEnvs:
+  - name: discovery.zen.ping.unicast.hosts
+    value: "my-release-elasticsearch-discovery"
+
+clusterName: "elasticsearch"
+nodeGroup: "master"
+
+roles:
+  master: "true"
+  ingest: "false"
+  data: "false"
+
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "standard"
+  resources:
+    requests:
+      storage: 4Gi
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/Makefile
new file mode 100644
index 0000000..1021d98
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/Makefile
@@ -0,0 +1,13 @@
+default: test
+
+RELEASE := helm-es-minikube
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+test: install
+	helm test $(RELEASE)
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/README.md
new file mode 100644
index 0000000..c13052a
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/README.md
@@ -0,0 +1,38 @@
+# Minikube
+
+This example deploys a 3 node Elasticsearch 7.17.3 cluster on [Minikube][]
+using [custom values][].
+
+If helm or kubectl timeouts occur, you may consider creating a minikube VM with
+more CPU cores or memory allocated.
+
+Note that this configuration should be used for testing only and isn't
+recommended for production.
+
+
+## Requirements
+
+In order to properly support the required persistent volume claims for the
+Elasticsearch StatefulSet, the `default-storageclass` and `storage-provisioner`
+minikube addons must be enabled.
+
+```
+minikube addons enable default-storageclass
+minikube addons enable storage-provisioner
+```
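+
+If you do hit the helm or kubectl timeouts mentioned above, recreating the
+minikube VM with more resources usually helps. The values below are only an
+illustration; size them to your machine:
+
+```
+minikube delete
+minikube start --cpus 4 --memory 8192
+```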
+
+
+## Usage
+
+* Deploy Elasticsearch chart with the default values: `make install`
+
+* You can now set up a port forward to query Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+
+[custom values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/minikube/values.yaml
+[minikube]: https://minikube.sigs.k8s.io/docs/
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/values.yaml
new file mode 100644
index 0000000..ccceb3a
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/minikube/values.yaml
@@ -0,0 +1,23 @@
+---
+# Permit co-located instances for solitary minikube virtual machines.
+antiAffinity: "soft"
+
+# Shrink default JVM heap.
+esJavaOpts: "-Xmx128m -Xms128m"
+
+# Allocate smaller chunks of memory per pod.
+resources:
+  requests:
+    cpu: "100m"
+    memory: "512M"
+  limits:
+    cpu: "1000m"
+    memory: "512M"
+
+# Request smaller persistent volumes.
+volumeClaimTemplate:
+  accessModes: [ "ReadWriteOnce" ]
+  storageClassName: "standard"
+  resources:
+    requests:
+      storage: 100M
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/Makefile
new file mode 100644
index 0000000..243e504
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/Makefile
@@ -0,0 +1,19 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+PREFIX := helm-es-multi
+RELEASE := helm-es-multi-master
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values master.yaml $(PREFIX)-master ../../
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values data.yaml $(PREFIX)-data ../../
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values client.yaml $(PREFIX)-client ../../
+
+test: install goss
+
+purge:
+	helm del $(PREFIX)-master
+	helm del $(PREFIX)-data
+	helm del $(PREFIX)-client
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/README.md
new file mode 100644
index 0000000..db3380a
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/README.md
@@ -0,0 +1,29 @@
+# Multi
+
+This example deploys an Elasticsearch 7.17.3 cluster composed of 3 different
+Helm releases:
+
+- `helm-es-multi-master` for the 3 master nodes using [master values][]
+- `helm-es-multi-data` for the 3 data nodes using [data values][]
+- `helm-es-multi-client` for the 3 client nodes using [client values][]
+
+## Usage
+
+* Deploy the 3 Elasticsearch releases: `make install`
+
+* You can now set up a port forward to query Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/multi-master 9200
+  curl -u elastic:changeme http://localhost:9200/_cat/indices
+  ```
+
+## Testing
+
+You can also run [goss integration tests][] using `make test`.
+
+
+[client values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi/client.yaml
+[data 
values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi/data.yaml +[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi/test/goss.yaml +[master values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/multi/master.yaml diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/client.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/client.yaml new file mode 100644 index 0000000..dbe5b05 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/client.yaml @@ -0,0 +1,14 @@ +--- + +clusterName: "multi" +nodeGroup: "client" + +roles: + master: "false" + ingest: "false" + data: "false" + ml: "false" + remote_cluster_client: "false" + +persistence: + enabled: false diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/data.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/data.yaml new file mode 100644 index 0000000..2e3a909 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/data.yaml @@ -0,0 +1,11 @@ +--- + +clusterName: "multi" +nodeGroup: "data" + +roles: + master: "false" + ingest: "true" + data: "true" + ml: "false" + remote_cluster_client: "false" diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/master.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/master.yaml new file mode 100644 index 0000000..6b8c082 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/master.yaml @@ -0,0 +1,11 @@ +--- + +clusterName: "multi" +nodeGroup: "master" + +roles: + master: "true" + ingest: "false" + data: "false" + ml: "false" + remote_cluster_client: "false" diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/test/goss.yaml new file mode 100644 index 0000000..794416b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/multi/test/goss.yaml @@ -0,0 +1,9 @@ +http: + http://localhost:9200/_cluster/health: + status: 200 + timeout: 2000 + body: + - 'green' + - '"cluster_name":"multi"' + - '"number_of_nodes":9' + - '"number_of_data_nodes":3' diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/Makefile new file mode 100644 index 0000000..e7b20c5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/Makefile @@ -0,0 +1,14 @@ +default: test + +include ../../../helpers/examples.mk + +RELEASE := helm-es-networkpolicy +TIMEOUT := 1200s + +install: + helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../ + +test: install goss + +purge: + helm del $(RELEASE) diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/values.yaml 
b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/values.yaml
new file mode 100644
index 0000000..1963d20
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/networkpolicy/values.yaml
@@ -0,0 +1,37 @@
+networkPolicy:
+  http:
+    enabled: true
+    explicitNamespacesSelector:
+      # Accept from namespaces matching all of these rules (from allowed Pods)
+      matchLabels:
+        role: frontend-http
+      matchExpressions:
+        - {key: role, operator: In, values: [frontend-http]}
+    additionalRules:
+      - podSelector:
+          matchLabels:
+            role: frontend-http
+      - podSelector:
+          matchExpressions:
+            - key: role
+              operator: In
+              values:
+                - frontend-http
+  transport:
+    enabled: true
+    allowExternal: true
+    explicitNamespacesSelector:
+      matchLabels:
+        role: frontend-transport
+      matchExpressions:
+        - {key: role, operator: In, values: [frontend-transport]}
+    additionalRules:
+      - podSelector:
+          matchLabels:
+            role: frontend-transport
+      - podSelector:
+          matchExpressions:
+            - key: role
+              operator: In
+              values:
+                - frontend-transport
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/Makefile
new file mode 100644
index 0000000..078c33c
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/Makefile
@@ -0,0 +1,14 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := elasticsearch
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+test: install goss
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/README.md
new file mode 100644
index 0000000..f410ef9
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/README.md
@@ -0,0 +1,24 @@
+# OpenShift
+
+This example deploys a 3 node Elasticsearch 7.17.3 cluster on [OpenShift][]
+using [custom values][].
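+
+The [custom values][] unset `runAsUser` and `fsGroup` because OpenShift's
+restricted SCC assigns an arbitrary UID from the project's allowed range rather
+than honouring a fixed one. If you want to see which range your project was
+given (`myproject` is just a placeholder):
+
+```
+oc get namespace myproject -o yaml | grep 'openshift.io/sa.scc'
+```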
+
+## Usage
+
+* Deploy Elasticsearch chart with the default values: `make install`
+
+* You can now set up a port forward to query Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/elasticsearch-master 9200
+  curl localhost:9200/_cat/indices
+  ```
+
+## Testing
+
+You can also run [goss integration tests][] using `make test`.
+
+
+[custom values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/openshift/values.yaml
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/openshift/test/goss.yaml
+[openshift]: https://www.openshift.com/
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/test/goss.yaml
new file mode 100644
index 0000000..f897164
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/test/goss.yaml
@@ -0,0 +1,16 @@
+http:
+  http://localhost:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    body:
+      - "green"
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    body:
+      - '"number" : "7.17.3"'
+      - '"cluster_name" : "elasticsearch"'
+      - "You Know, for Search"
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/values.yaml
new file mode 100644
index 0000000..8a21126
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/openshift/values.yaml
@@ -0,0 +1,11 @@
+---
+
+securityContext:
+  runAsUser: null
+
+podSecurityContext:
+  fsGroup: null
+  runAsUser: null
+
+sysctlInitContainer:
+  enabled: false
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/Makefile
new file mode 100644
index 0000000..beddbef
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/Makefile
@@ -0,0 +1,38 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+RELEASE := helm-es-security
+ELASTICSEARCH_IMAGE := docker.elastic.co/elasticsearch/elasticsearch:$(STACK_VERSION)
+TIMEOUT := 1200s
+
+install:
+	helm upgrade --wait --timeout=$(TIMEOUT) --install --values values.yaml $(RELEASE) ../../
+
+test: secrets install goss
+
+purge:
+	kubectl delete secrets elastic-credentials elastic-certificates elastic-certificate-pem elastic-certificate-crt || true
+	helm del $(RELEASE)
+
+pull-elasticsearch-image:
+	docker pull $(ELASTICSEARCH_IMAGE)
+
+secrets:
+	docker rm -f elastic-helm-charts-certs || true
+	rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 || true
-z "$$ELASTIC_PASSWORD" ] && echo $$ELASTIC_PASSWORD || echo $$(docker run --rm busybox:1.31.1 /bin/sh -c "< /dev/urandom tr -cd '[:alnum:]' | head -c20")) && \ + docker run --name elastic-helm-charts-certs -i -w /app \ + $(ELASTICSEARCH_IMAGE) \ + /bin/sh -c " \ + elasticsearch-certutil ca --out /app/elastic-stack-ca.p12 --pass '' && \ + elasticsearch-certutil cert --name security-master --dns security-master --ca /app/elastic-stack-ca.p12 --pass '' --ca-pass '' --out /app/elastic-certificates.p12" && \ + docker cp elastic-helm-charts-certs:/app/elastic-certificates.p12 ./ && \ + docker rm -f elastic-helm-charts-certs && \ + openssl pkcs12 -nodes -passin pass:'' -in elastic-certificates.p12 -out elastic-certificate.pem && \ + openssl x509 -outform der -in elastic-certificate.pem -out elastic-certificate.crt && \ + kubectl create secret generic elastic-certificates --from-file=elastic-certificates.p12 && \ + kubectl create secret generic elastic-certificate-pem --from-file=elastic-certificate.pem && \ + kubectl create secret generic elastic-certificate-crt --from-file=elastic-certificate.crt && \ + kubectl create secret generic elastic-credentials --from-literal=password=$$password --from-literal=username=elastic && \ + rm -f elastic-certificates.p12 elastic-certificate.pem elastic-certificate.crt elastic-stack-ca.p12 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/README.md new file mode 100644 index 0000000..e0b5eff --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/README.md @@ -0,0 +1,29 @@ +# Security + +This example deploy a 3 nodes Elasticsearch 7.17.3 with authentication and +autogenerated certificates for TLS (see [values][]). + +Note that this configuration should be used for test only. For a production +deployment you should generate SSL certificates following the [official docs][]. 
+
+## Usage
+
+* Create the required secrets: `make secrets`
+
+* Deploy Elasticsearch chart with the default values: `make install`
+
+* You can now set up a port forward to query Elasticsearch API:
+
+  ```
+  kubectl port-forward svc/security-master 9200
+  curl -k -u "elastic:$ELASTIC_PASSWORD" https://localhost:9200/_cat/indices
+  ```
+
+## Testing
+
+You can also run [goss integration tests][] using `make test`.
+
+
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/security/test/goss.yaml
+[official docs]: https://www.elastic.co/guide/en/elasticsearch/reference/7.17/configuring-tls.html#node-certificates
+[values]: https://github.com/elastic/helm-charts/tree/7.17/elasticsearch/examples/security/values.yaml
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/test/goss.yaml
new file mode 100644
index 0000000..c52e05f
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/test/goss.yaml
@@ -0,0 +1,44 @@
+http:
+  https://security-master:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    allow-insecure: true
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    body:
+      - "green"
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  https://localhost:9200/:
+    status: 200
+    timeout: 2000
+    allow-insecure: true
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    body:
+      - '"cluster_name" : "security"'
+      - "You Know, for Search"
+
+  https://localhost:9200/_xpack/license:
+    status: 200
+    timeout: 2000
+    allow-insecure: true
+    username: elastic
+    password: "{{ .Env.ELASTIC_PASSWORD }}"
+    body:
+      - "active"
+      - "basic"
+
+file:
+  /usr/share/elasticsearch/config/elasticsearch.yml:
+    exists: true
+    contains:
+      - "xpack.security.enabled: true"
+      - "xpack.security.transport.ssl.enabled: true"
+      - "xpack.security.transport.ssl.verification_mode: certificate"
+      - "xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
+      - "xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
+      - "xpack.security.http.ssl.enabled: true"
+      - "xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
+      - "xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12"
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/values.yaml
new file mode 100644
index 0000000..ac26231
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/security/values.yaml
@@ -0,0 +1,33 @@
+---
+clusterName: "security"
+nodeGroup: "master"
+
+roles:
+  master: "true"
+  ingest: "true"
+  data: "true"
+
+protocol: https
+
+esConfig:
+  elasticsearch.yml: |
+    xpack.security.enabled: true
+    xpack.security.transport.ssl.enabled: true
+    xpack.security.transport.ssl.verification_mode: certificate
+    xpack.security.transport.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
+    xpack.security.transport.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
+    xpack.security.http.ssl.enabled: true
+    xpack.security.http.ssl.truststore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
+    xpack.security.http.ssl.keystore.path: /usr/share/elasticsearch/config/certs/elastic-certificates.p12
+
+extraEnvs:
+  - name: ELASTIC_PASSWORD
+    valueFrom:
+      secretKeyRef:
+        name: elastic-credentials
+        key: password
+
+secretMounts:
+  - name: elastic-certificates
+    secretName: elastic-certificates
+    path: /usr/share/elasticsearch/config/certs
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/Makefile b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/Makefile
new file mode 100644
index 0000000..9251d3b
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/Makefile
@@ -0,0 +1,16 @@
+default: test
+
+include ../../../helpers/examples.mk
+
+CHART := elasticsearch
+RELEASE := helm-es-upgrade
+FROM := 7.4.0 # versions before 7.4.0 aren't compatible with Kubernetes >= 1.16.0
+
+install:
+	../../../helpers/upgrade.sh --chart $(CHART) --release $(RELEASE) --from $(FROM)
+	kubectl rollout status statefulset upgrade-master
+
+test: install goss
+
+purge:
+	helm del $(RELEASE)
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/README.md
new file mode 100644
index 0000000..85977f5
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/README.md
@@ -0,0 +1,17 @@
+# Upgrade
+
+This example will deploy a 3 node Elasticsearch cluster chart using an old chart
+version, then upgrade it.
+
+
+## Usage
+
+* Deploy and upgrade Elasticsearch chart with the default values: `make install`
+
+
+## Testing
+
+You can also run [goss integration tests][] using `make test`.
+
+
+[goss integration tests]: https://github.com/elastic/helm-charts/tree/master/elasticsearch/examples/upgrade/test/goss.yaml
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/scripts/upgrade.sh b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/scripts/upgrade.sh
new file mode 100644
index 0000000..59853e0
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/scripts/upgrade.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+usage() {
+  cat <<-EOF
+	USAGE:
+	  $0 [--release <release_name>] [--from <elasticsearch_version>]
+	  $0 --help
+
+	OPTIONS:
+	  --release <release_name>
+	    Name of the Helm release to install
+	  --from <elasticsearch_version>
+	    Elasticsearch version to use for first install
+	EOF
+  exit 1
+}
+
+RELEASE="helm-es-upgrade"
+FROM=""
+
+while [[ $# -gt 0 ]]
+do
+  key="$1"
+
+  case $key in
+    --help)
+      usage
+      ;;
+    --release)
+      RELEASE="$2"
+      shift 2
+      ;;
+    --from)
+      FROM="$2"
+      shift 2
+      ;;
+    *)
+      echo "Unrecognized argument: '$key'"
+      usage
+      ;;
+  esac
+done
+
+if ! command -v jq > /dev/null
+then
+  echo 'jq is required to use this script'
+  echo 'please check https://stedolan.github.io/jq/download/ to install it'
+  exit 1
+fi
+
+# Elasticsearch charts < 7.4.0 are not compatible with Kubernetes >= 1.16
+if [[ -z $FROM ]]
+then
+  KUBE_MINOR_VERSION=$(kubectl version -o json | jq --raw-output --exit-status '.serverVersion.minor' | sed 's/[^0-9]*//g')
+
+  if [ "$KUBE_MINOR_VERSION" -lt 16 ]
+  then
+    FROM="7.0.0-alpha1"
+  else
+    FROM="7.4.0"
+  fi
+fi
+
+helm repo add elastic https://helm.elastic.co
+
+# Initial install
+printf "Installing Elasticsearch chart %s\n" "$FROM"
+helm upgrade --wait --timeout=600s --install "$RELEASE" elastic/elasticsearch --version "$FROM" --set clusterName=upgrade
+kubectl rollout status sts/upgrade-master --timeout=600s
+
+# Upgrade
+printf "Upgrading Elasticsearch chart\n"
+helm upgrade --wait --timeout=600s --set terminationGracePeriod=121 --install "$RELEASE" ../../ --set clusterName=upgrade
+kubectl rollout status sts/upgrade-master --timeout=600s
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/test/goss.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/test/goss.yaml
new file mode 100644
index 0000000..714022d
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/test/goss.yaml
@@ -0,0 +1,16 @@
+http:
+  http://localhost:9200/_cluster/health:
+    status: 200
+    timeout: 2000
+    body:
+      - "green"
+      - '"number_of_nodes":3'
+      - '"number_of_data_nodes":3'
+
+  http://localhost:9200:
+    status: 200
+    timeout: 2000
+    body:
+      - '"number" : "7.17.3"'
+      - '"cluster_name" : "upgrade"'
+      - "You Know, for Search"
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/values.yaml
new file mode 100644
index 0000000..de0283a
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/examples/upgrade/values.yaml
@@ -0,0 +1,2 @@
+---
+clusterName: upgrade
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/NOTES.txt
new file mode 100644
index 0000000..88b5dd5
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/NOTES.txt
@@ -0,0 +1,6 @@
+1. Watch all cluster members come up.
+  $ kubectl get pods --namespace={{ .Release.Namespace }} -l app={{ template "elasticsearch.uname" . }} -w
+{{- if .Values.tests.enabled -}}
+2. Test cluster health using Helm test.
+  $ helm --namespace={{ .Release.Namespace }} test {{ .Release.Name }}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/_helpers.tpl
new file mode 100644
index 0000000..b2ae034
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/_helpers.tpl
@@ -0,0 +1,84 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}} +{{- define "elasticsearch.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- define "elasticsearch.uname" -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-{{ .Values.nodeGroup }} +{{- else -}} +{{ .Values.nameOverride }}-{{ .Values.nodeGroup }} +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- end -}} + +{{/* +Generate certificates +*/}} +{{- define "elasticsearch.gen-certs" -}} +{{- $altNames := list ( printf "%s.%s" (include "elasticsearch.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "elasticsearch.name" .) .Release.Namespace ) -}} +{{- $ca := genCA "elasticsearch-ca" 365 -}} +{{- $cert := genSignedCert ( include "elasticsearch.name" . ) nil $altNames 365 $ca -}} +tls.crt: {{ $cert.Cert | toString | b64enc }} +tls.key: {{ $cert.Key | toString | b64enc }} +ca.crt: {{ $ca.Cert | toString | b64enc }} +{{- end -}} + +{{- define "elasticsearch.masterService" -}} +{{- if empty .Values.masterService -}} +{{- if empty .Values.fullnameOverride -}} +{{- if empty .Values.nameOverride -}} +{{ .Values.clusterName }}-master +{{- else -}} +{{ .Values.nameOverride }}-master +{{- end -}} +{{- else -}} +{{ .Values.fullnameOverride }} +{{- end -}} +{{- else -}} +{{ .Values.masterService }} +{{- end -}} +{{- end -}} + +{{- define "elasticsearch.endpoints" -}} +{{- $replicas := int (toString (.Values.replicas)) }} +{{- $uname := (include "elasticsearch.uname" .) }} + {{- range $i, $e := untilStep 0 $replicas 1 -}} +{{ $uname }}-{{ $i }}, + {{- end -}} +{{- end -}} + +{{- define "elasticsearch.esMajorVersion" -}} +{{- if .Values.esMajorVersion -}} +{{ .Values.esMajorVersion }} +{{- else -}} +{{- $version := int (index (.Values.imageTag | splitList ".") 0) -}} + {{- if and (contains "docker.elastic.co/elasticsearch/elasticsearch" .Values.image) (not (eq $version 0)) -}} +{{ $version }} + {{- else -}} +7 + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Use the fullname if the serviceAccount value is not set +*/}} +{{- define "elasticsearch.serviceAccount" -}} +{{- .Values.rbac.serviceAccountName | default (include "elasticsearch.uname" .) -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/configmap.yaml new file mode 100644 index 0000000..fd1ad30 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/configmap.yaml @@ -0,0 +1,34 @@ +{{- if .Values.esConfig }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . }}-config + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esConfig }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} +{{- if .Values.esJvmOptions }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "elasticsearch.uname" . 
}}-jvm-options + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +data: +{{- range $path, $config := .Values.esJvmOptions }} + {{ $path }}: | +{{ $config | indent 4 -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/ingress.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/ingress.yaml new file mode 100644 index 0000000..e60cebf --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/ingress.yaml @@ -0,0 +1,64 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- $httpPort := .Values.httpPort -}} +{{- $pathtype := .Values.ingress.pathtype -}} +{{- $ingressPath := .Values.ingress.path -}} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + app: {{ .Chart.Name }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +{{- with .Values.ingress.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- if .Values.ingress.className }} + ingressClassName: {{ .Values.ingress.className | quote }} + {{- end }} +{{- if .Values.ingress.tls }} + tls: + {{- if .ingressPath }} + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} +{{- else }} +{{ toYaml .Values.ingress.tls | indent 4 }} + {{- end }} +{{- end}} + rules: + {{- range .Values.ingress.hosts }} + {{- if $ingressPath }} + - host: {{ . }} + http: + paths: + - path: {{ $ingressPath }} + pathType: {{ $pathtype }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ $httpPort }} + {{- else }} + - host: {{ .host }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ $pathtype }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ .servicePort | default $httpPort }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/networkpolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/networkpolicy.yaml new file mode 100644 index 0000000..62bb1bd --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if (or .Values.networkPolicy.http.enabled .Values.networkPolicy.transport.enabled) }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +spec: + podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + ingress: # Allow inbound connections + +{{- if .Values.networkPolicy.http.enabled }} + # For HTTP access + - ports: + - port: {{ .Values.httpPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-http-client: "true" +{{- with .Values.networkPolicy.http.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . 
| indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.http.additionalRules }} + # Or from custom additional rules +{{ toYaml . | indent 8 }} +{{- end }} +{{- end }} + +{{- if .Values.networkPolicy.transport.enabled }} + # For transport access + - ports: + - port: {{ .Values.transportPort }} + from: + # From authorized Pods (having the correct label) + - podSelector: + matchLabels: + {{ template "elasticsearch.uname" . }}-transport-client: "true" +{{- with .Values.networkPolicy.transport.explicitNamespacesSelector }} + # From authorized namespaces + namespaceSelector: +{{ toYaml . | indent 12 }} +{{- end }} +{{- with .Values.networkPolicy.transport.additionalRules }} + # Or from custom additional rules +{{ toYaml . | indent 8 }} +{{- end }} + # Or from other ElasticSearch Pods + - podSelector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} + +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/poddisruptionbudget.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..6d0bdf3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/poddisruptionbudget.yaml @@ -0,0 +1,15 @@ +{{- if .Values.maxUnavailable }} +{{- if .Capabilities.APIVersions.Has "policy/v1" -}} +apiVersion: policy/v1 +{{- else}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: "{{ template "elasticsearch.uname" . }}-pdb" +spec: + maxUnavailable: {{ .Values.maxUnavailable }} + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/podsecuritypolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..e22e75c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/podsecuritypolicy.yaml @@ -0,0 +1,18 @@ +{{- if .Values.podSecurityPolicy.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +{{- if .Capabilities.APIVersions.Has "policy/v1" -}} +apiVersion: policy/v1 +{{- else}} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodSecurityPolicy +metadata: + name: {{ default $fullName .Values.podSecurityPolicy.name | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +spec: +{{ toYaml .Values.podSecurityPolicy.spec | indent 2 }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/role.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/role.yaml new file mode 100644 index 0000000..d3a7ee3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/role.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . 
-}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +rules: + - apiGroups: + - extensions + resources: + - podsecuritypolicies + resourceNames: + {{- if eq .Values.podSecurityPolicy.name "" }} + - {{ $fullName | quote }} + {{- else }} + - {{ .Values.podSecurityPolicy.name | quote }} + {{- end }} + verbs: + - use +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/rolebinding.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/rolebinding.yaml new file mode 100644 index 0000000..e0ecced --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ $fullName | quote }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +subjects: + - kind: ServiceAccount + name: "{{ template "elasticsearch.serviceAccount" . }}" + namespace: {{ .Release.Namespace | quote }} +roleRef: + kind: Role + name: {{ $fullName | quote }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/service.yaml new file mode 100644 index 0000000..5fe52eb --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/service.yaml @@ -0,0 +1,78 @@ +{{- if .Values.service.enabled -}} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }} +{{- else }} + name: {{ template "elasticsearch.uname" . }} +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4}} +{{- end }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +spec: + type: {{ .Values.service.type }} + selector: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + protocol: TCP + port: {{ .Values.httpPort }} +{{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} +{{- end }} + - name: {{ .Values.service.transportPortName | default "transport" }} + protocol: TCP + port: {{ .Values.transportPort }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} +{{- end }} +{{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml . 
| indent 4 }} +{{- end }} +{{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} +{{- end }} +{{- end }} +--- +kind: Service +apiVersion: v1 +metadata: +{{- if eq .Values.nodeGroup "master" }} + name: {{ template "elasticsearch.masterService" . }}-headless +{{- else }} + name: {{ template "elasticsearch.uname" . }}-headless +{{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" +{{- if .Values.service.labelsHeadless }} +{{ toYaml .Values.service.labelsHeadless | indent 4 }} +{{- end }} + annotations: + service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" +spec: + clusterIP: None # This is needed for statefulset hostnames like elasticsearch-0 to resolve + # Create endpoints also if the related pod isn't ready + publishNotReadyAddresses: true + selector: + app: "{{ template "elasticsearch.uname" . }}" + ports: + - name: {{ .Values.service.httpPortName | default "http" }} + port: {{ .Values.httpPort }} + - name: {{ .Values.service.transportPortName | default "transport" }} + port: {{ .Values.transportPort }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/serviceaccount.yaml new file mode 100644 index 0000000..a7ef847 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.rbac.create -}} +{{- $fullName := include "elasticsearch.uname" . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ template "elasticsearch.serviceAccount" . }}" + annotations: + {{- with .Values.rbac.serviceAccountAnnotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app: {{ $fullName | quote }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/statefulset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/statefulset.yaml new file mode 100644 index 0000000..754581c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/statefulset.yaml @@ -0,0 +1,390 @@ +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "elasticsearch.uname" . }} + labels: + heritage: {{ .Release.Service | quote }} + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + esMajorVersion: "{{ include "elasticsearch.esMajorVersion" . }}" +spec: + serviceName: {{ template "elasticsearch.uname" . }}-headless + selector: + matchLabels: + app: "{{ template "elasticsearch.uname" . }}" + replicas: {{ .Values.replicas }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if .Values.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ template "elasticsearch.uname" . 
}} + {{- if .Values.persistence.labels.enabled }} + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 8 }} + {{- end }} + spec: +{{ toYaml .Values.volumeClaimTemplate | indent 6 }} + {{- end }} + template: + metadata: + name: "{{ template "elasticsearch.uname" . }}" + labels: + release: {{ .Release.Name | quote }} + chart: "{{ .Chart.Name }}" + app: "{{ template "elasticsearch.uname" . }}" + {{- range $key, $value := .Values.labels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + annotations: + {{- range $key, $value := .Values.podAnnotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{/* This forces a restart if the configmap has changed */}} + {{- if or .Values.esConfig .Values.esJvmOptions }} + configchecksum: {{ include (print .Template.BasePath "/configmap.yaml") . | sha256sum | trunc 63 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + securityContext: +{{ toYaml .Values.podSecurityContext | indent 8 }} + {{- if .Values.fsGroup }} + fsGroup: {{ .Values.fsGroup }} # Deprecated value, please use .Values.podSecurityContext.fsGroup + {{- end }} + {{- if or .Values.rbac.create .Values.rbac.serviceAccountName }} + serviceAccountName: "{{ template "elasticsearch.serviceAccount" . }}" + {{- end }} + automountServiceAccountToken: {{ .Values.rbac.automountToken }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if or (eq .Values.antiAffinity "hard") (eq .Values.antiAffinity "soft") .Values.nodeAffinity }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + affinity: + {{- end }} + {{- if eq .Values.antiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" .}}" + topologyKey: {{ .Values.antiAffinityTopologyKey }} + {{- else if eq .Values.antiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: {{ .Values.antiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - "{{ template "elasticsearch.uname" . }}" + {{- end }} + {{- with .Values.nodeAffinity }} + nodeAffinity: +{{ toYaml . | indent 10 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriod }} + volumes: + {{- range .Values.secretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- if .defaultMode }} + defaultMode: {{ .defaultMode }} + {{- end }} + {{- end }} + {{- if .Values.esConfig }} + - name: esconfig + configMap: + name: {{ template "elasticsearch.uname" . }}-config + {{- end }} + {{- if .Values.esJvmOptions }} + - name: esjvmoptions + configMap: + name: {{ template "elasticsearch.uname" . }}-jvm-options + {{- end }} +{{- if .Values.keystore }} + - name: keystore + emptyDir: {} + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + secret: {{ toYaml . 
| nindent 12 }} + {{- end }} +{{ end }} + {{- if .Values.extraVolumes }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumes) }} +{{ tpl .Values.extraVolumes . | indent 8 }} + {{- else }} +{{ toYaml .Values.extraVolumes | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + enableServiceLinks: {{ .Values.enableServiceLinks }} + {{- if .Values.hostAliases }} + hostAliases: {{ toYaml .Values.hostAliases | nindent 8 }} + {{- end }} + {{- if or (.Values.extraInitContainers) (.Values.sysctlInitContainer.enabled) (.Values.keystore) }} + initContainers: + {{- if .Values.sysctlInitContainer.enabled }} + - name: configure-sysctl + securityContext: + runAsUser: 0 + privileged: true + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: ["sysctl", "-w", "vm.max_map_count={{ .Values.sysctlVmMaxMapCount}}"] + resources: +{{ toYaml .Values.initResources | indent 10 }} + {{- end }} +{{ if .Values.keystore }} + - name: keystore + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - bash + - -c + - | + set -euo pipefail + + elasticsearch-keystore create + + for i in /tmp/keystoreSecrets/*/*; do + key=$(basename $i) + echo "Adding file $i to keystore key $key" + elasticsearch-keystore add-file "$key" "$i" + done + + # Add the bootstrap password since otherwise the Elasticsearch entrypoint tries to do this on startup + if [ ! -z ${ELASTIC_PASSWORD+x} ]; then + echo 'Adding env $ELASTIC_PASSWORD to keystore as key bootstrap.password' + echo "$ELASTIC_PASSWORD" | elasticsearch-keystore add -x bootstrap.password + fi + + cp -a /usr/share/elasticsearch/config/elasticsearch.keystore /tmp/keystore/ + env: {{ toYaml .Values.extraEnvs | nindent 10 }} + envFrom: {{ toYaml .Values.envFrom | nindent 10 }} + resources: {{ toYaml .Values.initResources | nindent 10 }} + volumeMounts: + - name: keystore + mountPath: /tmp/keystore + {{- range .Values.keystore }} + - name: keystore-{{ .secretName }} + mountPath: /tmp/keystoreSecrets/{{ .secretName }} + {{- end }} +{{ end }} + {{- if .Values.extraInitContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraInitContainers) }} +{{ tpl .Values.extraInitContainers . | indent 6 }} + {{- else }} +{{ toYaml .Values.extraInitContainers | indent 6 }} + {{- end }} + {{- end }} + {{- end }} + containers: + - name: "{{ template "elasticsearch.name" . 
}}" + securityContext: +{{ toYaml .Values.securityContext | indent 10 }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + readinessProbe: + exec: + command: + - bash + - -c + - | + set -e + # If the node is starting up wait for the cluster to be ready (request params: "{{ .Values.clusterHealthCheckParams }}" ) + # Once it has started only check that the node itself is responding + START_FILE=/tmp/.es_start_file + + # Disable nss cache to avoid filling dentry cache when calling curl + # This is required with Elasticsearch Docker using nss < 3.52 + export NSS_SDB_USE_CACHE=no + + http () { + local path="${1}" + local args="${2}" + set -- -XGET -s + + if [ "$args" != "" ]; then + set -- "$@" $args + fi + + if [ -n "${ELASTIC_PASSWORD}" ]; then + set -- "$@" -u "elastic:${ELASTIC_PASSWORD}" + fi + + curl --output /dev/null -k "$@" "{{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}${path}" + } + + if [ -f "${START_FILE}" ]; then + echo 'Elasticsearch is already running, lets check the node is healthy' + HTTP_CODE=$(http "/" "-w %{http_code}") + RC=$? + if [[ ${RC} -ne 0 ]]; then + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with RC ${RC}" + exit ${RC} + fi + # ready if HTTP code 200, 503 is tolerable if ES version is 6.x + if [[ ${HTTP_CODE} == "200" ]]; then + exit 0 + elif [[ ${HTTP_CODE} == "503" && "{{ include "elasticsearch.esMajorVersion" . }}" == "6" ]]; then + exit 0 + else + echo "curl --output /dev/null -k -XGET -s -w '%{http_code}' \${BASIC_AUTH} {{ .Values.protocol }}://127.0.0.1:{{ .Values.httpPort }}/ failed with HTTP code ${HTTP_CODE}" + exit 1 + fi + + else + echo 'Waiting for elasticsearch cluster to become ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + if http "/_cluster/health?{{ .Values.clusterHealthCheckParams }}" "--fail" ; then + touch ${START_FILE} + exit 0 + else + echo 'Cluster is not yet ready (request params: "{{ .Values.clusterHealthCheckParams }}" )' + exit 1 + fi + fi +{{ toYaml .Values.readinessProbe | indent 10 }} + ports: + - name: http + containerPort: {{ .Values.httpPort }} + - name: transport + containerPort: {{ .Values.transportPort }} + resources: +{{ toYaml .Values.resources | indent 10 }} + env: + - name: node.name + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if eq .Values.roles.master "true" }} + {{- if ge (int (include "elasticsearch.esMajorVersion" .)) 7 }} + - name: cluster.initial_master_nodes + value: "{{ template "elasticsearch.endpoints" . }}" + {{- else }} + - name: discovery.zen.minimum_master_nodes + value: "{{ .Values.minimumMasterNodes }}" + {{- end }} + {{- end }} + {{- if lt (int (include "elasticsearch.esMajorVersion" .)) 7 }} + - name: discovery.zen.ping.unicast.hosts + value: "{{ template "elasticsearch.masterService" . }}-headless" + {{- else }} + - name: discovery.seed_hosts + value: "{{ template "elasticsearch.masterService" . 
}}-headless" + {{- end }} + - name: cluster.name + value: "{{ .Values.clusterName }}" + - name: network.host + value: "{{ .Values.networkHost }}" + - name: cluster.deprecation_indexing.enabled + value: "{{ .Values.clusterDeprecationIndexing }}" + {{- if .Values.esJavaOpts }} + - name: ES_JAVA_OPTS + value: "{{ .Values.esJavaOpts }}" + {{- end }} + {{- range $role, $enabled := .Values.roles }} + - name: node.{{ $role }} + value: "{{ $enabled }}" + {{- end }} +{{- if .Values.extraEnvs }} +{{ toYaml .Values.extraEnvs | indent 10 }} +{{- end }} +{{- if .Values.envFrom }} + envFrom: +{{ toYaml .Values.envFrom | indent 10 }} +{{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: "{{ template "elasticsearch.uname" . }}" + mountPath: /usr/share/elasticsearch/data + {{- end }} +{{ if .Values.keystore }} + - name: keystore + mountPath: /usr/share/elasticsearch/config/elasticsearch.keystore + subPath: elasticsearch.keystore +{{ end }} + {{- range .Values.secretMounts }} + - name: {{ .name }} + mountPath: {{ .path }} + {{- if .subPath }} + subPath: {{ .subPath }} + {{- end }} + {{- end }} + {{- range $path, $config := .Values.esConfig }} + - name: esconfig + mountPath: /usr/share/elasticsearch/config/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- range $path, $config := .Values.esJvmOptions }} + - name: esjvmoptions + mountPath: /usr/share/elasticsearch/config/jvm.options.d/{{ $path }} + subPath: {{ $path }} + {{- end -}} + {{- if .Values.extraVolumeMounts }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraVolumeMounts) }} +{{ tpl .Values.extraVolumeMounts . | indent 10 }} + {{- else }} +{{ toYaml .Values.extraVolumeMounts | indent 10 }} + {{- end }} + {{- end }} +{{- if .Values.lifecycle }} + lifecycle: +{{ toYaml .Values.lifecycle | indent 10 }} +{{- end }} + {{- if .Values.extraContainers }} + # Currently some extra blocks accept strings + # to continue with backwards compatibility this is being kept + # whilst also allowing for yaml to be specified too. + {{- if eq "string" (printf "%T" .Values.extraContainers) }} +{{ tpl .Values.extraContainers . 
| indent 6 }} + {{- else }} +{{ toYaml .Values.extraContainers | indent 6 }} + {{- end }} + {{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml new file mode 100644 index 0000000..9ab7dab --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/templates/test/test-elasticsearch-health.yaml @@ -0,0 +1,36 @@ +{{- if .Values.tests.enabled -}} +--- +apiVersion: v1 +kind: Pod +metadata: +{{- if .Values.healthNameOverride }} + name: {{ .Values.healthNameOverride | quote }} +{{- else }} + name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-delete-policy": hook-succeeded +spec: + securityContext: +{{ toYaml .Values.podSecurityContext | indent 4 }} + containers: +{{- if .Values.healthNameOverride }} + - name: {{ .Values.healthNameOverride | quote }} +{{- else }} + - name: "{{ .Release.Name }}-{{ randAlpha 5 | lower }}-test" +{{- end }} + image: "{{ .Values.image }}:{{ .Values.imageTag }}" + imagePullPolicy: "{{ .Values.imagePullPolicy }}" + command: + - "sh" + - "-c" + - | + #!/usr/bin/env bash -e + curl -XGET --fail '{{ template "elasticsearch.uname" . }}:{{ .Values.httpPort }}/_cluster/health?{{ .Values.clusterHealthCheckParams }}' + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 4 }} + {{- end }} + restartPolicy: Never +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/values.yaml new file mode 100644 index 0000000..9eb4370 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/elasticsearch/values.yaml @@ -0,0 +1,355 @@ +--- +clusterName: "elasticsearch" +nodeGroup: "master" + +# The service that non master groups will try to connect to when joining the cluster +# This should be set to clusterName + "-" + nodeGroup for your master group +masterService: "" + +# Elasticsearch roles that will be applied to this nodeGroup +# These will be set as environment variables. E.g. node.master=true +roles: + master: "true" + ingest: "true" + data: "true" + remote_cluster_client: "true" + ml: "true" + +replicas: 3 +minimumMasterNodes: 2 + +esMajorVersion: "" + +clusterDeprecationIndexing: "false" + +# Allows you to add any config files in /usr/share/elasticsearch/config/ +# such as elasticsearch.yml and log4j2.properties +esConfig: {} +# elasticsearch.yml: | +# key: +# nestedkey: value +# log4j2.properties: | +# key = value + +esJvmOptions: {} +# processors.options: | +# -XX:ActiveProcessorCount=3 + +# Extra environment variables to append to this nodeGroup +# This will be appended to the current 'env:' key. 
You can use any of the kubernetes env +# syntax here +extraEnvs: [] +# - name: MY_ENVIRONMENT_VAR +# value: the_value_goes_here + +# Allows you to load environment variables from kubernetes secret or config map +envFrom: [] +# - secretRef: +# name: env-secret +# - configMapRef: +# name: config-map + +# A list of secrets and their paths to mount inside the pod +# This is useful for mounting certificates for security and for mounting +# the X-Pack license +secretMounts: [] +# - name: elastic-certificates +# secretName: elastic-certificates +# path: /usr/share/elasticsearch/config/certs +# defaultMode: 0755 + +hostAliases: [] +#- ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" + +image: "docker.elastic.co/elasticsearch/elasticsearch" +imageTag: "7.17.3" +imagePullPolicy: "IfNotPresent" + +podAnnotations: + {} + # iam.amazonaws.com/role: es-cluster + +# additional labels +labels: {} + +esJavaOpts: "" # example: "-Xmx1g -Xms1g" + +resources: + requests: + cpu: "1000m" + memory: "2Gi" + limits: + cpu: "1000m" + memory: "2Gi" + +initResources: + {} + # limits: + # cpu: "25m" + # memory: "128Mi" + # requests: + # cpu: "25m" + # memory: "128Mi" + +networkHost: "0.0.0.0" + +volumeClaimTemplate: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 30Gi + +rbac: + create: false + serviceAccountAnnotations: {} + serviceAccountName: "" + automountToken: true + +podSecurityPolicy: + create: false + name: "" + spec: + privileged: true + fsGroup: + rule: RunAsAny + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - persistentVolumeClaim + - emptyDir + +persistence: + enabled: true + labels: + # Add default labels for the volumeClaimTemplate of the StatefulSet + enabled: false + annotations: {} + +extraVolumes: + [] + # - name: extras + # emptyDir: {} + +extraVolumeMounts: + [] + # - name: extras + # mountPath: /usr/share/extras + # readOnly: true + +extraContainers: + [] + # - name: do-something + # image: busybox + # command: ['do', 'something'] + +extraInitContainers: + [] + # - name: do-something + # image: busybox + # command: ['do', 'something'] + +# This is the PriorityClass settings as defined in +# https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +# By default this will make sure two pods don't end up on the same node +# Changing this to a region would allow you to spread pods across regions +antiAffinityTopologyKey: "kubernetes.io/hostname" + +# Hard means that by default pods will only be scheduled if there are enough nodes for them +# and that they will never end up on the same node. Setting this to soft will do this "best effort" +antiAffinity: "hard" + +# This is the node affinity settings as defined in +# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature +nodeAffinity: {} + +# The default is to deploy all pods serially. By setting this to parallel all pods are started at +# the same time when bootstrapping the cluster +podManagementPolicy: "Parallel" + +# The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when +# there are many services in the current namespace. +# If you experience slow pod startups you probably want to set this to `false`.
+enableServiceLinks: true + +protocol: http +httpPort: 9200 +transportPort: 9300 + +service: + enabled: true + labels: {} + labelsHeadless: {} + type: ClusterIP + # Consider that all endpoints are considered "ready" even if the Pods themselves are not + # https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + nodePort: "" + annotations: {} + httpPortName: http + transportPortName: transport + loadBalancerIP: "" + loadBalancerSourceRanges: [] + externalTrafficPolicy: "" + +updateStrategy: RollingUpdate + +# This is the max unavailable setting for the pod disruption budget +# The default value of 1 will make sure that kubernetes won't allow more than 1 +# of your pods to be unavailable during maintenance +maxUnavailable: 1 + +podSecurityContext: + fsGroup: 1000 + runAsUser: 1000 + +securityContext: + capabilities: + drop: + - ALL + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + +# How long to wait for elasticsearch to stop gracefully +terminationGracePeriod: 120 + +sysctlVmMaxMapCount: 262144 + +readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 3 + timeoutSeconds: 5 + +# https://www.elastic.co/guide/en/elasticsearch/reference/7.17/cluster-health.html#request-params wait_for_status +clusterHealthCheckParams: "wait_for_status=green&timeout=1s" + +## Use an alternate scheduler. +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" + +imagePullSecrets: [] +nodeSelector: {} +tolerations: [] + +# Enabling this will publicly expose your Elasticsearch instance. +# Only enable this if you have security enabled on your cluster +ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "nginx" + pathtype: ImplementationSpecific + hosts: + - host: chart-example.local + paths: + - path: / + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +nameOverride: "" +fullnameOverride: "" +healthNameOverride: "" + +lifecycle: + {} + # preStop: + # exec: + # command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] + # postStart: + # exec: + # command: + # - bash + # - -c + # - | + # #!/bin/bash + # # Add a template to adjust number of shards/replicas + # TEMPLATE_NAME=my_template + # INDEX_PATTERN="logstash-*" + # SHARD_COUNT=8 + # REPLICA_COUNT=1 + # ES_URL=http://localhost:9200 + # while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done + # curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}' + +sysctlInitContainer: + enabled: true + +keystore: [] + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## In order for a Pod to access Elasticsearch, it needs to have the following label: + ## {{ template "uname" . 
}}-client: "true" + ## Example for default configuration to access HTTP port: + ## elasticsearch-master-http-client: "true" + ## Example for default configuration to access transport port: + ## elasticsearch-master-transport-client: "true" + + http: + enabled: false + ## if explicitNamespacesSelector is not set or set to {}, only client Pods being in the networkPolicy's namespace + ## and matching all criteria can reach the DB. + ## But sometimes, we want the Pods to be accessible to clients from other namespaces, in this case, we can use this + ## parameter to select these namespaces + ## + # explicitNamespacesSelector: + # # Accept from namespaces with all those different rules (only from whitelisted Pods) + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + ## Additional NetworkPolicy Ingress "from" rules to set. Note that all rules are OR-ed. + ## + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + + transport: + ## Note that all Elasticsearch Pods can talk to themselves using transport port even if enabled. + enabled: false + # explicitNamespacesSelector: + # matchLabels: + # role: frontend + # matchExpressions: + # - {key: role, operator: In, values: [frontend]} + # additionalRules: + # - podSelector: + # matchLabels: + # role: frontend + # - podSelector: + # matchExpressions: + # - key: role + # operator: In + # values: + # - frontend + +tests: + enabled: true + +# Deprecated +# please use the above podSecurityContext.fsGroup instead +fsGroup: "" diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.lock b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.lock new file mode 100644 index 0000000..cc08a00 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 10.2.5 +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.1 +digest: sha256:a7b6919993123d2aa5fac072d980523e4e3eee61ae8a236f321c2d205921911a +generated: "2022-11-30T07:44:11.034596899Z" diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.yaml new file mode 100644 index 0000000..3379739 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/Chart.yaml @@ -0,0 +1,33 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.3.1 +dependencies: +- condition: zookeeper.enabled + name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 10.x.x +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Apache Kafka is a distributed streaming platform designed to build real-time + pipelines and can be used as a message broker or as a replacement for a log aggregation + solution for big data applications. +home: https://github.com/bitnami/charts/tree/main/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: kafka +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/kafka +- https://kafka.apache.org/ +version: 19.1.5 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/README.md new file mode 100644 index 0000000..447479c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/README.md @@ -0,0 +1,1046 @@ + + +# Apache Kafka packaged by Bitnami + +Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications. + +[Overview of Apache Kafka](http://kafka.apache.org/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add my-repo https://charts.bitnami.com/bitnami +helm install my-release my-repo/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### Kafka parameters + +| Name | Description | Value | +| ------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image repository | `bitnami/kafka` | +| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.3.1-debian-11-r19` | +| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `config` | Configuration file for Kafka. 
Auto-generated based on other parameters when not specified | `""` | +| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` | +| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `true` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories in which kafka's log data is kept | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to ZooKeeper | `6000` | +| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` | +| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties | `""` | +| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users | `true` | +| `superUsers` | You can add super users in server.properties | `User:admin` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.externalClientProtocol` | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `""` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` | +| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` | +| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. This is mandatory if more than one user is specified in clientUsers | `[]` | +| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperUser` | Kafka ZooKeeper user for SASL authentication | `""` | +| `auth.sasl.jaas.zookeeperPassword` | Kafka ZooKeeper password for SASL authentication | `""` | +| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` | +| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` | +| `auth.tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` | +| `auth.tls.existingSecrets` | Array existing secrets containing the TLS certificates for the Kafka brokers | `[]` | +| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` | +| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` | +| `auth.tls.existingSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` | +| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` | +| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate | `""` | +| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` | +| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.zookeeper.tls.enabled` | Enable TLS for Zookeeper client connections. | `false` | +| `auth.zookeeper.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `auth.zookeeper.tls.verifyHostname` | Hostname validation. | `true` | +| `auth.zookeeper.tls.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` | +| `auth.zookeeper.tls.existingSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. | `zookeeper.keystore.jks` | +| `auth.zookeeper.tls.existingSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. 
| `zookeeper.truststore.jks` | +| `auth.zookeeper.tls.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` | +| `auth.zookeeper.tls.passwordsSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. | `keystore-password` | +| `auth.zookeeper.tls.passwordsSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. | `truststore-password` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `""` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | +| `command` | Override Kafka container command | `["/scripts/setup.sh"]` | +| `args` | Override Kafka container arguments | `[]` | +| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` | +| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `extraEnvVarsSecret` | Secret with extra environment variables | `""` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of Kafka nodes | `1` | +| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` | +| `brokerRackAssignment` | Set Broker Assignment for multi tenant environment Allowed values: `aws-az` | `""` | +| `containerPorts.client` | Kafka client container port | `9092` | +| `containerPorts.internal` | Kafka inter-broker container port | `9093` | +| `containerPorts.external` | Kafka external container port | `9094` | +| `livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| 
`startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| `hostAliases` | Kafka pods host aliases | `[]` | +| `hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` | +| `hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` | +| `podLabels` | Extra labels for Kafka pods | `{}` | +| `podAnnotations` | Extra annotations for Kafka pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `podManagementPolicy` | StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees.
There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by Kafka pods | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Kafka statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional init containers to the Kafka pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.internal` | Kafka svc port for inter-broker connections | `9093` | +| `service.ports.external` | Kafka svc port for external connections | `9094` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `service.headless.labels` | Labels for the headless service.
| `{}` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.4-debian-11-r6` | +| `externalAccess.autoDiscovery.image.digest` | Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.service.usePodIPs` | Use the MY_POD_IP address for external access.
| `false` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.service.labels` | Service labels for external access | `{}` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `logPersistence.annotations` | Annotations for the PVC | `{}` | +| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r57` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + + +### Metrics parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r34` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. 
| `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.17.2-debian-11-r23` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose via JMX stats to the JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus.
| `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | + + +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `false` | +| `provisioning.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for Kafka provisioning | `""` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true, use an init container to wait until Kafka is ready before starting provisioning | `true` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.client.clientUser` | User that ZooKeeper clients will use to authenticate | `""` | +| `zookeeper.auth.client.clientPassword` | Password that ZooKeeper clients will use to authenticate | `""` | +| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. | `[]` | + + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`.
For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + my-repo/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml my-repo/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. +Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior.
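+ +As a minimal sketch (only the variable name comes from the example above; the value shown is arbitrary), such a setting can be carried in `extraEnvVars`: + +```yaml +extraEnvVars: + # maps to background.threads in the broker configuration + - name: KAFKA_CFG_BACKGROUND_THREADS + value: "10" # arbitrary example value +```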
+- `auth.sasl.jaas.zookeeperUser`/`auth.sasl.jaas.zookeeperPassword`: when the ZooKeeper chart is deployed with SASL authentication enabled.
+
+In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need to pass the secret names with the `auth.tls.existingSecrets` parameter when deploying the chart.
+
+> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.tls.password` parameter to provide your password.
+
+For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the commands below to create the secrets:
+
+```console
+kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
+kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
+```
+
+> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you generate the JKS files.
+
+If, for some reason (like using cert-manager), you cannot use the default JKS secret scheme, you can use these additional parameters:
+
+- `auth.tls.jksTruststoreSecret` to define an additional secret where the `kafka.truststore.jks` is kept. The truststore password **must** be the same as in `auth.tls.password`.
+- `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`).
+- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers. Setting this parameter means that the chart expects an existing key in the `auth.tls.jksTruststoreSecret` named after the `auth.tls.jksKeystoreSAN` value, and will use it as the keystore for **all** brokers.
+
+> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates.
To handle this, the `auth.tls.pemChainIncluded` property can be set to `true`, and the initContainer created by this chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain).
+
+> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password`.
+
+You can deploy the chart with authentication using the following parameters:
+
+```console
+replicaCount=2
+auth.clientProtocol=sasl
+auth.interBrokerProtocol=tls
+auth.tls.existingSecrets[0]=kafka-jks-0
+auth.tls.existingSecrets[1]=kafka-jks-1
+auth.tls.password=jksPassword
+auth.sasl.jaas.clientUsers[0]=brokerUser
+auth.sasl.jaas.clientPasswords[0]=brokerPassword
+auth.sasl.jaas.zookeeperUser=zookeeperUser
+auth.sasl.jaas.zookeeperPassword=zookeeperPassword
+zookeeper.auth.enabled=true
+zookeeper.auth.serverUsers=zookeeperUser
+zookeeper.auth.serverPasswords=zookeeperPassword
+zookeeper.auth.clientUser=zookeeperUser
+zookeeper.auth.clientPassword=zookeeperPassword
+```
+
+You can deploy the chart with AclAuthorizer using the following parameters:
+
+```console
+replicaCount=2
+auth.clientProtocol=sasl
+auth.interBrokerProtocol=sasl_tls
+auth.tls.existingSecrets[0]=kafka-jks-0
+auth.tls.existingSecrets[1]=kafka-jks-1
+auth.tls.password=jksPassword
+auth.sasl.jaas.clientUsers[0]=brokerUser
+auth.sasl.jaas.clientPasswords[0]=brokerPassword
+auth.sasl.jaas.zookeeperUser=zookeeperUser
+auth.sasl.jaas.zookeeperPassword=zookeeperPassword
+zookeeper.auth.enabled=true
+zookeeper.auth.serverUsers=zookeeperUser
+zookeeper.auth.serverPasswords=zookeeperPassword
+zookeeper.auth.clientUser=zookeeperUser
+zookeeper.auth.clientPassword=zookeeperPassword
+authorizerClassName=kafka.security.authorizer.AclAuthorizer
+allowEveryoneIfNoAclFound=false
+superUsers=User:admin
+```
+
+If you are using Kafka ACLs, you might encounter an event like the following in `kafka-authorizer.log`: `[...] Principal = User:ANONYMOUS is Allowed Operation [...]`.
+
+Setting the parameter `auth.clientProtocol=mtls` sets the Kafka configuration `ssl.client.auth=required`. This option requires clients to authenticate to the Kafka brokers.
+
+As a result, `kafka-authorizer.log` will show events with a specific Subject: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`.
+
+If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the broker certificates into the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags:
+
+```console
+metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""}
+```
+
+### Accessing Kafka brokers from outside the cluster
+
+In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created.
+
+There are three ways of configuring external access: using LoadBalancer services, using NodePort services, or using ClusterIP services.
+
+#### Using LoadBalancer services
+
+You have two alternatives to use LoadBalancer services:
+
+- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.ports.external=9094
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the load balancer IPs:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.ports.external=9094
+externalAccess.service.loadBalancerIPs[0]='external-ip-1'
+externalAccess.service.loadBalancerIPs[1]='external-ip-2'
+```
+
+Note: You need to know the load balancer IPs in advance, so that each Kafka broker's advertised listener can be configured with one of them.
+
+Following the aforementioned steps will also allow you to connect to the brokers from outside the cluster using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections.
+
+#### Using NodePort services
+
+You have two alternatives to use NodePort services:
+
+- Option A) Use random node ports using an **initContainer** that discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the node ports:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.service.nodePorts[0]='node-port-1'
+externalAccess.service.nodePorts[1]='node-port-2'
+```
+
+Note: You need to know the exposed node ports in advance, so that each Kafka broker's advertised listener can be configured with one of them.
+
+The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided.
+
+#### Using ClusterIP services
+
+Note: This option requires that an ingress is deployed within your cluster.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=ClusterIP
+externalAccess.service.ports.external=9094
+externalAccess.service.domain='ingress-ip'
+```
+
+Note: the deployed ingress must contain the following block:
+
+```yaml
+tcp:
+  9094: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-0-external:9094"
+  9095: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-1-external:9094"
+  9096: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-2-external:9094"
+```
+
+#### Name resolution with External-DNS
+
+You can use the following values to generate External-DNS annotations, which automatically create DNS records for each ReplicaSet pod:
+
+```yaml
+externalAccess:
+  service:
+    annotations:
+      external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com"
+```
+
+### Sidecars
+
+If you need additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Setting Pod's affinity
+
+This chart allows you to set your custom affinity using the `affinity` parameter.
Find more information about Pod's affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+### Deploying extra resources
+
+There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example deploys Kafka Connect (a Deployment, a ConfigMap, and a Service) so you can connect Kafka with MongoDB®:
+
+```yaml
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy:
+  - |
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    spec:
+      replicas: 1
+      selector:
+        matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}
+          app.kubernetes.io/component: connector
+      template:
+        metadata:
+          labels: {{- include "common.labels.standard" . | nindent 8 }}
+            app.kubernetes.io/component: connector
+        spec:
+          containers:
+            - name: connect
+              image: KAFKA-CONNECT-IMAGE
+              imagePullPolicy: IfNotPresent
+              ports:
+                - name: connector
+                  containerPort: 8083
+              volumeMounts:
+                - name: configuration
+                  mountPath: /bitnami/kafka/config
+          volumes:
+            - name: configuration
+              configMap:
+                name: {{ include "kafka.fullname" . }}-connect
+  - |
+    apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    data:
+      connect-standalone.properties: |-
+        bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }}
+        ...
+      mongodb.properties: |-
+        connection.uri=mongodb://root:password@mongodb-hostname:27017
+        ...
+  - |
+    apiVersion: v1
+    kind: Service
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "common.labels.standard" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+    spec:
+      ports:
+        - protocol: TCP
+          port: 8083
+          targetPort: connector
+      selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+        app.kubernetes.io/component: connector
+```
+
+You can create the Kafka Connect image using the Dockerfile below:
+
+```Dockerfile
+FROM bitnami/kafka:latest
+# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb
+RUN mkdir -p /opt/bitnami/kafka/plugins && \
+    cd /opt/bitnami/kafka/plugins && \
+    curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar
+CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties
+```
+
+## Persistence
+
+The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container.
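+
+For example, a minimal values sketch that tunes persistence could look like the snippet below (a sketch only: the `persistence.*` keys follow this chart's persistence parameters, and the size and storage class values are placeholders to adapt to your cluster):
+
+```yaml
+# Illustrative values snippet, not a complete values file
+persistence:
+  enabled: true
+  storageClass: ""   # an empty string selects the cluster's default StorageClass
+  accessModes:
+    - ReadWriteOnce
+  size: 8Gi
+```
+
+You could then install with `helm install my-release -f values.yaml my-repo/kafka`, reusing the placeholder repository from the examples above.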
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 19.0.0
+
+This major updates Kafka to its newest version, 3.3.x. For more information, please refer to the [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade).
+
+### To 18.0.0
+
+This major updates the Zookeeper subchart to its newest major, 10.0.0. For more information on this subchart's major, please refer to the [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000).
+
+### To 16.0.0
+
+This major updates the Zookeeper subchart to its newest major, 9.0.0. For more information on this subchart's major, please refer to the [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900).
+
+### To 15.0.0
+
+This major release bumps the Kafka major version to the `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are:
+
+- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map.
+- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map.
+- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- Several parameters marked as deprecated in `14.x.x` are not supported anymore.
+
+A short before/after sketch of the `service.ports` regrouping is shown after the `To 13.0.0` note below.
+
+Additionally, it updates the ZooKeeper subchart to its newest major, `8.0.0`, which contains similar changes.
+
+### To 14.0.0
+
+In this version, the `image` block is defined once and used in the different templates, while in the previous version the `image` block was duplicated for the main container and the provisioning one:
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+```
+
+VS
+
+```yaml
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.8.0
+...
+provisioning:
+  image:
+    registry: docker.io
+    repository: bitnami/kafka
+    tag: 2.8.0
+```
+
+See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes.
+
+### To 13.0.0
+
+This major updates the Zookeeper subchart to its newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to the [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700).
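+
+To illustrate the `service.ports` regrouping introduced in `15.0.0` (see above), here is a hedged before/after values sketch; the port numbers shown are the chart defaults, and the exact sub-key names should be double-checked against your target chart version:
+
+```yaml
+# Before 15.0.0 (sketch)
+service:
+  port: 9092
+  internalPort: 9093
+  externalPort: 9094
+---
+# From 15.0.0 on (sketch): ports regrouped under the service.ports map
+service:
+  ports:
+    client: 9092
+    internal: 9093
+    external: 9094
+```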
+
+### To 12.2.0
+
+This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Move dependency information from the *requirements.yaml* to the *Chart.yaml*
+- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock*
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.8.0
+
+External access to brokers can now be achieved through the cluster's Kafka service.
+
+- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
+
+### To 11.7.0
+
+The way to configure users and passwords changed. It is now possible to create multiple users during the installation by providing lists of users and passwords.
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
+
+### To 11.0.0
+
+The way to configure listeners and authentication on Kafka was completely refactored, allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or removed in favor of new ones in this major version:
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
+- `service.sslPort` -> deprecated in favor of `service.internalPort`
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
+- `metrics.kafka.extraFlag` -> new parameter
+- `metrics.kafka.certificatesSecret` -> new parameter
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed in this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+
+### To 7.0.0
+
+Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
+Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
+
+```console
+helm upgrade kafka my-repo/kafka --version 6.1.8 --set metrics.kafka.enabled=false
+helm upgrade kafka my-repo/kafka --version 7.0.0 --set metrics.kafka.enabled=true
+```
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0.
The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/.helmignore
new file mode 100644
index 0000000..50af031
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/Chart.yaml
new file mode 100644
index 0000000..653c063
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 2.2.1
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.1
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/README.md
new file mode 100644
index 0000000..ec43a5f
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/README.md
@@ -0,0 +1,351 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
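+
+For instance, a minimal sketch of a Service template that combines the names and labels helpers (assuming the `common` dependency from the TL;DR above; the port is illustrative):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+  ports:
+    - port: 80
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+```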
+ +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-----------------------------------|---------------------------------------------------------------|----------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate a secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|------------------------|---------------------------------|----------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|------------------------------------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value to be rendered as a template, and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------|-----------------------------------------------------------------------------------------------------|----------------|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList` | Returns the first `.Values` key with a defined value, or the first of the list if none are defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|---------------------------------------------------|-------------|----------------|
+| `common.validations.values.single.empty` | Validate that a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $`; secret, field and subchart are optional. In case they are given, the helper will generate a how-to-get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mariadb chart is used as a subchart. |
+| `common.validations.values.mysql.passwords` | This helper will ensure the required passwords for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mysql chart is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the postgresql chart is used as a subchart. |
+| `common.validations.values.redis.passwords` | This helper will ensure the required passwords for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the redis chart is used as a subchart. |
+| `common.validations.values.cassandra.passwords` | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the cassandra chart is used as a subchart. |
+| `common.validations.values.mongodb.passwords` | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on whether the mongodb chart is used as a subchart. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|------------------------------|------------------------------------|------------------------------------------------------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName: "" which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_affinities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..81902a6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict 
"topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_capabilities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) 
-}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_errors.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_images.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . 
"context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_ingress.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_labels.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_names.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_secrets.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_storage.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_tplvalues.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_utils.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given a key path +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns the first .Values key with a defined value, or the first key in the list if none are defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_warnings.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using a rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_cassandra.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" .
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mariadb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mongodb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mysql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_postgresql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_redis.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecretValue) (eq $existingSecretValue "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_validations.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty.
+ +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/values.yaml new file mode 100644 index 0000000..f2df68e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/.helmignore new file mode 100644 index 0000000..f0c1319 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.lock b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.lock new file mode 100644 index 0000000..4e24e04 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.1.2 +digest: sha256:1c365a4551a2f4098e9584dc176b289c10437c679c7c3e2ec6153cabf863e1a4 +generated: "2022-11-12T08:51:13.076591259Z" diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100644 index 0000000..c5fbb17 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 3.8.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. +home: https://github.com/bitnami/charts/tree/main/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png +keywords: +- zookeeper +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: zookeeper +sources: +- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper +- https://zookeeper.apache.org/ +version: 10.2.5 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/README.md new file mode 100644 index 0000000..540208e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,527 @@ + + +# Apache ZooKeeper packaged by Bitnami + +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. 
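As a quick, hypothetical illustration (the release name, repo alias, and credential values below are placeholders, not part of the chart), the parameters documented later in this README can be combined on the command line to deploy a three-node ensemble with client authentication enabled:

```console
$ helm install my-release my-repo/zookeeper \
    --set replicaCount=3 \
    --set auth.client.enabled=true \
    --set auth.client.clientUser=zkclient \
    --set auth.client.clientPassword=zkclient-password \
    --set auth.client.serverUsers=zkclient \
    --set auth.client.serverPasswords=zkclient-password
```

The same overrides can be kept in a values file and passed with `-f` instead of repeated `--set` flags.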
+ +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + + +### Common parameters + +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.0-debian-11-r56` | +| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. 
It uses SASL/Digest-MD5 | `false` | +| `auth.client.clientUser` | User that ZooKeeper clients will use to authenticate | `""` | +| `auth.client.clientPassword` | Password that ZooKeeper clients will use to authenticate | `""` | +| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of users to be created | `""` | +| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. | `""` | +| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | Limits the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for the transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma-separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The number of most recent snapshots (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` | The time interval (in hours) at which the purge task is triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server.
ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + + +### Statefulset parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | for the ZooKeeper container(s) to automate configuration before or after 
startup | `{}` |
+| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` |
+| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` |
+| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` |
+| `podSecurityContext.enabled` | Enable ZooKeeper pods' Security Context | `true` |
+| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` |
+| `containerSecurityContext.enabled` | Enable ZooKeeper containers' Security Context | `true` |
+| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` |
+| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `hostAliases` | ZooKeeper pods host aliases | `[]` |
+| `podLabels` | Extra labels for ZooKeeper pods | `{}` |
+| `podAnnotations` | Annotations for ZooKeeper pods | `{}` |
+| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `affinity` | Affinity for pod assignment | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Tolerations for pod assignment | `[]` |
+| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees.
There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | + + +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. | `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + + +### Persistence parameters + +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + + +### Volume Permissions parameters + +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r51` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.enabled` | Enabled init container Security Context | `true` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Metrics parameters + +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +### TLS/SSL parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.auth` | SSL Client auth. Can be "none", "want" or "need". 
| `none` |
+| `tls.client.autoGenerated` | Automatically generate self-signed TLS certificates for ZooKeeper client communications | `false` |
+| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` |
+| `tls.client.existingSecretKeystoreKey` | The secret key from the tls.client.existingSecret containing the Keystore. | `""` |
+| `tls.client.existingSecretTruststoreKey` | The secret key from the tls.client.existingSecret containing the Truststore. | `""` |
+| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` |
+| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` |
+| `tls.client.passwordsSecretName` | Existing secret containing Keystore and Truststore passwords | `""` |
+| `tls.client.passwordsSecretKeystoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. | `""` |
+| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""` |
+| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` |
+| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` |
+| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` |
+| `tls.quorum.auth` | SSL Quorum Client auth. Can be "none", "want" or "need". | `none` |
+| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` |
+| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` |
+| `tls.quorum.existingSecretKeystoreKey` | The secret key from the tls.quorum.existingSecret containing the Keystore. | `""` |
+| `tls.quorum.existingSecretTruststoreKey` | The secret key from the tls.quorum.existingSecret containing the Truststore. | `""` |
+| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` |
+| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` |
+| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and Truststore passwords | `""` |
+| `tls.quorum.passwordsSecretKeystoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. | `""` |
+| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` |
+| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` |
+| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` |
+| `tls.resources.limits` | The resources limits for the TLS init container | `{}` |
+| `tls.resources.requests` | The requested resources for the TLS init container | `{}` |
+
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set auth.client.clientUser=newUser \
+  my-repo/zookeeper
+```
+
+The above command sets the ZooKeeper client user to `newUser`.
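+
+The same settings can be provided declaratively. The snippet below is a minimal values-file sketch using the `auth.client.*` parameters documented above; the user names and passwords are illustrative placeholders, not values shipped with the chart:
+
+```yaml
+# values-auth.yaml (hypothetical example)
+auth:
+  client:
+    enabled: true                       # turn on SASL/Digest-MD5 client-server auth
+    clientUser: newUser                 # user that ZooKeeper clients authenticate as
+    clientPassword: newUserPassword     # password for that user
+    serverUsers: "newUser"              # users to create on the server side
+    serverPasswords: "newUserPassword"  # matching passwords, in the same order
+```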
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml my-repo/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Configure log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the `logLevel` parameter. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs.
+
+In order to remove that log noise so the level can be set to `INFO`, two changes must be made.
+
+First, ensure that you are not getting metrics via the deprecated pattern of polling `mntr` on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`.
+
+Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
+
+You can also set the log4j logging level and which log appenders are turned on by using the `ZOO_LOG4J_PROP` environment variable, which maps to `zookeeper.root.logger` inside `conf/log4j.properties` and defaults to
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
+
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
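+
+As a sketch, the following values keep ZooKeeper data on a PVC and place the transaction logs on their own dedicated volume; the `storageClass` name and the `dataLogDir` path are assumptions for illustration and must be adapted to your cluster:
+
+```yaml
+persistence:
+  enabled: true
+  storageClass: "standard"   # assumption: a StorageClass with this name exists in your cluster
+  size: 8Gi
+  dataLogDir:
+    size: 8Gi                # PVC for the dedicated transaction log volume
+dataLogDir: /bitnami/zookeeper/dataLog   # illustrative path; setting it enables the dedicated log directory
+```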
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Configure the data log directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid contention between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (ZooKeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading
+
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported client-server authentication; to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`.
+
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. A migration example follows the list below.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
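+
+For example, an upgrade command that previously set the flat port values would now target the regrouped maps; the release and repository names below are illustrative:
+
+```console
+$ helm upgrade my-release my-repo/zookeeper \
+    --set containerPorts.client=2181 \
+    --set service.ports.client=2181 \
+    --set pdb.create=true
+```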
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
+### To 5.21.0
+
+A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+to find ways to work around this issue if you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0.
The following example assumes that the release name is zookeeper: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/.helmignore new file mode 100644 index 0000000..50af031 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/Chart.yaml new file mode 100644 index 0000000..6f0c3a6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.1.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.1.2 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/README.md new file mode 100644 index 0000000..a2ecd60 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/README.md @@ -0,0 +1,350 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 1.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . 
}} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. + +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context |
+| `common.names.namespace` | Allow the release namespace to be overridden | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|--------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.passwords.manage` | Generate a secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists` | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|---------------------------|------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value to be rendered as a template, and context is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 0000000..497068f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,98 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 0000000..9d9b760 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. 
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
new file mode 100644
index 0000000..a79cc2e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty, no error is thrown.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release."
-}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 0000000..46c659e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 0000000..831da9c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
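As a usage sketch for the image helpers just added above (the values layout follows the Bitnami convention; the concrete registry, repository, and tag are assumptions):

  image:
    registry: docker.io
    repository: bitnami/zookeeper
    tag: 3.8.0
    digest: ""

  image: {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}

This renders docker.io/bitnami/zookeeper:3.8.0; a non-empty digest switches the separator to "@" so the digest takes precedence over the tag, and global.imageRegistry overrides the per-image registry.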
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 0000000..252066c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 0000000..1bdac8b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,70 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
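A hypothetical Deployment fragment showing how the names and labels helpers are meant to be combined:

  metadata:
    name: {{ include "common.names.fullname" . }}
    labels: {{- include "common.labels.standard" . | nindent 4 }}
  spec:
    selector:
      matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }}

Note that common.labels.matchLabels deliberately omits helm.sh/chart and app.kubernetes.io/managed-by: selectors are immutable, whereas the chart label changes on every chart version bump.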
+Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 0000000..4267d42 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
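To make the existingSecret contract concrete (the secret and key names below are hypothetical):

  auth:
    existingSecret:
      name: my-external-secret
      keyMapping:
        client-password: password

With this input, common.secrets.name resolves to "my-external-secret", and common.secrets.key translates the logical field "client-password" into the secret's actual key "password". Passing a plain string as existingSecret keeps backwards compatibility: the string becomes the secret name and keys are left untouched.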
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password
+      (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - The default value to use if the secret does not already contain the key.
+  - context - Context - Required - Parent context.
+
+*/}}
+{{- define "common.secrets.lookup" -}}
+{{- $value := "" -}}
+{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}}
+{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data -}}
+{{- if and $secretData (hasKey $secretData .key) -}}
+  {{- $value = index $secretData .key -}}
+{{- else -}}
+  {{- $value = $defaultValue | toString | b64enc -}}
+{{- end -}}
+{{- printf "%s" $value -}}
+{{- end -}}
+
+{{/*
+Returns whether a previously generated secret already exists
+
+Usage:
+{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - context - Context - Required - Parent context.
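A sketch of a typical call site for the password manager, assuming an auth.password chart value and a 16-character policy (all names are illustrative):

  apiVersion: v1
  kind: Secret
  metadata:
    name: {{ printf "%s-auth" (include "common.names.fullname" .) }}
  type: Opaque
  data:
    client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.password") "length" 16 "context" $) }}

The helper returns the value already base64-encoded and quoted, so it can sit directly under data:; because of the lookup branch, upgrades reuse the previously generated password instead of rotating it.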
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 0000000..60e2a84 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 0000000..2db1668 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 0000000..8c22b2a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 0000000..ae10fa4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 0000000..ded1ae3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
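For illustration, with values containing auth.password: "s3cret", the first call below returns "s3cret" by walking one path segment per loop iteration, and the second turns the field "client-password" into CLIENT_PASSWORD, the variable name used in the kubectl hint printed by common.utils.secret.getvalue:

  {{ include "common.utils.getValueFromKey" (dict "key" "auth.password" "context" $) }}
  {{ include "common.utils.fieldToEnvVar" (dict "field" "client-password") }}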
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 0000000..b6906ff --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 0000000..f820ec1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
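These per-database validators are normally invoked from a parent chart, often in NOTES.txt, so a render fails fast on missing credentials; a hypothetical wiring for MariaDB used as a subchart:

  {{- include "common.validations.values.mariadb.passwords" (dict "secret" (include "common.names.fullname" .) "subchart" true "context" $) -}}

If auth.existingSecret is unset and the subchart is enabled, a missing mariadb.auth.rootPassword (plus the replication password when the architecture is "replication") aborts the render with the aggregated message from common.validations.values.multiple.empty.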
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 0000000..74472a0 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 0000000..164ec0d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 0000000..dcccfc1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. 
Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl new file mode 100644 index 0000000..9a814cf --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. 
+
+Usage:
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+*/}}
+{{- define "common.validations.values.multiple.empty" -}}
+  {{- range .required -}}
+    {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Validate a value must not be empty.
+
+Usage:
+{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }}
+
+Validate value params:
+  - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password"
+  - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret"
+  - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password"
+  - subchart - String - Optional - Name of the subchart that the validated password is part of.
+*/}}
+{{- define "common.validations.values.single.empty" -}}
+  {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }}
+  {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }}
+
+  {{- if not $value -}}
+    {{- $varname := "my-value" -}}
+    {{- $getCurrentValue := "" -}}
+    {{- if and .secret .field -}}
+      {{- $varname = include "common.utils.fieldToEnvVar" . -}}
+      {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}}
+    {{- end -}}
+    {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}}
+  {{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/values.yaml
new file mode 100644
index 0000000..f2df68e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/charts/common/values.yaml
@@ -0,0 +1,5 @@
+## bitnami/common
+## It is required by CI/CD tools and processes.
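To make the failure mode concrete, a hypothetical caller could collect a validation result and hand it to the errors helper defined in _errors.tpl:

  {{- $err := include "common.validations.values.single.empty" (dict "valueKey" "auth.password" "secret" "my-release" "field" "client-password" "context" $) -}}
  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $err) "context" $) -}}

On an upgrade with an empty auth.password, this fails roughly with: 'auth.password' must not be empty, please add '--set auth.password=$CLIENT_PASSWORD' to the command.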
+## @skip exampleValue
+##
+exampleValue: common-chart
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/NOTES.txt
new file mode 100644
index 0000000..c287e1e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,76 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "service.type=LoadBalancer" and not specifying "auth.client.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also specify a valid password in the
+    "auth.client.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing:
+
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash
+
+In order to replicate the container startup scripts, execute this command:
+
+    /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh
+
+{{- else }}
+
+ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster:
+
+    {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }}
+
+To connect to your ZooKeeper server run the following commands:
+
+    export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
+    kubectl exec -it $POD_NAME -- zkCli.sh
+
+To connect to your ZooKeeper server from outside the cluster execute the following commands:
+
+{{- if eq .Values.service.type "NodePort" }}
+
+    export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+    export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }})
+    zkCli.sh $NODE_IP:$NODE_PORT
+
+{{- else if eq .Values.service.type "LoadBalancer" }}
+
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" .
}}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }} + +{{- else if eq .Values.service.type "ClusterIP" }} + + kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} & + zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }} + +{{- end }} +{{- end }} + +{{- include "zookeeper.validateValues" . }} +{{- include "zookeeper.checkRollingTags" . }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100644 index 0000000..d855bad --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,361 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the proper ZooKeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper client-server authentication credentials secret +*/}} +{{- define "zookeeper.client.secretName" -}} +{{- if .Values.auth.client.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}} +{{- else -}} + {{- printf "%s-client-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the ZooKeeper server-server authentication credentials secret +*/}} +{{- define "zookeeper.quorum.secretName" -}} +{{- if .Values.auth.quorum.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}} +{{- else -}} + {{- printf "%s-quorum-auth" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper client-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.client.createSecret" -}} +{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper server-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.quorum.createSecret" -}} +{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} + {{- else -}} + {{- randAlphaNum $len -}} + {{- end -}} +{{- end }} + +{{/* +Return the ZooKeeper configuration ConfigMap name +*/}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ConfigMap object should be created for ZooKeeper configuration +*/}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper quorum TLS certificates +*/}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) 
 -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsKeystoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore key to be retrieved from tls.quorum.existingSecret.
+*/}}
+{{- define "zookeeper.quorum.tlsTruststoreKey" -}}
+{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsKeystoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.keystore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore key to be retrieved from tls.client.existingSecret.
+*/}}
+{{- define "zookeeper.client.tlsTruststoreKey" -}}
+{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "zookeeper.truststore.jks" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client
+*/}}
+{{- define "zookeeper.client.createTlsPasswordsSecret" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }}
+    {{- true -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the name of the secret containing the Keystore and Truststore password
+*/}}
+{{- define "zookeeper.client.tlsPasswordsSecret" -}}
+{{- $secretName := .Values.tls.client.passwordsSecretName -}}
+{{- if $secretName -}}
+    {{- printf "%s" (tpl $secretName $) -}}
+{{- else -}}
+    {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum keystore password key to be retrieved from tls.quorum.passwordsSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the quorum truststore password key to be retrieved from tls.quorum.passwordsSecretName.
+*/}}
+{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client keystore password key to be retrieved from tls.client.passwordsSecretName.
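+Falls back to "keystore-password" when no custom key is set. For example, with the
+(hypothetical) values below, the key "my-keystore-pass" would be read from the
+secret "zk-tls-passwords":
+
+  tls:
+    client:
+      passwordsSecretName: zk-tls-passwords
+      passwordsSecretKeystoreKey: my-keystore-pass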
+*/}}
+{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}}
+{{- else -}}
+    {{- printf "keystore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the client truststore password key to be retrieved from tls.client.passwordsSecretName.
+*/}}
+{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}}
+{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}}
+    {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}}
+{{- else -}}
+    {{- printf "truststore-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message.
+*/}}
+{{- define "zookeeper.validateValues" -}}
+{{- $messages := list -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}}
+{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client-server authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.client.auth" -}}
+{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }}
+zookeeper: auth.client.enabled
+    In order to enable client-server authentication, you need to provide the list
+    of users to be created and the user to use for client authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Server-server (quorum) authentication enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.auth" -}}
+{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }}
+zookeeper: auth.quorum.enabled
+    In order to enable server-server authentication, you need to provide the list
+    of users to be created and the user to use for quorum authentication.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Client TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.client.tls" -}}
+{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }}
+zookeeper: tls.client.enabled
+    In order to enable Client TLS encryption, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
+{{- end -}}
+{{- end -}}
+
+{{/*
+Validate values of ZooKeeper - Quorum TLS enabled
+*/}}
+{{- define "zookeeper.validateValues.quorum.tls" -}}
+{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }}
+zookeeper: tls.quorum.enabled
+    In order to enable Quorum TLS, you also need to provide
+    an existing secret containing the Keystore and Truststore or
+    enable auto-generated certificates.
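+    For example, set 'tls.quorum.autoGenerated=true' or provide 'tls.quorum.existingSecret'.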
+{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100644 index 0000000..12b4f48 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "zookeeper.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/extra-list.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 0000000..9ac65f9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100644 index 0000000..5afc4b3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100644 index 0000000..6353283 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,41 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow inbound connections to ZooKeeper + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "common.names.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} + {{- end }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} + from: + - podSelector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/pdb.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 0000000..f7faf65 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 0000000..87dcd35 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 0000000..00afbfe --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,98 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+  {{- if .Values.commonLabels }}
+  {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+  {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  init-certs.sh: |-
+    #!/bin/bash
+
+    {{- if .Values.tls.client.enabled }}
+    if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/client/tls.crt" \
+            -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+            -inkey "/certs/client/tls.key" \
+            -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+            -srcstoretype PKCS12 \
+            -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+            -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \
+            -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/client/ca.crt" \
+            -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \
+            -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \
+            -noprompt
+    {{- if .Values.tls.client.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks"
+        cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+    {{- if .Values.tls.quorum.enabled }}
+    if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then
+        openssl pkcs12 -export -in "/certs/quorum/tls.crt" \
+            -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -inkey "/certs/quorum/tls.key" \
+            -out "/tmp/keystore.p12"
+        keytool -importkeystore -srckeystore "/tmp/keystore.p12" \
+            -srcstoretype PKCS12 \
+            -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \
+            -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+        rm "/tmp/keystore.p12"
+        keytool -import -file "/certs/quorum/ca.crt" \
+            -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \
+            -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \
+            -noprompt
+    {{- if .Values.tls.quorum.autoGenerated }}
+    else
+        echo "Couldn't find the expected PEM certificates! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- else }}
+    elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .)
| quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks"
+        cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled."
+        exit 1
+    fi
+    {{- end }}
+    {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    # Execute entrypoint as usual after obtaining ZOO_SERVER_ID
+    # check ZOO_SERVER_ID in persistent volume via myid
+    # if not present, set based on POD hostname
+    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
+        export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
+    else
+        HOSTNAME="$(hostname -s)"
+        if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+            ORD=${BASH_REMATCH[2]}
+            export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))"
+        else
+            echo "Failed to get index from hostname $HOSTNAME"
+            exit 1
+        fi
+    fi
+    exec /entrypoint.sh /run.sh
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/secrets.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/secrets.yaml
new file mode 100644
index 0000000..82ebc2e
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/secrets.yaml
@@ -0,0 +1,77 @@
+{{- if (include "zookeeper.client.createSecret" .) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }}
+  server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }}
+{{- end }}
+{{- if (include "zookeeper.quorum.createSecret" .) }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }}
+  namespace: {{ template "zookeeper.namespace" . }}
+  labels: {{- include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }} + quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-client-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-quorum-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100644 index 0000000..958a57a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,21 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100644 index 0000000..2c8af33 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "common.names.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ template "zookeeper.namespace" . 
}} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100644 index 0000000..a44d17c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,542 @@ +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- include "zookeeper.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/zookeeper + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /scripts/init-certs.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/init-certs.sh + subPath: init-certs.sh + {{- if or .Values.tls.client.enabled }} + - name: client-certificates + mountPath: /certs/client + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + {{- end }} + {{- if or .Values.tls.quorum.enabled }} + - name: quorum-certificates + mountPath: /certs/quorum + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.containerPorts.client | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . }} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }} + {{- if .Values.auth.client.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.client.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.client.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.secretName" . 
}} + key: server-password + {{- end }} + - name: ZOO_ENABLE_QUORUM_AUTH + value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }} + {{- if .Values.auth.quorum.enabled }} + - name: ZOO_QUORUM_LEARNER_USER + value: {{ .Values.auth.quorum.learnerUser | quote }} + - name: ZOO_QUORUM_LEARNER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-learner-password + - name: ZOO_QUORUM_SERVER_USERS + value: {{ .Values.auth.quorum.serverUsers | quote }} + - name: ZOO_QUORUM_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_PORT_NUMBER + value: {{ .Values.containerPorts.tls | quote }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_AUTH + value: {{ .Values.tls.client.auth | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.tls.client.keystorePath | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_CLIENT_AUTH + value: {{ .Values.tls.quorum.auth | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.tls.quorum.keystorePath | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . 
}} + {{- end }} + {{- end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: client + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-tls + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} + - name: follower + containerPort: {{ .Values.containerPorts.follower }} + - name: election + containerPort: {{ .Values.containerPorts.election }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} + exec: + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client 
-quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] + {{- end }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} + - name: config + configMap: + name: {{ include "zookeeper.configmapName" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . 
}} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- if .Values.commonLabels }} + labels: + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100644 index 0000000..ee05e1d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100644 index 0000000..6ad0b10 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + {{- if not .Values.service.disableBaseClientPort }} + - name: tcp-client + port: {{ .Values.service.ports.client }} + targetPort: client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} + - name: tcp-client-tls + port: {{ .Values.service.ports.tls }} + targetPort: client-tls + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} + targetPort: follower + - name: tcp-election + port: {{ .Values.service.ports.election }} + targetPort: election + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 0000000..a07480d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,55 @@ +{{- if (include "zookeeper.client.createTlsSecret" .) 
}} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-client-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-quorum-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/values.yaml new file mode 100644 index 0000000..89420a3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,874 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Extra objects to deploy (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param namespaceOverride Override namespace for ZooKeeper resources +## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent +## +namespaceOverride: "" + +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section ZooKeeper chart parameters + +## Bitnami ZooKeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## @param image.registry ZooKeeper image registry +## @param image.repository ZooKeeper image repository +## @param image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.digest ZooKeeper image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy ZooKeeper image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.8.0-debian-11-r56 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information in the logs + ## + debug: false +## Authentication parameters +## +auth: + client: + ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.client.clientUser User that ZooKeeper clients will use to authenticate + ## + clientUser: "" + ## @param auth.client.clientPassword Password that ZooKeeper clients will use to authenticate + ## + clientPassword: "" + ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.client.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" + quorum: + ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## Note: Make sure the user is included in auth.quorum.serverUsers + ## + learnerUser: "" + ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## + learnerPassword: "" + ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
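
For reference, a minimal values override that turns on both authentication layers described above might look like the sketch below; every username and password here is a placeholder, and note (per the comment above) that the learner user must be included in auth.quorum.serverUsers:

  auth:
    client:
      enabled: true
      clientUser: zkclient            # placeholder
      clientPassword: zkclient-pass   # placeholder
      serverUsers: "zkclient"
      serverPasswords: "zkclient-pass"
    quorum:
      enabled: true
      learnerUser: learner            # placeholder; must appear in serverUsers below
      learnerPassword: learner-pass   # placeholder
      serverUsers: "learner"
      serverPasswords: "learner-pass"
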
+ ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" +## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats +## +tickTime: 2000 +## @param initLimit Amount of time (in ticks) that the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 +## @param syncLimit How far out of date a server can be from a leader +## +syncLimit: 5 +## @param preAllocSize Block size for transaction log file +## +preAllocSize: 65536 +## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) +## +snapCount: 100000 +## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 +## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate +## Defaults to 20 times the tickTime +## +maxSessionTimeout: 40000 +## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms) +## This env var is ignored if Xmx and Xms are configured via `jvmFlags` +## +heapSize: 1024 +## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed +## +fourlwCommandsWhitelist: srvr, mntr, ruok +## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively +## Servers increment their ID starting at this minimal value. +## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively. +## +minServerId: 1 +## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses +## +listenOnAllIPs: false +## Ongoing data directory cleanup configuration +## +autopurge: + ## @param autopurge.snapRetainCount The number of most recent snapshots (and corresponding transaction logs) to retain + ## + snapRetainCount: 3 + ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered + ## Set to a positive integer to enable the auto purging + ## + purgeInterval: 0 +## @param logLevel Log level for the ZooKeeper server. ERROR by default +## Keep in mind that setting it to INFO or WARN will make the readiness probe produce a lot of logs +## +logLevel: ERROR +## @param jvmFlags Default JVM flags for the ZooKeeper process +## +jvmFlags: "" +## @param dataLogDir Dedicated data log directory +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. +## E.g. +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" +## @param configuration Configure ZooKeeper with a custom zoo.cfg file +## e.g: +## configuration: |- +## tickTime=2000 +## initLimit=10 +## ...
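
For a sense of how the timing parameters above combine: with the defaults tickTime=2000, initLimit=10 and syncLimit=5, a follower has initLimit * tickTime = 10 * 2000 ms = 20 s to connect and sync to a leader at startup, and may fall at most syncLimit * tickTime = 5 * 2000 ms = 10 s behind the leader before being dropped from the ensemble; the maxSessionTimeout default of 40000 ms is likewise the documented 20 * tickTime.
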
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] + +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks Lifecycle hooks for the ZooKeeper container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## ZooKeeper resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the ZooKeeper containers +## @param resources.requests.memory The requested memory for the ZooKeeper containers +## @param resources.requests.cpu The requested cpu for the ZooKeeper containers +## +resources: + limits: {} + requests: + memory: 256Mi + cpu: 250m +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable ZooKeeper pods' Security Context +## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Configure Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable ZooKeeper containers' Security Context +## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false +## @param hostAliases ZooKeeper pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match.
Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Kubernetes pod scheduler registry +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type ZooKeeper statefulset strategy type +## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumes: +## - name: zookeeper-keystore +## secret: +## defaultMode: 288 +## secretName: zookeeper-keystore +## - name: zookeeper-truststore +## secret: +## defaultMode: 288 +## secretName: zookeeper-truststore +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumeMounts: +## - name: zookeeper-keystore +## mountPath: /certs/keystore +## readOnly: true +## - name: zookeeper-truststore +## mountPath: /certs/truststore +## readOnly: true +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ZooKeeper pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## ZooKeeper Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param 
pdb.create Deploy a pdb object for the ZooKeeper pod +## @param pdb.minAvailable Minimum available ZooKeeper replicas +## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client ZooKeeper client service port + ## @param service.ports.tls ZooKeeper TLS service port + ## @param service.ports.follower ZooKeeper follower service port + ## @param service.ports.election ZooKeeper election service port + ## + ports: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param service.nodePorts.client Node port for clients + ## @param service.nodePorts.tls Node port for TLS + ## + nodePorts: + client: "" + tls: "" + ## @param service.disableBaseClientPort Remove client port from service definitions. + ## + disableBaseClientPort: false + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP ZooKeeper service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for ZooKeeper service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.headless.annotations Annotations for the Headless Service + ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods + ## + headless: + publishNotReadyAddresses: true + annotations: {} +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is + ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port). + ## + allowExternal: true + +## @section Other Parameters + +## Service account for ZooKeeper to use.
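
If the ZooKeeper service genuinely has to be reachable from outside the cluster, the parameters above can at least restrict who may connect; a minimal sketch combining them (the CIDR range is a placeholder):

  service:
    type: LoadBalancer
    loadBalancerSourceRanges:
      - 10.10.10.0/24   # placeholder; allowed client range
  networkPolicy:
    enabled: true
    allowExternal: false   # require the client label for in-cluster access
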
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod + ## + create: false + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+ ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r51 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests + ## + resources: + limits: {} + requests: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.enabled Enabled init container Security Context + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + enabled: true + runAsUser: 0 + +## @section Metrics parameters +## + +## ZooKeeper Prometheus Exporter configuration +## +metrics: + ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint + ## + enabled: false + ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port + ## + containerPort: 9141 + ## Service configuration + ## + service: + ## @param metrics.service.type ZooKeeper Prometheus Exporter service type + ## + type: ClusterIP + ## @param metrics.service.port ZooKeeper Prometheus Exporter service port + ## + port: 9141 + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## 
@param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## - alert: ZooKeeperSyncedFollowers + ## annotations: + ## message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + ## expr: max(synced_followers{service="my-release-metrics"}) < 2 + ## for: 5m + ## labels: + ## severity: critical + ## - alert: ZooKeeperOutstandingRequests + ## annotations: + ## message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. + ## expr: outstanding_requests{service="my-release-metrics"} > 10 + ## for: 5m + ## labels: + ## severity: critical + ## + rules: [] + +## @section TLS/SSL parameters +## + +## Enable SSL/TLS encryption +## +tls: + client: + ## @param tls.client.enabled Enable TLS for client connections + ## + enabled: false + ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need". + ## + auth: "none" + ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications + ## Currently only supports PEM certificates + ## + autoGenerated: false + ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications + ## + existingSecret: "" + ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore. 
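
As a quick reference, the smallest override that enables client TLS with the parameters in this section lets the chart generate its own self-signed certificates, which is handled by the tls-secrets.yaml template shown earlier (PEM only, per the autoGenerated note above):

  tls:
    client:
      enabled: true
      autoGenerated: true
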
+ ## + existingSecretKeystoreKey: "" + ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need". + ## + auth: "none" + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. 
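
When bringing your own keystore and truststore, the passwordsSecretName parameters above expect a pre-existing Secret holding the store passwords. A sketch of such a Secret, assuming the chart's default key names keystore-password and truststore-password (override via the passwordsSecretKeystoreKey/passwordsSecretTruststoreKey parameters if yours differ; the name and passwords are placeholders):

  apiVersion: v1
  kind: Secret
  metadata:
    name: zookeeper-tls-passwords   # referenced via tls.quorum.passwordsSecretName
  type: Opaque
  stringData:
    keystore-password: changeit     # placeholder
    truststore-password: changeit   # placeholder
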
+ ## + passwordsSecretTruststoreKey: "" + ## @param tls.quorum.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.quorum.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param tls.resources.limits The resources limits for the TLS init container + ## @param tls.resources.requests The requested resources for the TLS init container + ## + resources: + limits: {} + requests: {} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/NOTES.txt new file mode 100644 index 0000000..1bdfb33 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/NOTES.txt @@ -0,0 +1,310 @@ +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing: + + kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh + +{{- else }} + +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}} +{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}} +{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 -d | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}} +{{- $tlsPassword := ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}} +{{- $servicePort := int .Values.service.ports.client -}} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As an alternative, you can also configure the Kafka authentication.
+ +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $clientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $clientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.client.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $clientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.clientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... 
\ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} + +To create a pod that you can use as a Kafka client run the following commands: + + kubectl run {{ $fullname }}-client --restart='Never' --image {{ template "kafka.image" . }} --namespace {{ $releaseNamespace }} --command -- sleep infinity + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/client.properties {{ $fullname }}-client:/tmp/client.properties + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + kubectl cp --namespace {{ $releaseNamespace }} /path/to/kafka_jaas.conf {{ $fullname }}-client:/tmp/kafka_jaas.conf + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) (eq .Values.auth.tls.type "jks") }} + kubectl cp --namespace {{ $releaseNamespace }} ./kafka.truststore.jks {{ $fullname }}-client:/tmp/kafka.truststore.jks + {{- if eq .Values.auth.clientProtocol "mtls" }} + kubectl cp --namespace {{ $releaseNamespace }} ./client.keystore.jks {{ $fullname }}-client:/tmp/client.keystore.jks + {{- end }} + {{- end }} + kubectl exec --tty -i {{ $fullname }}-client --namespace {{ $releaseNamespace }} -- bash + {{- if (include "kafka.client.saslAuthentication" .) }} + export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/kafka_jaas.conf" + {{- end }} + + PRODUCER: + kafka-console-producer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--producer.config /tmp/client.properties \{{ end }} + --broker-list {{ join "," $brokerList }} \ + --topic test + + CONSUMER: + kafka-console-consumer.sh \ + {{ if or (include "kafka.client.saslAuthentication" .) (include "kafka.client.tlsEncryption" .) }}--consumer.config /tmp/client.properties \{{ end }} + --bootstrap-server {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }} \ + --topic test \ + --from-beginning + +{{- if .Values.externalAccess.enabled }} + +To connect to your Kafka server from outside the cluster, follow the instructions below: + +{{- if eq "NodePort" .Values.externalAccess.service.type }} +{{- if .Values.externalAccess.service.domain }} + + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }} + +{{- else }} + + Kafka brokers domain: You can get the external node IP from the Kafka configuration file with the following commands (Check the EXTERNAL listener) + + 1. Obtain the pod name: + + kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka" + + 2. Obtain pod configuration: + + kubectl exec -it KAFKA_POD -- cat /opt/bitnami/kafka/config/server.properties | grep advertised.listeners + +{{- end }} + + Kafka brokers port: You will have a different node port for each Kafka broker. You can get the list of configured node ports using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].spec.ports[0].nodePort}' | tr ' ' '\n')" + +{{- else if contains "LoadBalancer" .Values.externalAccess.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IPs to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w' + + Kafka Brokers domain: You will have a different external IP for each Kafka broker. You can get the list of external IPs using the command below: + + echo "$(kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -o jsonpath='{.items[*].status.loadBalancer.ingress[0].ip}' | tr ' ' '\n')" + + Kafka Brokers port: {{ .Values.externalAccess.service.ports.external }} + +{{- else if eq "ClusterIP" .Values.externalAccess.service.type }} + + Kafka brokers domain: Use your provided hostname to reach Kafka brokers, {{ .Values.externalAccess.service.domain }} + + Kafka brokers port: You will have a different port for each Kafka broker starting at {{ .Values.externalAccess.service.ports.external }} + +{{- end }} + +{{- if not (eq $clientProtocol $externalClientProtocol) }} +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: + + - kafka_jaas.conf: + +KafkaClient { +{{- if $saslMechanisms | regexFind "scram" }} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else }} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- end }} +username="{{ index .Values.auth.sasl.jaas.clientUsers 0 }}" +password="$(kubectl get secret {{ $fullname }}-jaas --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; +}; + + - client.properties: + +security.protocol={{ $externalClientProtocol }} +{{- if $saslMechanisms | regexFind "scram-sha-256" }} +sasl.mechanism=SCRAM-SHA-256 +{{- else if $saslMechanisms | regexFind "scram-sha-512" }} +sasl.mechanism=SCRAM-SHA-512 +{{- else }} +sasl.mechanism=PLAIN +{{- end }} +{{- if eq $externalClientProtocol "SASL_SSL" }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.jks + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- + {{- end }} + {{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= + {{- end }} +{{- end }} + +{{- else if (include "kafka.externalClient.tlsEncryption" .) }} + +You need to configure your Kafka client to access using TLS authentication. 
To do so, you need to create the 'client.properties' configuration file with the content below: + +security.protocol={{ $externalClientProtocol }} +ssl.truststore.type={{ upper .Values.auth.tls.type }} +{{- if eq .Values.auth.tls.type "jks" }} +ssl.truststore.location=/tmp/kafka.truststore.{{ .Values.auth.tls.type }} + {{- if not (empty $tlsPassword) }} +ssl.truststore.password={{ $tlsPassword }} + {{- end }} +{{- else if eq .Values.auth.tls.type "pem" }} +ssl.truststore.certificates=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +{{- end }} +{{- if eq .Values.auth.externalClientProtocol "mtls" }} +ssl.keystore.type={{ upper .Values.auth.tls.type }} + {{- if eq .Values.auth.tls.type "jks" }} +ssl.keystore.location=/tmp/client.keystore.jks + {{- if not (empty $tlsPassword) }} +ssl.keystore.password={{ $tlsPassword }} + {{- end }} + {{- else if eq .Values.auth.tls.type "pem" }} +ssl.keystore.certificate.chain=-----BEGIN CERTIFICATE----- \ +... \ +-----END CERTIFICATE----- +ssl.keystore.key=-----BEGIN ENCRYPTED PRIVATE KEY----- \ +... \ +-----END ENCRYPTED PRIVATE KEY----- + {{- end }} +{{- end }} +{{- if eq $tlsEndpointIdentificationAlgorithm "" }} +ssl.endpoint.identification.algorithm= +{{- end }} + +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- include "kafka.checkRollingTags" . }} +{{- include "kafka.validateValues" . }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/_helpers.tpl new file mode 100644 index 0000000..51ec867 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/_helpers.tpl @@ -0,0 +1,509 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kafka.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified zookeeper name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kafka.zookeeper.fullname" -}} +{{- if .Values.zookeeper.fullnameOverride -}} +{{- .Values.zookeeper.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "kafka.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) 
.Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kafka image name
+*/}}
+{{- define "kafka.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container auto-discovery image)
+*/}}
+{{- define "kafka.externalAccess.autoDiscovery.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.externalAccess.autoDiscovery.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "kafka.volumePermissions.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Create a default fully qualified Kafka exporter name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "kafka.metrics.kafka.fullname" -}}
+  {{- printf "%s-exporter" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }}
+{{- end -}}
+
+{{/*
+ Create the name of the service account to use for Kafka exporter pods
+ */}}
+{{- define "kafka.metrics.kafka.serviceAccountName" -}}
+{{- if .Values.metrics.kafka.serviceAccount.create -}}
+    {{ default (include "kafka.metrics.kafka.fullname" .) .Values.metrics.kafka.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.metrics.kafka.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Kafka exporter image name
+*/}}
+{{- define "kafka.metrics.kafka.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.kafka.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper JMX exporter image name
+*/}}
+{{- define "kafka.metrics.jmx.image" -}}
+{{ include "common.images.image" (dict "imageRoot" .Values.metrics.jmx.image "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+*/}}
+{{- define "kafka.imagePullSecrets" -}}
+{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.externalAccess.autoDiscovery.image .Values.volumePermissions.image .Values.metrics.kafka.image .Values.metrics.jmx.image) "global" .Values.global) }}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
+*/}}
+{{- define "kafka.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic.
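+
+For example (hypothetical values): setting global.storageClass="-" renders
+'storageClassName: ""', while persistence.storageClass="standard" renders
+'storageClassName: standard'.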
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for client communications +*/}} +{{- define "kafka.client.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.clientProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if authentication via SASL should be configured for inter-broker communications +*/}} +{{- define "kafka.interBroker.saslAuthentication" -}} +{{- $saslProtocols := list "sasl" "sasl_tls" -}} +{{- if has .Values.auth.interBrokerProtocol $saslProtocols -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for client connections should be configured +*/}} +{{- define "kafka.client.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.clientProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the configured value for the external client protocol, defaults to the same value as clientProtocol +*/}} +{{- define "kafka.externalClientProtocol" -}} + {{- coalesce .Values.auth.externalClientProtocol .Values.auth.clientProtocol -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for external client connections should be configured +*/}} +{{- define "kafka.externalClient.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has (include "kafka.externalClientProtocol" . ) $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS for inter broker communication connections should be configured +*/}} +{{- define "kafka.interBroker.tlsEncryption" -}} +{{- $tlsProtocols := list "tls" "mtls" "sasl_tls" -}} +{{- if (has .Values.auth.interBrokerProtocol $tlsProtocols) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if encryption via TLS should be configured +*/}} +{{- define "kafka.tlsEncryption" -}} +{{- if or (include "kafka.client.tlsEncryption" .) (include "kafka.interBroker.tlsEncryption" .) (include "kafka.externalClient.tlsEncryption" .) 
-}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the type of listener +Usage: +{{ include "kafka.listenerType" ( dict "protocol" .Values.path.to.the.Value ) }} +*/}} +{{- define "kafka.listenerType" -}} +{{- if eq .protocol "plaintext" -}} +PLAINTEXT +{{- else if or (eq .protocol "tls") (eq .protocol "mtls") -}} +SSL +{{- else if eq .protocol "sasl_tls" -}} +SASL_SSL +{{- else if eq .protocol "sasl" -}} +SASL_PLAINTEXT +{{- end -}} +{{- end -}} + +{{/* +Return the protocol used with zookeeper +*/}} +{{- define "kafka.zookeeper.protocol" -}} +{{- if and .Values.auth.zookeeper.tls.enabled .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL_SSL +{{- else if and .Values.auth.zookeeper.tls.enabled -}} +SSL +{{- else if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser -}} +SASL +{{- else -}} +PLAINTEXT +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka JAAS credentials secret +*/}} +{{- define "kafka.jaasSecretName" -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-jaas" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a JAAS credentials secret object should be created +*/}} +{{- define "kafka.createJaasSecret" -}} +{{- $secretName := .Values.auth.sasl.jaas.existingSecret -}} +{{- if and (or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (empty $secretName) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created +*/}} +{{- define "kafka.createTlsSecret" -}} +{{- if and (include "kafka.tlsEncryption" .) (empty .Values.auth.tls.existingSecrets) (eq .Values.auth.tls.type "pem") .Values.auth.tls.autoGenerated }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + + +{{/* +Returns the secret name for the Kafka Provisioning client +*/}} +{{- define "kafka.client.passwordsSecretName" -}} +{{- if .Values.provisioning.auth.tls.passwordsSecret -}} + {{- printf "%s" (tpl .Values.provisioning.auth.tls.passwordsSecret $) -}} +{{- else -}} + {{- printf "%s-client-secret" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the Kafka Provisioning client +*/}} +{{- define "kafka.provisioning.serviceAccountName" -}} +{{- if .Values.provisioning.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.provisioning.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.provisioning.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.createConfigmap" -}} +{{- if and .Values.config (not .Values.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka log4j ConfigMap name. 
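+This resolves to existingLog4jConfigMap (tpl-rendered) when that value is set,
+and otherwise defaults to "<fullname>-log4j-configuration".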
+*/}} +{{- define "kafka.log4j.configMapName" -}} +{{- if .Values.existingLog4jConfigMap -}} + {{- printf "%s" (tpl .Values.existingLog4jConfigMap $) -}} +{{- else -}} + {{- printf "%s-log4j-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a log4j ConfigMap object should be created. +*/}} +{{- define "kafka.log4j.createConfigMap" -}} +{{- if and .Values.log4j (not .Values.existingLog4jConfigMap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the SASL mechanism to use for the Kafka exporter to access Kafka +The exporter uses a different nomenclature so we need to do this hack +*/}} +{{- define "kafka.metrics.kafka.saslMechanism" -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms }} +{{- if contains "scram-sha-512" $saslMechanisms }} + {{- print "scram-sha512" -}} +{{- else if contains "scram-sha-256" $saslMechanisms }} + {{- print "scram-sha256" -}} +{{- else -}} + {{- print "plain" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the Kafka configuration configmap +*/}} +{{- define "kafka.metrics.jmx.configmapName" -}} +{{- if .Values.metrics.jmx.existingConfigmap -}} + {{- printf "%s" (tpl .Values.metrics.jmx.existingConfigmap $) -}} +{{- else -}} + {{- printf "%s-jmx-configuration" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap object should be created +*/}} +{{- define "kafka.metrics.jmx.createConfigmap" -}} +{{- if and .Values.metrics.jmx.enabled .Values.metrics.jmx.config (not .Values.metrics.jmx.existingConfigmap) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "kafka.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.externalAccess.autoDiscovery.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.kafka.image }} +{{- include "common.warnings.rollingTag" .Values.metrics.jmx.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "kafka.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "kafka.validateValues.authProtocols" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.nodePortListLength" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.domainSpecified" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceType" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryRBAC" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerIPs" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerNames" "context" .)) -}} +{{- $messages := append $messages (include "kafka.validateValues.externalAccessServiceList" (dict "element" "loadBalancerAnnotations" "context" . )) -}} +{{- $messages := append $messages (include "kafka.validateValues.saslMechanisms" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets" .) -}} +{{- $messages := append $messages (include "kafka.validateValues.tlsSecrets.length" .) 
-}}
+{{- $messages := append $messages (include "kafka.validateValues.tlsPasswords" .) -}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Authentication protocols for Kafka */}}
+{{- define "kafka.validateValues.authProtocols" -}}
+{{- $authProtocols := list "plaintext" "tls" "mtls" "sasl" "sasl_tls" -}}
+{{- if or (not (has .Values.auth.clientProtocol $authProtocols)) (not (has .Values.auth.interBrokerProtocol $authProtocols)) (not (has (include "kafka.externalClientProtocol" . ) $authProtocols)) -}}
+kafka: auth.clientProtocol auth.externalClientProtocol auth.interBrokerProtocol
+    Available authentication protocols are "plaintext", "tls", "mtls", "sasl" and "sasl_tls"
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of replicas must be the same as NodePort list */}}
+{{- define "kafka.validateValues.nodePortListLength" -}}
+{{- $replicaCount := int .Values.replicaCount }}
+{{- $nodePortListLength := len .Values.externalAccess.service.nodePorts }}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $nodePortListLength )) (eq .Values.externalAccess.service.type "NodePort") -}}
+kafka: .Values.externalAccess.service.nodePorts
+    Number of replicas and nodePort array length must be the same. Currently: replicaCount = {{ $replicaCount }} and nodePorts = {{ $nodePortListLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - domain must be defined if external service type ClusterIP */}}
+{{- define "kafka.validateValues.domainSpecified" -}}
+{{- if and (eq .Values.externalAccess.service.type "ClusterIP") (eq .Values.externalAccess.service.domain "") -}}
+kafka: .Values.externalAccess.service.domain
+    Domain must be specified if service type ClusterIP is set for the external service
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - service type for external access */}}
+{{- define "kafka.validateValues.externalAccessServiceType" -}}
+{{- if and (not (eq .Values.externalAccess.service.type "NodePort")) (not (eq .Values.externalAccess.service.type "LoadBalancer")) (not (eq .Values.externalAccess.service.type "ClusterIP")) -}}
+kafka: externalAccess.service.type
+    Available service types for external access are NodePort, LoadBalancer and ClusterIP.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - RBAC should be enabled when autoDiscovery is enabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryRBAC" -}}
+{{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled (not .Values.rbac.create ) }}
+kafka: rbac.create
+    By specifying "externalAccess.enabled=true" and "externalAccess.autoDiscovery.enabled=true"
+    an initContainer will be used to auto-detect the external IPs/ports by querying the
+    K8s API. Please note this initContainer requires specific RBAC resources. You can create them
+    by specifying "--set rbac.create=true".
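+    For example (illustrative release name and chart path only):
+
+        helm upgrade --install my-release . \
+          --set externalAccess.enabled=true \
+          --set externalAccess.autoDiscovery.enabled=true \
+          --set rbac.create=true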
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - LoadBalancerIPs or LoadBalancerNames should be set when autoDiscovery is disabled */}}
+{{- define "kafka.validateValues.externalAccessAutoDiscoveryIPsOrNames" -}}
+{{- $loadBalancerNameListLength := len .Values.externalAccess.service.loadBalancerNames -}}
+{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}}
+{{- if and .Values.externalAccess.enabled (eq .Values.externalAccess.service.type "LoadBalancer") (not .Values.externalAccess.autoDiscovery.enabled) (eq $loadBalancerNameListLength 0) (eq $loadBalancerIPListLength 0) }}
+kafka: externalAccess.service.loadBalancerNames or externalAccess.service.loadBalancerIPs
+    By specifying "externalAccess.enabled=true", "externalAccess.autoDiscovery.enabled=false" and
+    "externalAccess.service.type=LoadBalancer" at least one of externalAccess.service.loadBalancerNames
+    or externalAccess.service.loadBalancerIPs must be set and the length of those arrays must be equal
+    to the number of replicas.
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - number of replicas must be the same as loadBalancerIPs list */}}
+{{- define "kafka.validateValues.externalAccessServiceList" -}}
+{{- $replicaCount := int .context.Values.replicaCount }}
+{{- $listLength := len (get .context.Values.externalAccess.service .element) -}}
+{{- if and .context.Values.externalAccess.enabled (not .context.Values.externalAccess.autoDiscovery.enabled) (eq .context.Values.externalAccess.service.type "LoadBalancer") (gt $listLength 0) (not (eq $replicaCount $listLength)) }}
+kafka: externalAccess.service.{{ .element }}
+    Number of replicas and {{ .element }} array length must be the same. Currently: replicaCount = {{ $replicaCount }} and {{ .element }} = {{ $listLength }}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - SASL mechanisms must be provided when using SASL */}}
+{{- define "kafka.validateValues.saslMechanisms" -}}
+{{- if and (or (.Values.auth.clientProtocol | regexFind "sasl") (.Values.auth.interBrokerProtocol | regexFind "sasl") (and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser)) (not .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
+    The SASL mechanisms are required when either auth.clientProtocol or auth.interBrokerProtocol uses SASL, or when a ZooKeeper user is provided.
+{{- end }}
+{{- if not (contains .Values.auth.sasl.interBrokerMechanism .Values.auth.sasl.mechanisms) }}
+kafka: auth.sasl.mechanisms
+    auth.sasl.interBrokerMechanism must be provided and it should be one of the mechanisms specified at auth.sasl.mechanisms
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - Secrets containing TLS certs must be provided when TLS authentication is enabled */}}
+{{- define "kafka.validateValues.tlsSecrets" -}}
+{{- if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "jks") (empty .Values.auth.tls.existingSecrets) }}
+kafka: auth.tls.existingSecrets
+    A secret containing the Kafka JKS keystores and truststore is required
+    when TLS encryption is enabled and TLS format is "JKS"
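+    For example (illustrative values): --set auth.tls.existingSecrets[0]=kafka-jks-0,
+    with one JKS secret per Kafka replica (see the length validation below).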
+{{- else if and (include "kafka.tlsEncryption" .) (eq .Values.auth.tls.type "pem") (empty .Values.auth.tls.existingSecrets) (not .Values.auth.tls.autoGenerated) }}
+kafka: auth.tls.existingSecrets
+    A secret containing the Kafka TLS certificates and keys is required
+    when TLS encryption is enabled and TLS format is "PEM"
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka - The number of secrets containing TLS certs should be equal to the number of replicas */}}
+{{- define "kafka.validateValues.tlsSecrets.length" -}}
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and (include "kafka.tlsEncryption" .) (not (empty .Values.auth.tls.existingSecrets)) }}
+{{- $existingSecretsLength := len .Values.auth.tls.existingSecrets }}
+{{- if ne $replicaCount $existingSecretsLength }}
+kafka: .Values.auth.tls.existingSecrets
+    Number of replicas and existingSecrets array length must be the same. Currently: replicaCount = {{ $replicaCount }} and existingSecrets = {{ $existingSecretsLength }}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Kafka provisioning - keyPasswordSecretKey, keystorePasswordSecretKey or truststorePasswordSecretKey must not be used without passwordsSecret */}}
+{{- define "kafka.validateValues.tlsPasswords" -}}
+{{- if and (include "kafka.client.tlsEncryption" .) (not .Values.auth.tls.passwordsSecret) }}
+{{- if or .Values.auth.tls.keyPasswordSecretKey .Values.auth.tls.keystorePasswordSecretKey .Values.auth.tls.truststorePasswordSecretKey }}
+kafka: auth.tls.keyPasswordSecretKey,auth.tls.keystorePasswordSecretKey,auth.tls.truststorePasswordSecretKey
+    auth.tls.keyPasswordSecretKey, auth.tls.keystorePasswordSecretKey and auth.tls.truststorePasswordSecretKey
+    must not be used without setting passwordsSecret.
+{{- end -}}
+{{- end -}}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/configmap.yaml
new file mode 100644
index 0000000..509fd1c
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/configmap.yaml
@@ -0,0 +1,17 @@
+{{- if (include "kafka.createConfigmap" .) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ printf "%s-configuration" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  server.properties: |-
+    {{ .Values.config | nindent 4 }}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/extra-list.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/extra-list.yaml
new file mode 100644
index 0000000..9ac65f9
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/extra-list.yaml
@@ -0,0 +1,4 @@
+{{- range .Values.extraDeploy }}
+---
+{{ include "common.tplvalues.render" (dict "value" .
"context" $) }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jaas-secret.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jaas-secret.yaml new file mode 100644 index 0000000..8f632e5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jaas-secret.yaml @@ -0,0 +1,40 @@ +{{- if (include "kafka.createJaasSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-jaas" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + {{- $clientPasswords := .Values.auth.sasl.jaas.clientPasswords }} + {{- if $clientPasswords }} + client-passwords: {{ join "," $clientPasswords | b64enc | quote }} + system-user-password: {{ index $clientPasswords 0 | b64enc | quote }} + {{- else }} + {{- $passwords := list }} + {{- range $clientUsers }} + {{- $passwords = append $passwords (randAlphaNum 10) }} + {{- end }} + client-passwords: {{ join "," $passwords | b64enc | quote }} + system-user-password: {{ index $passwords 0 | b64enc | quote }} + {{- end }} + {{- end }} + {{- $zookeeperUser := .Values.auth.sasl.jaas.zookeeperUser }} + {{- if and .Values.zookeeper.auth.client.enabled $zookeeperUser }} + {{- $zookeeperPassword := .Values.auth.sasl.jaas.zookeeperPassword }} + zookeeper-password: {{ default (randAlphaNum 10) $zookeeperPassword | b64enc | quote }} + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + {{- $interBrokerPassword := .Values.auth.sasl.jaas.interBrokerPassword }} + inter-broker-password: {{ default (randAlphaNum 10) $interBrokerPassword | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-configmap.yaml new file mode 100644 index 0000000..9207f6d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-configmap.yaml @@ -0,0 +1,64 @@ +{{- if (include "kafka.metrics.jmx.createConfigmap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-jmx-configuration" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  jmx-kafka-prometheus.yml: |-
+    {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.config "context" $ ) | nindent 4 }}
+    rules:
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(QueueSize), broker-id=(\d+)><>(Value)
+        name: kafka_controller_$1_$2_$4
+        labels:
+          broker_id: "$3"
+      - pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count)
+        name: kafka_controller_$1_$2_$3
+      - pattern: kafka.server<type=(ReplicaFetcherManager), name=(MaxLag), clientId=(.+)><>(Value)
+        name: kafka_server_$1_$2_$4
+        labels:
+          client_id: "$3"
+      - pattern : kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value)
+        name: kafka_network_$1_$2_$4
+        labels:
+          network_processor: $3
+      - pattern : kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count)
+        name: kafka_network_$1_$2_$4
+        labels:
+          request: $3
+      - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
+        name: kafka_server_$1_$2_$4
+        labels:
+          topic: $3
+      - pattern: kafka.server<type=(DelayedOperationPurgatory), name=(.+), delayedOperation=(.+)><>(Value)
+        name: kafka_server_$1_$2_$3_$4
+      - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
+        name: kafka_server_$1_total_$2_$3
+      - pattern: kafka.server<type=(.+)><>(queue-size)
+        name: kafka_server_$1_$2
+      - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
+        name: java_lang_$1_$4_$3_$2
+      - pattern: java.lang<type=(.+), name=(.+)><>(\w+)
+        name: java_lang_$1_$3_$2
+      - pattern : java.lang<type=(.*)>
+      - pattern: kafka.log<type=(Log), name=(.+), topic=(.+), partition=(.+)><>Value
+        name: kafka_log_$1_$2
+        labels:
+          topic: $3
+          partition: $4
+    {{- if .Values.metrics.jmx.extraRules }}
+      {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }}
+    {{- end }}
+{{- end -}}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-metrics-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-metrics-svc.yaml
new file mode 100644
index 0000000..35c79f4
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/jmx-metrics-svc.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.metrics.jmx.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" .
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.ports.metrics }} + protocol: TCP + targetPort: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-deployment.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100644 index 0000000..bf731f2 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,171 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "common.names.fullname" . -}} +{{- $servicePort := int .Values.service.ports.client -}} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ include "kafka.metrics.kafka.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: cluster-metrics + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.kafka.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.metrics.kafka.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} + {{- if .Values.metrics.kafka.schedulerName }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + {{- if .Values.metrics.kafka.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . 
}} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + {{- if .Values.metrics.kafka.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }} + {{- else }} + command: + - bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ce + - | + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username=$SASL_USERNAME \ + --sasl.password=$SASL_USER_PASSWORD \ + --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \ + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \ + --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \ + {{- if .Values.metrics.kafka.tlsCaSecret }} + --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- else }} + --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + env: + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: system-user-password + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + mountPath: /opt/bitnami/kafka-exporter/cacert/ + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + secret: + secretName: {{ .Values.metrics.kafka.tlsCaSecret }} + defaultMode: 0440 + {{- end }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-serviceaccount.yaml new file mode 100644 index 0000000..f8e3eb3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100644 index 0000000..9daae4a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }}
+  annotations:
+    {{- if .Values.metrics.kafka.service.annotations }}
+    {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: ClusterIP
+  sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }}
+  {{- if .Values.metrics.kafka.service.clusterIP }}
+  clusterIP: {{ .Values.metrics.kafka.service.clusterIP }}
+  {{- end }}
+  ports:
+    - name: http-metrics
+      port: {{ .Values.metrics.kafka.service.ports.metrics }}
+      protocol: TCP
+      targetPort: metrics
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: cluster-metrics
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-secret.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-secret.yaml
new file mode 100644
index 0000000..0c0fb1b
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-secret.yaml
@@ -0,0 +1,19 @@
+{{- if and .Values.provisioning.enabled (include "kafka.client.tlsEncryption" .) (not .Values.provisioning.auth.tls.passwordsSecret) }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ template "kafka.client.passwordsSecretName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+type: Opaque
+data:
+  truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }}
+  keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }}
+  key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }}
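+  # Note: each key above is read back by the provisioning job through the
+  # provisioning.auth.tls.truststorePasswordSecretKey, keystorePasswordSecretKey
+  # and keyPasswordSecretKey values (see kafka-provisioning.yaml).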
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml
new file mode 100644
index 0000000..4761467
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.provisioning.serviceAccount.create }}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "kafka.provisioning.serviceAccountName" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }}
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning.yaml
new file mode 100644
index 0000000..765e883
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/kafka-provisioning.yaml
@@ -0,0 +1,260 @@
+{{- if .Values.provisioning.enabled }}
+{{- $replicaCount := int .Values.replicaCount }}
+kind: Job
+apiVersion: batch/v1
+metadata:
+  name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: kafka-provisioning
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    helm.sh/hook: post-install,post-upgrade
+    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  template:
+    metadata:
+      labels: {{- include "common.labels.standard" . | nindent 8 }}
+        app.kubernetes.io/component: kafka-provisioning
+        {{- if .Values.provisioning.podLabels }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }}
+        {{- end }}
+      annotations:
+        {{- if .Values.provisioning.podAnnotations }}
+        {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }}
+        {{- end }}
+    spec:
+      serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }}
+      {{- include "kafka.imagePullSecrets" . | nindent 6 }}
+      {{- if .Values.provisioning.schedulerName }}
+      schedulerName: {{ .Values.provisioning.schedulerName | quote }}
+      {{- end }}
+      {{- if .Values.provisioning.podSecurityContext.enabled }}
+      securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }}
+      {{- end }}
+      restartPolicy: OnFailure
+      terminationGracePeriodSeconds: 0
+      {{- if .Values.provisioning.nodeSelector }}
+      nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }}
+      {{- end }}
+      {{- if .Values.provisioning.tolerations }}
+      tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }}
+      {{- end }}
+      {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }}
+      initContainers:
+        {{- if .Values.provisioning.waitForKafka }}
+        - name: wait-for-available-kafka
+          image: {{ include "kafka.image" .
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.client | int64 }}; + echo "Kafka is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + echo "Configuring environment" + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}" + if [ ! -f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! 
is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD"
+              {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }}
+              file_to_multiline_property() {
+                  awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}"
+              }
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")"
+              {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }}
+              kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}"
+              kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}"
+              ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD"
+              ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD"
+              {{- end }}
+              {{- end }}
+              {{- if (include "kafka.client.saslAuthentication" .) }}
+              {{- if contains "plain" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+              {{- else if contains "scram-sha-256" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+              {{- else if contains "scram-sha-512" .Values.auth.sasl.mechanisms }}
+              kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512
+              kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";"
+              {{- end }}
+              {{- end }}
+              fi
+
+              echo "Running pre-provisioning script if any given"
+              {{ .Values.provisioning.preScript | nindent 14 }}
+
+              kafka_provisioning_commands=(
+              {{- range $topic := .Values.provisioning.topics }}
+                "/opt/bitnami/kafka/bin/kafka-topics.sh \
+                    --create \
+                    --if-not-exists \
+                    --bootstrap-server ${KAFKA_SERVICE} \
+                    --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \
+                    --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \
+                    {{- range $name, $value := $topic.config }}
+                    --config {{ $name }}={{ $value }} \
+                    {{- end }}
+                    --command-config ${CLIENT_CONF} \
+                    --topic {{ $topic.name }}"
+              {{- end }}
+              {{- range $command := .Values.provisioning.extraProvisioningCommands }}
+                {{- $command | quote | nindent 16 }}
+              {{- end }}
+              )
+
+              echo "Starting provisioning"
+              for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }}))
+              do
+                for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1)))
+                do
+                    ${kafka_provisioning_commands[j]} & # Async command
+                done
+                wait # Wait for the 
jobs + done + + echo "Running post-provisioning script if any given" + {{ .Values.provisioning.postScript | nindent 14 }} + + echo "Provisioning succeeded" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: system-user-password + {{- end }} + {{- if .Values.provisioning.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }} + envFrom: + {{- if .Values.provisioning.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.provisioning.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/log4j-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/log4j-configmap.yaml new file mode 100644 index 0000000..8f7bc6c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,17 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-egress.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-egress.yaml new file mode 100644 index 0000000..068024a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-egress.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-ingress.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-ingress.yaml new file mode 100644 index 0000000..258dcab --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/networkpolicy-ingress.yaml @@ -0,0 +1,53 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . 
}}
+metadata:
+  name: {{ printf "%s-ingress" (include "common.names.fullname" .) }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "common.labels.matchLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    # Allow client connections
+    - ports:
+        - port: {{ .Values.containerPorts.client }}
+      {{- if not .Values.networkPolicy.allowExternal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{ template "common.names.fullname" . }}-client: "true"
+          {{- if .Values.networkPolicy.explicitNamespacesSelector }}
+          namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }}
+          {{- end }}
+      {{- end }}
+    # Allow inter-broker communication
+    - ports:
+        - port: {{ .Values.containerPorts.internal }}
+      from:
+        - podSelector:
+            matchLabels:
+              {{- include "common.labels.matchLabels" . | nindent 14 }}
+    # Allow external connections
+    {{- if .Values.externalAccess.enabled }}
+    - ports:
+        - port: {{ .Values.containerPorts.external }}
+      {{- if .Values.networkPolicy.externalAccess.from }}
+      from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }}
+      {{- end }}
+    {{- end }}
+    {{- if .Values.metrics.kafka.enabled }}
+    # Allow Prometheus scrapes
+    - ports:
+        - port: {{ .Values.metrics.kafka.containerPorts.metrics }}
+    {{- end }}
+{{- end }}
diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/poddisruptionbudget.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/poddisruptionbudget.yaml
new file mode 100644
index 0000000..e0a6015
--- /dev/null
+++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/poddisruptionbudget.yaml
@@ -0,0 +1,26 @@
+{{- $replicaCount := int .Values.replicaCount }}
+{{- if and .Values.pdb.create (gt $replicaCount 1) }}
+apiVersion: {{ include "common.capabilities.policy.apiVersion" . }}
+kind: PodDisruptionBudget
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  namespace: {{ .Release.Namespace | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/component: kafka
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.pdb.minAvailable }}
+  minAvailable: {{ .Values.pdb.minAvailable }}
+  {{- end }}
+  {{- if .Values.pdb.maxUnavailable }}
+  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
+  {{- end }}
+  selector:
+    matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/prometheusrule.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/prometheusrule.yaml new file mode 100644 index 0000000..bce728a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if and (or .Values.metrics.kafka.enabled .Values.metrics.jmx.enabled) .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.labels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) | nindent 4 }} + {{- end }} +spec: + groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/role.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/role.yaml new file mode 100644 index 0000000..63215b3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/role.yaml @@ -0,0 +1,24 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/rolebinding.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/rolebinding.yaml new file mode 100644 index 0000000..fb5e3a1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ include "common.names.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/scripts-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/scripts-configmap.yaml new file mode 100644 index 0000000..57e1250 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,202 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "common.names.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.ports.internal }} + {{- $clientPort := .Values.service.ports.client }} + {{- $jksTruststoreSecret := .Values.auth.tls.jksTruststoreSecret -}} + {{- $jksTruststore := .Values.auth.tls.jksTruststore -}} + {{- $jksKeystoreSAN := .Values.auth.tls.jksKeystoreSAN -}} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local 
namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + if [[ -f "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" ]]; then + export KAFKA_CFG_BROKER_ID="$(grep "broker.id" "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" | awk -F '=' '{print $2}')" + else + export KAFKA_CFG_BROKER_ID="$((ID + {{ .Values.minBrokerId }}))" + fi + + {{- if eq .Values.brokerRackAssignment "aws-az" }} + export KAFKA_CFG_BROKER_RACK=$(curl "http://169.254.169.254/latest/meta-data/placement/availability-zone-id") + {{- end }} + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_HOST=$(echo '{{ .Values.externalAccess.service.loadBalancerNames | default .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.ports.external }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if and .Values.externalAccess.service.usePodIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${MY_POD_IP}" + {{- else if or .Values.externalAccess.service.useHostIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${HOST_IP}" + {{- else if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_HOST=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- else }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_PORT="$((ID + {{ .Values.externalAccess.service.ports.external }}))" + {{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ join "," .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_HOST}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) 
}}
+    mkdir -p /opt/bitnami/kafka/config/certs
+    {{- if eq .Values.auth.tls.type "jks" }}
+    {{- if not (empty .Values.auth.tls.existingSecrets) }}
+    JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs-${ID}" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }}
+    JKS_KEYSTORE={{ printf "/certs-${ID}/%s" (default "kafka.keystore.jks" $jksKeystoreSAN) | quote }}
+    {{- else }}
+    JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }}
+    JKS_KEYSTORE={{ printf "/certs/%s" (default "kafka-${ID}.keystore.jks" $jksKeystoreSAN) | quote }}
+    {{- end }}
+    if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then
+        cp "$JKS_TRUSTSTORE" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks"
+        cp "$JKS_KEYSTORE" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks"
+    else
+        echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled."
+        exit 1
+    fi
+    export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.jks"
+
+    {{- else if eq .Values.auth.tls.type "pem" }}
+
+    {{- if or (not (empty .Values.auth.tls.existingSecrets)) .Values.auth.tls.autoGenerated }}
+    PEM_CA="/certs-${ID}/ca.crt"
+    PEM_CERT="/certs-${ID}/tls.crt"
+    PEM_KEY="/certs-${ID}/tls.key"
+    {{- else }}
+    PEM_CA="/certs/kafka.truststore.pem"
+    PEM_CERT="/certs/kafka-${ID}.keystore.pem"
+    PEM_KEY="/certs/kafka-${ID}.keystore.key"
+    {{- end }}
+    if [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then
+        CERT_DIR="/opt/bitnami/kafka/config/certs"
+        PEM_CA_LOCATION="${CERT_DIR}/kafka.truststore.pem"
+        PEM_CERT_LOCATION="${CERT_DIR}/kafka.keystore.pem"
+        {{- if .Values.auth.tls.pemChainIncluded }}
+        cat $PEM_CERT | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f ${CERT_DIR}/xx
+        FIND_CA_RESULT=$(find ${CERT_DIR} -not -name 'xx00' -name 'xx*')
+        if [[ $(echo $FIND_CA_RESULT | wc -l) < 1 ]]; then
+            echo "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert"
+            exit 1
+        fi
+        echo $FIND_CA_RESULT | sort | xargs cat >> "$PEM_CA_LOCATION"
+        cat ${CERT_DIR}/xx00 > "$PEM_CERT_LOCATION"
+        {{- else }}
+        if [[ -f "$PEM_CA" ]]; then
+            cp "$PEM_CA" "$PEM_CA_LOCATION"
+            cp "$PEM_CERT" "$PEM_CERT_LOCATION"
+        else
+            echo "PEM_CA not provided, and auth.tls.pemChainIncluded was not true. One of these values must be set when using PEM type for TLS."
+            exit 1
+        fi
+        {{- end }}
+
+        # Ensure the key uses the PKCS#8 PEM format
+        openssl pkcs8 -topk8 -nocrypt -in "$PEM_KEY" > "/opt/bitnami/kafka/config/certs/kafka.keystore.key"
+    else
+        echo "Couldn't find the expected PEM files! They are mandatory when encryption via TLS is enabled." 
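+        # Fail fast: starting Kafka without the expected TLS material would only surface later as harder-to-debug broker errors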
+ exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.pem" + {{- end }} + {{- end }} + + # Configure zookeeper client + {{- if and (not (empty .Values.auth.zookeeper.tls.existingSecret)) .Values.auth.zookeeper.tls.enabled }} + JKS_TRUSTSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretTruststoreKey) | quote }} + JKS_KEYSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretKeystoreKey) | quote }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + TRUSTSTORE_LOCATION="${CERT_DIR}/zookeeper.truststore.jks" + cp "$JKS_TRUSTSTORE" "$TRUSTSTORE_LOCATION" + cp "$JKS_KEYSTORE" "${CERT_DIR}/zookeeper.keystore.jks" + export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${TRUSTSTORE_LOCATION}" + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/serviceaccount.yaml new file mode 100644 index 0000000..73091f5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,20 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100644 index 0000000..1919fee --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-metrics.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100644 index 0000000..3431946 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: cluster-metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/statefulset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/statefulset.yaml new file mode 100644 index 0000000..2146a3d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,610 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "common.names.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.ports.internal }} +{{- $clientPort := .Values.service.ports.client }} +{{- $interBrokerProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.interBrokerProtocol) -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: {{ .Values.podManagementPolicy }} + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if (include "kafka.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/jaas-secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "kafka.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: {{ .Values.hostIPC }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "kafka.serviceAccountName" . }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.initContainers }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + find "{{ .Values.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ printf "%s%s" (include "kafka.zookeeper.fullname" .) (tpl .Values.zookeeperChrootPath .) | quote }} + {{- else }} + value: {{ include "common.tplvalues.render" (dict "value" (printf "%s%s" (join "," .Values.externalZookeeper.servers) (tpl .Values.zookeeperChrootPath .)) "context" $) }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $externalClientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($externalClientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ upper .Values.auth.sasl.mechanisms | quote }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.sasl.interBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ join "," .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- if eq .Values.externalAccess.service.type "NodePort" }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ join "," .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + 
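+              # Renders "yes"/"no" from .Values.allowPlaintextListener; the Bitnami image refuses plaintext (non-TLS/SASL) listeners unless this is "yes"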
value: {{ ternary "yes" "no" .Values.allowPlaintextListener | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.sasl.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.sasl.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.sasl.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: {{ include "kafka.zookeeper.protocol" . }} + {{- if .Values.auth.zookeeper.tls.enabled }} + - name: KAFKA_ZOOKEEPER_TLS_TYPE + value: {{ upper .Values.auth.zookeeper.tls.type | quote }} + - name: KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME + value: {{ .Values.auth.zookeeper.tls.verifyHostname | quote }} + {{- if .Values.auth.zookeeper.tls.passwordsSecret }} + - name: KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretTruststoreKey | quote }} + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: KAFKA_TLS_TYPE + value: {{ upper .Values.auth.tls.type | quote }} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ default "" .Values.auth.tls.endpointIdentificationAlgorithm | quote }} + - name: KAFKA_TLS_CLIENT_AUTH + value: {{ ternary "required" "none" (or (eq (include "kafka.externalClientProtocol" . 
) "mtls") (eq .Values.auth.clientProtocol "mtls")) | quote }} + - name: KAFKA_CERTIFICATE_PASSWORD + {{- if .Values.auth.tls.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.auth.tls.existingSecret }} + key: password + {{- else }} + value: {{ default "" .Values.auth.tls.password | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_VOLUME_DIR + value: {{ .Values.persistence.mountPath | quote }} + - name: KAFKA_LOG_DIR + value: {{ .Values.logPersistence.mountPath | quote }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVAL_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ .Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + - name: KAFKA_CFG_AUTHORIZER_CLASS_NAME + value: {{ .Values.authorizerClassName | quote }} + - name: KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND + value: {{ .Values.allowEveryoneIfNoAclFound | quote }} + - name: KAFKA_CFG_SUPER_USERS + value: {{ .Values.superUsers | quote }} + {{- if .Values.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if 
.Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + ports: + - name: kafka-client + containerPort: {{ .Values.containerPorts.client }} + - name: kafka-internal + containerPort: {{ .Values.containerPorts.internal }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: {{ .Values.containerPorts.external }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + mountPath: {{ .Values.persistence.mountPath }}/config/server.properties + subPath: server.properties + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + mountPath: /shared + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $_ := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + mountPath: /kafka-zookeeper-cert + readOnly: true + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + mountPath: /truststore + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ include "kafka.metrics.jmx.image" . }} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} + command: + - java + args: + - -XX:MaxRAMPercentage=100 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + - name: scripts + configMap: + name: {{ include "common.names.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $secret := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ tpl $secret $ }} + defaultMode: 256 + {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $index }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.auth.zookeeper.tls.existingSecret }} + defaultMode: 256 + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + secret: + secretName: {{ .Values.auth.tls.jksTruststoreSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} +{{- end }} +{{- if not .Values.logPersistence.enabled }} + - name: logs + emptyDir: {} +{{- else if .Values.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.logPersistence.existingClaim .) }} +{{- end }} + {{- if or (and .Values.persistence.enabled (not .Values.persistence.existingClaim)) (and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim)) }} + volumeClaimTemplates: + {{- end }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} +{{- if and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.logPersistence.size | quote }} + {{ include "kafka.storageClass" . 
| nindent 8 }} + {{- if .Values.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-external-access.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-external-access.yaml new file mode 100644 index 0000000..8d77a47 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,63 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "common.names.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +{{- $_ := set $ "targetPod" $targetPod }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + namespace: {{ $root.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $root.Values.externalAccess.service.labels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations $root.Values.externalAccess.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $root.Values.externalAccess.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $root.Values.externalAccess.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.externalAccess.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerIPs)) (eq (len $root.Values.externalAccess.service.loadBalancerIPs) $replicaCount) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + publishNotReadyAddresses: {{ $root.Values.externalAccess.service.publishNotReadyAddresses }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.ports.external }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + {{- if 
$root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-headless.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-headless.yaml new file mode 100644 index 0000000..af46212 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.service.headless.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.ports.internal }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc.yaml new file mode 100644 index 0000000..97947b7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/svc.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.ports.client }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.ports.external }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/tls-secrets.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/tls-secrets.yaml new file mode 100644 index 0000000..fdf350e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/templates/tls-secrets.yaml @@ -0,0 +1,30 @@ +{{- if (include "kafka.createTlsSecret" .) }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $ca := genCA "kafka-ca" 365 }} +{{- range $i := until $replicaCount }} +{{- $replicaHost := printf "%s-%d.%s-headless" $fullname $i $fullname }} +{{- $altNames := list (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $replicaHost $releaseNamespace) (printf "%s.%s" $fullname $releaseNamespace) $replicaHost $fullname }} +{{- $cert := genSignedCert $replicaHost nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $cert.Cert | b64enc | quote }} + tls.key: {{ $cert.Key | b64enc | quote }} +--- +{{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/values.yaml new file mode 100644 index 0000000..a6ca071 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/charts/kafka/values.yaml @@ -0,0 +1,1752 @@ +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section Kafka parameters + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## @param image.registry Kafka image registry +## @param image.repository Kafka image repository +## @param image.tag Kafka image tag (immutable tags are recommended) +## @param image.digest Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Kafka image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 3.3.1-debian-11-r19 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## @param config Configuration file for Kafka. 
Auto-generated based on other parameters when not specified +## Specify content for server.properties +## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart) +## The server.properties is auto-generated based on other parameters when this parameter is not specified +## e.g: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +config: "" +## @param existingConfigmap ConfigMap with Kafka Configuration +## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables +## +existingConfigmap: "" +## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers +## An optional log4j.properties file to overwrite the default of the Kafka brokers +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +log4j: "" +## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file +## The name of an existing ConfigMap containing a log4j.properties file +## NOTE: this will override `log4j` +## +existingLog4jConfigMap: "" +## @param heapOpts Kafka Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m +## @param deleteTopicEnable Switch to enable topic deletion or not +## +deleteTopicEnable: false +## @param autoCreateTopicsEnable Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments +## +autoCreateTopicsEnable: true +## @param logFlushIntervalMessages The number of messages to accept before forcing a flush of data to disk +## +logFlushIntervalMessages: _10000 +## @param logFlushIntervalMs The maximum amount of time a message can sit in a log before we force a flush +## +logFlushIntervalMs: 1000 +## @param logRetentionBytes A size-based retention policy for logs +## +logRetentionBytes: _1073741824 +## @param logRetentionCheckIntervalMs The interval at which log segments are checked to see if they can be deleted +## +logRetentionCheckIntervalMs: 300000 +## @param logRetentionHours The minimum age of a log file to be eligible for deletion due to age +## +logRetentionHours: 168 +## @param logSegmentBytes The maximum size of a log segment file. 
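For orientation, an illustrative values-override sketch tuning retention and segment size (numbers are arbitrary, not chart defaults; the leading underscore mirrors the convention the defaults above use for large integers):

logRetentionHours: 72
logRetentionBytes: _2147483648
logSegmentBytes: _536870912          # roll a new segment at ~512 MiB
logRetentionCheckIntervalMs: 300000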
When this size is reached a new log segment will be created +## +logSegmentBytes: _1073741824 +## @param logsDirs A comma separated list of directories in which kafka's log data is kept +## ref: https://kafka.apache.org/documentation/#brokerconfigs_log.dirs +logsDirs: /bitnami/kafka/data +## @param maxMessageBytes The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 +## @param defaultReplicationFactor Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 +## @param offsetsTopicReplicationFactor The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 +## @param transactionStateLogReplicationFactor The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 +## @param transactionStateLogMinIsr Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 +## @param numIoThreads The number of threads doing disk I/O +## +numIoThreads: 8 +## @param numNetworkThreads The number of threads handling network requests +## +numNetworkThreads: 3 +## @param numPartitions The default number of log partitions per topic +## +numPartitions: 1 +## @param numRecoveryThreadsPerDataDir The number of threads per data directory to be used for log recovery at startup and flushing at shutdown +## +numRecoveryThreadsPerDataDir: 1 +## @param socketReceiveBufferBytes The receive buffer (SO_RCVBUF) used by the socket server +## +socketReceiveBufferBytes: 102400 +## @param socketRequestMaxBytes The maximum size of a request that the socket server will accept (protection against OOM) +## +socketRequestMaxBytes: _104857600 +## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server +## +socketSendBufferBytes: 102400 +## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper +## +zookeeperConnectionTimeoutMs: 6000 +## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace +## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect +## +zookeeperChrootPath: "" +## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties +## +authorizerClassName: "" +## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users +## +allowEveryoneIfNoAclFound: true +## @param superUsers You can add super users in server.properties +## +superUsers: User:admin +## Authentication parameters +## https://github.com/bitnami/containers/tree/main/bitnami/kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## + clientProtocol: plaintext + # Note: empty by default for backwards compatibility reasons, find more information at + # https://github.com/bitnami/charts/pull/8902/ + externalClientProtocol: "" + interBrokerProtocol: plaintext + ## SASL configuration + ## + sasl: + ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` + ## + mechanisms: plain,scram-sha-256,scram-sha-512 + ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication. + ## + interBrokerMechanism: plain + ## JAAS configuration for SASL authentication. + ## + jaas: + ## @param auth.sasl.jaas.clientUsers Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + ## @param auth.sasl.jaas.clientPasswords Kafka client passwords. This is mandatory if more than one user is specified in clientUsers + ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + ## @param auth.sasl.jaas.interBrokerUser Kafka inter broker communication user for SASL authentication + ## + interBrokerUser: admin + ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication + ## + interBrokerPassword: "" + ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication + ## + zookeeperUser: "" + ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication + ## + zookeeperPassword: "" + ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + existingSecret: "" + ## TLS configuration + ## + tls: + ## @param auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem` + ## + type: jks + ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. + ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA. + ## + pemChainIncluded: false + ## @param auth.tls.existingSecrets Array existing secrets containing the TLS certificates for the Kafka brokers + ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore. + ## Create these secrets following the steps below: + ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. 
+ ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks + ## kubectl create secret generic SECRET_NAME_1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks + ## ... + ## + ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key. + ## Create these secrets following the steps below: + ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA + ## 2) Rename your CA file to `kafka.ca.crt`. + ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker. + ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker. + ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create): + ## kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-0.tls.crt --from-file=tls.key=./kafka-0.tls.key + ## kubectl create secret generic SECRET_NAME_1 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-1.tls.crt --from-file=tls.key=./kafka-1.tls.key + ## ... + ## + existingSecrets: [] + ## @param auth.tls.autoGenerated Automatically generate self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` + ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty + ## + autoGenerated: false + ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected. + ## Note: ignored when using 'existingSecret'. + ## + password: "" + ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) + ## + existingSecret: "" + ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore if it is missing from, or differs from, the ones in `auth.tls.existingSecrets` + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststoreSecret: "" + ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate + ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services: + ## - kafka-0.kafka-headless.kafka.svc.cluster.local + ## - kafka-1.kafka-headless.kafka.svc.cluster.local + ## - kafka-2.kafka-headless.kafka.svc.cluster.local + ## Note: ignored when using 'pem' format for certificates. + ## + jksKeystoreSAN: "" + ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststore: "" + ## @param auth.tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate + ## Disable server host name verification by setting it to an empty string.
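Putting the auth parameters above together, a minimal sketch (not chart defaults) enabling SASL-authenticated, TLS-encrypted traffic might look like the following; the secret names `kafka-jaas-passwords`, `kafka-jks-0` and `kafka-jks-1` are hypothetical, and the JKS secret list must have one entry per broker:

auth:
  clientProtocol: sasl_tls
  interBrokerProtocol: sasl_tls
  sasl:
    mechanisms: scram-sha-512
    interBrokerMechanism: scram-sha-512
    jaas:
      clientUsers:
        - app-user                         # illustrative client user
      existingSecret: kafka-jaas-passwords # hypothetical secret holding client/inter-broker passwords
  tls:
    type: jks
    existingSecrets:                       # one keystore secret per broker (replicaCount: 2 assumed)
      - kafka-jks-0
      - kafka-jks-1
    password: changeit                     # illustrative; prefer auth.tls.existingSecret in practice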
+ ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + endpointIdentificationAlgorithm: https + ## Zookeeper client configuration for kafka brokers + ## + zookeeper: + ## TLS configuration + ## + tls: + ## @param auth.zookeeper.tls.enabled Enable TLS for Zookeeper client connections. + ## + enabled: false + ## @param auth.zookeeper.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## + type: jks + ## @param auth.zookeeper.tls.verifyHostname Hostname validation. + ## + verifyHostname: true + ## @param auth.zookeeper.tls.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications. + ## + existingSecret: "" + ## @param auth.zookeeper.tls.existingSecretKeystoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: zookeeper.keystore.jks + ## @param auth.zookeeper.tls.existingSecretTruststoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: zookeeper.truststore.jks + ## @param auth.zookeeper.tls.passwordsSecret Existing secret containing Keystore and Truststore passwords. + ## + passwordsSecret: "" + ## @param auth.zookeeper.tls.passwordsSecretKeystoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: keystore-password + ## @param auth.zookeeper.tls.passwordsSecretTruststoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: truststore-password +## @param listeners The address(es) the socket server listens on. Auto-calculated it's set to an empty array +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] +## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] +## @param listenerSecurityProtocolMap The protocol->listener mapping. 
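As a sketch of how these three parameters interact (a single-broker example; the listener names are conventional, while the hostname and `default` namespace are assumptions):

listeners:
  - CLIENT://:9092
  - INTERNAL://:9093
advertisedListeners:                       # hard-coded hostnames only work with replicaCount: 1
  - CLIENT://kafka-0.kafka-headless.default.svc.cluster.local:9092
  - INTERNAL://kafka-0.kafka-headless.default.svc.cluster.local:9093
listenerSecurityProtocolMap: CLIENT:SASL_PLAINTEXT,INTERNAL:PLAINTEXT
interBrokerListenerName: INTERNAL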
Auto-calculated it's set to nil +## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters) +## +listenerSecurityProtocolMap: "" +## @param allowPlaintextListener Allow to use the PLAINTEXT listener +## +allowPlaintextListener: true +## @param interBrokerListenerName The listener that the brokers should communicate on +## +interBrokerListenerName: INTERNAL +## @param command Override Kafka container command +## +command: + - /scripts/setup.sh +## @param args Override Kafka container arguments +## +args: [] +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables +## +extraEnvVarsSecret: "" + +## @section Statefulset parameters + +## @param replicaCount Number of Kafka nodes +## +replicaCount: 1 +## @param minBrokerId Minimal broker.id value, nodes increment their `broker.id` respectively +## Brokers increment their ID starting at this minimal value. +## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively. +## +minBrokerId: 0 +## @param brokerRackAssignment Set Broker Assignment for multi tenant environment Allowed values: `aws-az` +## ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica +## +brokerRackAssignment: "" +## @param containerPorts.client Kafka client container port +## @param containerPorts.internal Kafka inter-broker container port +## @param containerPorts.external Kafka external container port +## +containerPorts: + client: 9092 + internal: 9093 + external: 9094 +## Configure extra options for Kafka containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Kafka containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Kafka containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + periodSeconds: 10 + successThreshold: 1 +## @param startupProbe.enabled Enable startupProbe on Kafka containers +## @param 
startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup +## +lifecycleHooks: {} +## Kafka resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## @param resources.limits The resources limits for the container +## @param resources.requests The requested resources for the container +## +resources: + limits: {} + requests: {} +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enable security context for the pods +## @param podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroup: 1001 +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enable Kafka containers' Security Context +## @param containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser +## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot +## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as nonprivilege +## e.g: +## containerSecurityContext: +## enabled: true +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false +## @param hostAliases Kafka pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param hostNetwork Specify if host network should be enabled for Kafka pods +## +hostNetwork: false +## @param hostIPC Specify if host IPC should be enabled for Kafka pods +## +hostIPC: false +## @param podLabels Extra labels for Kafka pods +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Extra annotations for Kafka pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. 
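For example, an illustrative scheduling override spreading brokers across nodes (the node label key and value are assumptions, not chart defaults):

podAntiAffinityPreset: hard      # never co-schedule two brokers on the same node
nodeAffinityPreset:
  type: soft
  key: kubernetes.io/arch        # illustrative node label
  values:
    - amd64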
Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate +## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution +## +terminationGracePeriodSeconds: "" +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
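A brief sketch of overriding this behavior together with a toleration (the `dedicated` taint key is hypothetical):

podManagementPolicy: OrderedReady   # start brokers sequentially instead of in parallel
tolerations:
  - key: dedicated
    operator: Equal
    value: kafka
    effect: NoSchedule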
There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by Kafka pods +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Name of the k8s scheduler (other than default) +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type Kafka statefulset strategy type +## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s) +## e.g: +## extraVolumes: +## - name: kafka-jaas +## secret: +## secretName: kafka-jaas +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s) +## extraVolumeMounts: +## - name: kafka-jaas +## mountPath: /bitnami/kafka/config/kafka_jaas.conf +## subPath: kafka_jaas.conf +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the Kafka pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the Kafka pod(s) +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## Kafka Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the Kafka pod +## @param pdb.minAvailable Minimum number/percentage of available Kafka replicas +## @param pdb.maxUnavailable Maximum number/percentage of unavailable Kafka replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters + +## Service parameters +## +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.internal Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections + ## + ports: + client: 9092 + internal: 9093 + external: 9094 + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + client: "" + external: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param
service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service + ## + annotations: {} + ## Headless service properties + ## + headless: + ## @param service.headless.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.headless.labels Labels for the headless service. + ## + labels: {} + ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value) + ## + extraPorts: [] +## External Access to Kafka brokers configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry + ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository + ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended) + ## @param externalAccess.autoDiscovery.image.digest Init container auto-discovery image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag + ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy + ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.25.4-debian-11-r6 + digest: "" + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container + ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container + ## + resources: + limits: {} + requests: {} + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## Note: A new service per broker will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP + ## + type: LoadBalancer + ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer + ## + ports: + external: 9094 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerNames Array of load balancer names for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerNames: + ## - broker1.external.example.com + ## - broker2.external.example.com + ## + loadBalancerNames: [] + ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com. + ## + loadBalancerAnnotations: [] + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param externalAccess.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## e.g: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## @param externalAccess.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort + ## + useHostIPs: false + ## @param externalAccess.service.usePodIPs Use the MY_POD_IP address for external access.
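Tying these parameters together, an illustrative NodePort sketch for a three-broker cluster (the ports and domain are assumptions, not defaults):

replicaCount: 3
externalAccess:
  enabled: true
  autoDiscovery:
    enabled: false             # ports are pinned below, so no K8s API discovery is needed
  service:
    type: NodePort
    domain: kafka.example.com  # hypothetical externally reachable address
    nodePorts:                 # length must equal replicaCount
      - 30001
      - 30002
      - 30003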
+ ## + usePodIPs: false + ## @param externalAccess.service.domain Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where tcp for external ports is configured + ## + domain: "" + ## @param externalAccess.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param externalAccess.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.service.annotations Service annotations for external access + ## + annotations: {} + ## @param externalAccess.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## listening on. When true, Kafka accepts connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the NetworkPolicy's namespace + ## and that carry the expected client label can reach Kafka. + ## To make Kafka accessible to clients in other namespaces, use this LabelSelector to select those + ## namespaces; note that the NetworkPolicy's own namespace should also be explicitly added.
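A minimal sketch restricting access to labeled clients plus one extra namespace (the `kafka-access` label is illustrative):

networkPolicy:
  enabled: true
  allowExternal: false           # only pods carrying the client label may connect
  explicitNamespacesSelector:
    matchLabels:
      kafka-access: "true"       # hypothetical namespace label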
+ ## + ## e.g: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port + ## e.g: + ## - ipBlock: + ## cidr: 172.9.0.0/16 + ## except: + ## - 172.9.1.0/24 + ## + externalAccess: + from: [] + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: true + ## @param persistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for Kafka data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for Kafka data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /bitnami/kafka +## Log Persistence parameters +## +logPersistence: + ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: false + ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + storageClass: "" + ## @param logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. 
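For instance, an illustrative persistence override (the `fast-ssd` StorageClass is an assumption about the target cluster):

persistence:
  enabled: true
  storageClass: fast-ssd   # hypothetical StorageClass
  size: 50Gi
logPersistence:
  enabled: true
  size: 10Gi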
+  ## selector:
+  ##   matchLabels:
+  ##     app: my-app
+  ##
+  selector: {}
+  ## @param logPersistence.mountPath Mount path of the Kafka logs volume
+  ##
+  mountPath: /opt/bitnami/kafka/logs
+
+## @section Volume Permissions parameters
+##
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node
+##
+volumePermissions:
+  ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume
+  ##
+  enabled: false
+  ## @param volumePermissions.image.registry Init container volume-permissions image registry
+  ## @param volumePermissions.image.repository Init container volume-permissions image repository
+  ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended)
+  ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r57
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init containers' Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    runAsUser: 0
+
+## @section Other Parameters
+
+## ServiceAccount for Kafka
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods
+  ##
+  create: true
+  ## @param serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
+  ## If not set and create is true, a name is generated using the kafka.serviceAccountName template
+  ##
+  name: ""
+  ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+  ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+  ##
+  automountServiceAccountToken: true
+  ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount
+  ##
+  annotations: {}
+## Role Based Access Control
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+  ## @param rbac.create Whether to create & use RBAC resources or not
+  ## binding Kafka ServiceAccount to a role
+  ## that allows Kafka pods querying the K8s API
+  ##
+  create: false
+
+## @section Metrics parameters
+
+## Prometheus Exporters / Metrics
+##
+metrics:
+  ## Prometheus Kafka exporter: exposes complementary metrics to the JMX exporter
+  ##
+  kafka:
+    ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics
+    ##
+    enabled: false
+    ## Bitnami Kafka exporter image
+    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
+    ## @param metrics.kafka.image.registry Kafka exporter image registry
+    ## @param metrics.kafka.image.repository Kafka exporter image repository
+    ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended)
+    ## @param metrics.kafka.image.digest Kafka exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+    ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy
+    ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array
+    ##
+    image:
+      registry: docker.io
+      repository: bitnami/kafka-exporter
+      tag: 1.6.0-debian-11-r34
+      digest: ""
+      ## Specify an imagePullPolicy
+      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ##
+      pullPolicy: IfNotPresent
+      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      ## e.g:
+      ## pullSecrets:
+      ##   - myRegistryKeySecretName
+      ##
+      pullSecrets: []
+
+    ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files
+    ## for Kafka exporter client authentication
+    ##
+    certificatesSecret: ""
+    ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if the 'client-cert' key differs from the default (cert-file)
+    ##
+    tlsCert: cert-file
+    ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if the 'client-key' key differs from the default (key-file)
+    ##
+    tlsKey: key-file
+    ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication
+    ##
+    tlsCaSecret: ""
+    ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if the 'ca-cert' key differs from the default (ca-file)
+    ##
+    tlsCaCert: ca-file
+    ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter
+    ## e.g:
+    ## extraFlags:
+    ##   tls.insecure-skip-tls-verify: ""
+    ##   web.telemetry-path: "/metrics"
+    ##
+    extraFlags: {}
+    ## @param metrics.kafka.command Override Kafka exporter container command
+    ##
+    command: []
+    ## @param metrics.kafka.args Override Kafka exporter container arguments
+    ##
+    args: []
+    ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port
+    ##
+    containerPorts:
+      metrics: 9308
+    ## Kafka exporter resource requests and limits
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ## @param metrics.kafka.resources.limits The resources limits for the container
+    ## @param metrics.kafka.resources.requests The requested resources for the container
+    ##
+    resources:
+      limits: {}
+      requests: {}
+    ## Kafka exporter pods' Security Context
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+    ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods
+    ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup
+    ##
+    podSecurityContext:
+      enabled: true
+      fsGroup: 1001
+    ## Kafka exporter containers' Security Context
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+    ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context
+    ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser
+    ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot
+    ## e.g:
+    ##   containerSecurityContext:
+    ##     enabled: true
+    ##     capabilities:
+    ##       drop: ["NET_RAW"]
+    ##     readOnlyRootFilesystem: true
+    ##
+    containerSecurityContext:
+      enabled: true
+      runAsUser: 1001
+      runAsNonRoot: true
+    ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases
+    ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+    ##
+    hostAliases: []
+    ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods
+    ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+    ##
+    podLabels: {}
+    ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods
+    ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+    ##
+    podAnnotations: {}
+    ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAffinityPreset: ""
+    ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+    ##
+    podAntiAffinityPreset: soft
+    ## Node metrics.kafka.affinity preset
+    ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+    ##
+    nodeAffinityPreset:
+      ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard`
+      ##
+      type: ""
+      ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match. Ignored if `metrics.kafka.affinity` is set.
+      ## E.g.
+      ## key: "kubernetes.io/e2e-az-name"
+      ##
+      key: ""
+      ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set.
+ ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.kafka.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.kafka.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka exporter service configuration + ## + service: + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port + ## + ports: + metrics: 9308 + ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + clusterIP: "" + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" + prometheus.io/path: "/metrics" + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of 
ServiceAccount for Kafka exporter pods
+      ##
+      create: true
+      ## @param metrics.kafka.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated
+      ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template
+      ##
+      name: ""
+      ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created
+      ## Can be set to false if pods using this serviceAccount do not need to use K8s API
+      ##
+      automountServiceAccountToken: true
+  ## Prometheus JMX exporter: exposes the majority of Kafka's metrics
+  ##
+  jmx:
+    ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus
+    ##
+    enabled: false
+    ## Bitnami JMX exporter image
+    ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/
+    ## @param metrics.jmx.image.registry JMX exporter image registry
+    ## @param metrics.jmx.image.repository JMX exporter image repository
+    ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended)
+    ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+    ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy
+    ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array
+    ##
+    image:
+      registry: docker.io
+      repository: bitnami/jmx-exporter
+      tag: 0.17.2-debian-11-r23
+      digest: ""
+      ## Specify an imagePullPolicy
+      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ##
+      pullPolicy: IfNotPresent
+      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      ## e.g:
+      ## pullSecrets:
+      ##   - myRegistryKeySecretName
+      ##
+      pullSecrets: []
+    ## Prometheus JMX exporter containers' Security Context
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+    ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context
+    ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser
+    ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot
+    ## e.g:
+    ##   containerSecurityContext:
+    ##     enabled: true
+    ##     capabilities:
+    ##       drop: ["NET_RAW"]
+    ##     readOnlyRootFilesystem: true
+    ##
+    containerSecurityContext:
+      enabled: true
+      runAsUser: 1001
+      runAsNonRoot: true
+    ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port
+    ##
+    containerPorts:
+      metrics: 5556
+    ## Prometheus JMX exporter resource requests and limits
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container
+    ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container
+    ##
+    resources:
+      limits: {}
+      requests: {}
+    ## Prometheus JMX exporter service configuration
+    ##
+    service:
+      ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port
+      ##
+      ports:
+        metrics: 5556
+      ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services
+      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address
+      ##
+      clusterIP: ""
+      ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin
+      ## Values: ClientIP or None
+      ## ref: https://kubernetes.io/docs/user-guide/services/
+      ##
+      sessionAffinity: None
+      ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service
+      ##
+      annotations:
+        prometheus.io/scrape: "true"
+        prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}"
+        prometheus.io/path: "/"
+    ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects to expose via the JMX exporter
+    ## Only whitelisted values will be exposed via the JMX exporter; they must also be exposed via Rules. To expose
+    ## all metrics (warning: this is extremely verbose and the metrics are not formatted in a Prometheus style),
+    ## (1) set `whitelistObjectNames: []` and (2) comment out the `overrideConfig` above.
+    ##
+    whitelistObjectNames:
+      - kafka.controller:*
+      - kafka.server:*
+      - java.lang:*
+      - kafka.network:*
+      - kafka.log:*
+    ## @param metrics.jmx.config [string] Configuration file for JMX exporter
+    ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template
+    ##
+    ## Credits to the incubator/kafka chart for the JMX configuration.
+    ## https://github.com/helm/charts/tree/master/incubator/kafka
+    ##
+    config: |-
+      jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi
+      lowercaseOutputName: true
+      lowercaseOutputLabelNames: true
+      ssl: false
+      {{- if .Values.metrics.jmx.whitelistObjectNames }}
+      whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"]
+      {{- end }}
+    ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration
+    ## NOTE: This will override metrics.jmx.config
+    ##
+    existingConfigmap: ""
+    ## @param metrics.jmx.extraRules Add extra rules to JMX exporter configuration
+    ## e.g (pattern reconstructed from the capture-group references; verify against your exporter config):
+    ## extraRules: |-
+    ##   - pattern: kafka.server<type=socket-server-metrics, listener=(.+), networkProcessor=(.+)><>(connection-count)
+    ##     name: kafka_server_socket_server_metrics_$3
+    ##     labels:
+    ##       listener: $1
+    extraRules: ""
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`)
+    ##
+    enabled: false
+    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
+    ##
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
+    ##
+    labels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs
to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.groups Prometheus Rule Groups for Kafka + ## + groups: [] + +## @section Kafka provisioning parameters + +## Kafka provisioning +## +provisioning: + ## @param provisioning.enabled Enable kafka provisioning Job + ## + enabled: false + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka topics to provision + ## - name: topic-name + ## partitions: 1 + ## replicationFactor: 1 + ## ## https://kafka.apache.org/documentation/#topicconfigs + ## config: + ## max.message.bytes: 64000 + ## flush.messages: 1 + ## + topics: [] + ## @param provisioning.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param provisioning.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources + ## - echo "Allow user to consume from any topic" + ## - >- + ## /opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --add + ## --allow-principal User:user + ## --consumer --topic '*' + ## - "/opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --list" + ## + extraProvisioningCommands: [] + ## @param provisioning.parallel Number of provisioning commands to run at the same time + ## + parallel: 1 + ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + preScript: "" + ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + postScript: "" + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## Note: ignored if auth.tls.clientProtocol different from one of these values: "tls" "mtls" "sasl_tls". + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. 
+ ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key. + ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. 
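+      ## e.g., the passwords secret referenced above could be created like this
+      ## (illustrative command; key names must match the *PasswordSecretKey values):
+      ##   kubectl create secret generic kafka-jks-passwords \
+      ##     --from-literal=keystore-password=changeit \
+      ##     --from-literal=truststore-password=changeit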
+ ## + truststorePassword: "" + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param provisioning.serviceAccount.create Enable creation of ServiceAccount for Kafka provisioning pods + ## + create: false + ## @param provisioning.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the provisioning.serviceAccount.name template + ## + name: "" + ## @param provisioning.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container + ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container + ## + resources: + limits: {} + requests: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for kafka provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param 
provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s)
+  ## e.g:
+  ## extraVolumes:
+  ##   - name: kafka-jaas
+  ##     secret:
+  ##       secretName: kafka-jaas
+  ##
+  extraVolumes: []
+  ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s)
+  ## extraVolumeMounts:
+  ##   - name: kafka-jaas
+  ##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+  ##     subPath: kafka_jaas.conf
+  ##
+  extraVolumeMounts: []
+  ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+  ## @param provisioning.initContainers Add init containers to the Kafka provisioning pod(s)
+  ## e.g:
+  ## initContainers:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  initContainers: []
+  ## @param provisioning.waitForKafka If true, use an init container to wait until Kafka is ready before starting provisioning
+  ##
+  waitForKafka: true
+
+## @section ZooKeeper chart parameters
+
+## ZooKeeper chart configuration
+## https://github.com/bitnami/charts/blob/main/bitnami/zookeeper/values.yaml
+##
+zookeeper:
+  ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart
+  ##
+  enabled: true
+  ## @param zookeeper.replicaCount Number of ZooKeeper nodes
+  ##
+  replicaCount: 1
+  ## ZooKeeper authentication
+  ##
+  auth:
+    client:
+      ## @param zookeeper.auth.client.enabled Enable ZooKeeper auth
+      ##
+      enabled: false
+      ## @param zookeeper.auth.client.clientUser User that ZooKeeper clients will use to authenticate
+      ##
+      clientUser: ""
+      ## @param zookeeper.auth.client.clientPassword Password that ZooKeeper clients will use to authenticate
+      ##
+      clientPassword: ""
+      ## @param zookeeper.auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+      ##
+      serverUsers: ""
+      ## @param zookeeper.auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+      ##
+      serverPasswords: ""
+  ## ZooKeeper Persistence parameters
+  ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
+  ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s)
+  ## @param zookeeper.persistence.storageClass Persistent Volume storage class
+  ## @param zookeeper.persistence.accessModes Persistent Volume access modes
+  ## @param zookeeper.persistence.size Persistent Volume size
+  ##
+  persistence:
+    enabled: true
+    storageClass: ""
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+
+## External Zookeeper Configuration
+## All of these values are only used if `zookeeper.enabled=false`
+##
+externalZookeeper:
+  ## @param externalZookeeper.servers List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'.
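+  ## e.g (illustrative hostnames; adjust to your environment):
+  ## servers:
+  ##   - zk-0.zookeeper-headless.default.svc.cluster.local:2181
+  ##   - zk-1.zookeeper-headless.default.svc.cluster.local:2181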
+ ## + servers: [] diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/NOTES.txt b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/NOTES.txt new file mode 100644 index 0000000..6bce8ae --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/NOTES.txt @@ -0,0 +1,30 @@ + +################################################################### +### IMPORTANT: Ensure that storage is explicitly configured ### +### Default storage options are subject to change. ### +### ### +### IMPORTANT: The use of .env: {...} is deprecated. ### +### Please use .extraEnv: [] instead. ### +################################################################### + +You can log into the Jaeger Query UI here: + +{{- if contains "NodePort" .Values.query.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "jaeger.fullname" . }}-query) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT/ + +{{- else if contains "LoadBalancer" .Values.query.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "jaeger.fullname" . }}-query' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "jaeger.fullname" . }}-query -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP/ +{{- else if contains "ClusterIP" .Values.query.service.type }} + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=query" -o jsonpath="{.items[0].metadata.name}") + echo http://127.0.0.1:8080/ + kubectl port-forward --namespace {{ .Release.Namespace }} $POD_NAME 8080:16686 +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/_helpers.tpl b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/_helpers.tpl new file mode 100644 index 0000000..7dbf3c7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/_helpers.tpl @@ -0,0 +1,455 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "jaeger.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "jaeger.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "jaeger.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create image tag value which defaults to .Chart.AppVersion. 
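+An explicitly set .Values.tag takes precedence over the chart's appVersion.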
+*/}} +{{- define "jaeger.image.tag" -}} +{{- .Values.tag | default .Chart.AppVersion }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "jaeger.labels" -}} +helm.sh/chart: {{ include "jaeger.chart" . }} +{{ include "jaeger.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "jaeger.selectorLabels" -}} +app.kubernetes.io/name: {{ include "jaeger.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the cassandra schema service account to use +*/}} +{{- define "jaeger.cassandraSchema.serviceAccountName" -}} +{{- if .Values.schema.serviceAccount.create -}} + {{ default (printf "%s-cassandra-schema" (include "jaeger.fullname" .)) .Values.schema.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.schema.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the spark service account to use +*/}} +{{- define "jaeger.spark.serviceAccountName" -}} +{{- if .Values.spark.serviceAccount.create -}} + {{ default (printf "%s-spark" (include "jaeger.fullname" .)) .Values.spark.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.spark.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the esIndexCleaner service account to use +*/}} +{{- define "jaeger.esIndexCleaner.serviceAccountName" -}} +{{- if .Values.esIndexCleaner.serviceAccount.create -}} + {{ default (printf "%s-es-index-cleaner" (include "jaeger.fullname" .)) .Values.esIndexCleaner.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.esIndexCleaner.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the esRollover service account to use +*/}} +{{- define "jaeger.esRollover.serviceAccountName" -}} +{{- if .Values.esRollover.serviceAccount.create -}} + {{ default (printf "%s-es-rollover" (include "jaeger.fullname" .)) .Values.esRollover.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.esRollover.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the esLookback service account to use +*/}} +{{- define "jaeger.esLookback.serviceAccountName" -}} +{{- if .Values.esLookback.serviceAccount.create -}} + {{ default (printf "%s-es-lookback" (include "jaeger.fullname" .)) .Values.esLookback.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.esLookback.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the hotrod service account to use +*/}} +{{- define "jaeger.hotrod.serviceAccountName" -}} +{{- if .Values.hotrod.serviceAccount.create -}} + {{ default (printf "%s-hotrod" (include "jaeger.fullname" .)) .Values.hotrod.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.hotrod.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the query service account to use +*/}} +{{- define "jaeger.query.serviceAccountName" -}} +{{- if .Values.query.serviceAccount.create -}} + {{ default (include "jaeger.query.name" .) .Values.query.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.query.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the agent service account to use +*/}} +{{- define "jaeger.agent.serviceAccountName" -}} +{{- if .Values.agent.serviceAccount.create -}} + {{ default (include "jaeger.agent.name" .) 
.Values.agent.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.agent.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the collector service account to use +*/}} +{{- define "jaeger.collector.serviceAccountName" -}} +{{- if .Values.collector.serviceAccount.create -}} + {{ default (include "jaeger.collector.name" .) .Values.collector.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.collector.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the collector ingress host +*/}} +{{- define "jaeger.collector.ingressHost" -}} +{{- if (kindIs "string" .) }} + {{- . }} +{{- else }} + {{- .host }} +{{- end }} +{{- end -}} + +{{/* +Create the collector ingress servicePort +*/}} +{{- define "jaeger.collector.ingressServicePort" -}} +{{- if (kindIs "string" .context) }} + {{- .defaultServicePort }} +{{- else }} + {{- .context.servicePort }} +{{- end }} +{{- end -}} + +{{/* +Create the name of the ingester service account to use +*/}} +{{- define "jaeger.ingester.serviceAccountName" -}} +{{- if .Values.ingester.serviceAccount.create -}} + {{ default (include "jaeger.ingester.name" .) .Values.ingester.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.ingester.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified query name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "jaeger.query.name" -}} +{{- $nameGlobalOverride := printf "%s-query" (include "jaeger.fullname" .) -}} +{{- if .Values.query.fullnameOverride -}} +{{- printf "%s" .Values.query.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $nameGlobalOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified agent name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "jaeger.agent.name" -}} +{{- $nameGlobalOverride := printf "%s-agent" (include "jaeger.fullname" .) -}} +{{- if .Values.agent.fullnameOverride -}} +{{- printf "%s" .Values.agent.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $nameGlobalOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified collector name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "jaeger.collector.name" -}} +{{- $nameGlobalOverride := printf "%s-collector" (include "jaeger.fullname" .) -}} +{{- if .Values.collector.fullnameOverride -}} +{{- printf "%s" .Values.collector.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $nameGlobalOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified ingester name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "jaeger.ingester.name" -}} +{{- $nameGlobalOverride := printf "%s-ingester" (include "jaeger.fullname" .) 
-}} +{{- if .Values.ingester.fullnameOverride -}} +{{- printf "%s" .Values.ingester.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $nameGlobalOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{- define "cassandra.host" -}} +{{- if .Values.provisionDataStore.cassandra -}} +{{- if .Values.storage.cassandra.nameOverride }} +{{- printf "%s" .Values.storage.cassandra.nameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name "cassandra" | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- else }} +{{- .Values.storage.cassandra.host }} +{{- end -}} +{{- end -}} + +{{- define "cassandra.contact_points" -}} +{{- $port := .Values.storage.cassandra.port | toString }} +{{- if .Values.provisionDataStore.cassandra -}} +{{- if .Values.storage.cassandra.nameOverride }} +{{- $host := printf "%s" .Values.storage.cassandra.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- printf "%s:%s" $host $port }} +{{- else }} +{{- $host := printf "%s-%s" .Release.Name "cassandra" | trunc 63 | trimSuffix "-" -}} +{{- printf "%s:%s" $host $port }} +{{- end -}} +{{- else }} +{{- printf "%s:%s" .Values.storage.cassandra.host $port }} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "elasticsearch.client.url" -}} +{{- $port := .Values.storage.elasticsearch.port | toString -}} +{{- printf "%s://%s:%s" .Values.storage.elasticsearch.scheme .Values.storage.elasticsearch.host $port }} +{{- end -}} + +{{- define "jaeger.hotrod.tracing.host" -}} +{{- default (include "jaeger.agent.name" .) .Values.hotrod.tracing.host -}} +{{- end -}} + + +{{/* +Configure list of IP CIDRs allowed access to load balancer (if supported) +*/}} +{{- define "loadBalancerSourceRanges" -}} +{{- if .service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} +{{- end -}} + +{{- define "helm-toolkit.utils.joinListWithComma" -}} +{{- $local := dict "first" true -}} +{{- range $k, $v := . -}}{{- if not $local.first -}},{{- end -}}{{- $v -}}{{- $_ := set $local "first" false -}}{{- end -}} +{{- end -}} + + +{{/* +Cassandra related environment variables +*/}} +{{- define "cassandra.env" -}} +- name: CASSANDRA_SERVERS + value: {{ include "cassandra.host" . }} +- name: CASSANDRA_PORT + value: {{ .Values.storage.cassandra.port | quote }} +{{ if .Values.storage.cassandra.tls.enabled }} +- name: CASSANDRA_TLS_ENABLED + value: "true" +- name: CASSANDRA_TLS_SERVER_NAME + valueFrom: + secretKeyRef: + name: {{ .Values.storage.cassandra.tls.secretName }} + key: commonName +- name: CASSANDRA_TLS_KEY + value: "/cassandra-tls/client-key.pem" +- name: CASSANDRA_TLS_CERT + value: "/cassandra-tls/client-cert.pem" +- name: CASSANDRA_TLS_CA + value: "/cassandra-tls/ca-cert.pem" +{{- end }} +{{- if .Values.storage.cassandra.keyspace }} +- name: CASSANDRA_KEYSPACE + value: {{ .Values.storage.cassandra.keyspace }} +{{- end }} +- name: CASSANDRA_USERNAME + value: {{ .Values.storage.cassandra.user }} +- name: CASSANDRA_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.storage.cassandra.existingSecret }}{{ .Values.storage.cassandra.existingSecret }}{{- else }}{{ include "jaeger.fullname" . 
}}-cassandra{{- end }} + key: password +{{- range $key, $value := .Values.storage.cassandra.env }} +- name: {{ $key | quote }} + value: {{ $value | quote }} +{{ end -}} +{{- if .Values.storage.cassandra.extraEnv }} +{{ toYaml .Values.storage.cassandra.extraEnv }} +{{- end }} +{{- end -}} + +{{/* +Elasticsearch related environment variables +*/}} +{{- define "elasticsearch.env" -}} +- name: ES_SERVER_URLS + value: {{ include "elasticsearch.client.url" . }} +{{- if not .Values.storage.elasticsearch.anonymous }} +- name: ES_USERNAME + value: {{ .Values.storage.elasticsearch.user }} +{{- end }} +{{- if .Values.storage.elasticsearch.usePassword }} +- name: ES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.storage.elasticsearch.existingSecret }}{{ .Values.storage.elasticsearch.existingSecret }}{{- else }}{{ include "jaeger.fullname" . }}-elasticsearch{{- end }} + key: {{ default "password" .Values.storage.elasticsearch.existingSecretKey }} +{{- end }} +{{- if .Values.storage.elasticsearch.tls.enabled }} +- name: ES_TLS_ENABLED + value: "true" +- name: ES_TLS_CA + value: /es-tls/ca-cert.pem +{{- end }} +{{- if .Values.storage.elasticsearch.indexPrefix }} +- name: ES_INDEX_PREFIX + value: {{ .Values.storage.elasticsearch.indexPrefix }} +{{- end }} +{{- range $key, $value := .Values.storage.elasticsearch.env }} +- name: {{ $key | quote }} + value: {{ $value | quote }} +{{ end -}} +{{- if .Values.storage.elasticsearch.extraEnv }} +{{ toYaml .Values.storage.elasticsearch.extraEnv }} +{{- end }} +{{- end -}} + +{{/* +grpcPlugin related environment variables +*/}} +{{- define "grpcPlugin.env" -}} +{{- if .Values.storage.grpcPlugin.extraEnv }} +{{- toYaml .Values.storage.grpcPlugin.extraEnv }} +{{- end }} +{{- end -}} + +{{/* +Cassandra, Elasticsearch, or grpc-plugin related environment variables depending on which is used +*/}} +{{- define "storage.env" -}} +{{- if eq .Values.storage.type "cassandra" -}} +{{ include "cassandra.env" . }} +{{- else if eq .Values.storage.type "elasticsearch" -}} +{{ include "elasticsearch.env" . }} +{{- else if eq .Values.storage.type "grpc-plugin" -}} +{{ include "grpcPlugin.env" . }} +{{- end -}} +{{- end -}} + +{{/* +Cassandra related command line options +*/}} +{{- define "cassandra.cmdArgs" -}} +{{- range $key, $value := .Values.storage.cassandra.cmdlineParams -}} +{{- if $value }} +- --{{ $key }}={{ $value }} +{{- else }} +- --{{ $key }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Elasticsearch related command line options +*/}} +{{- define "elasticsearch.cmdArgs" -}} +{{- range $key, $value := .Values.storage.elasticsearch.cmdlineParams -}} +{{- if $value }} +- --{{ $key }}={{ $value }} +{{- else }} +- --{{ $key }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Cassandra or Elasticsearch related command line options depending on which is used +*/}} +{{- define "storage.cmdArgs" -}} +{{- if eq .Values.storage.type "cassandra" -}} +{{- include "cassandra.cmdArgs" . -}} +{{- else if eq .Values.storage.type "elasticsearch" -}} +{{- include "elasticsearch.cmdArgs" . 
-}} +{{- end -}} +{{- end -}} + +{{/* +Add extra argument to the command line options +Usage: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.collector.cmdlineParams ) | nindent 10 }} +*/}} +{{- define "extra.cmdArgs" -}} +{{- range $key, $value := .cmdlineParams -}} +{{- if $value }} +- --{{ $key }}={{ $value }} +{{- else }} +- --{{ $key }} +{{- end }} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-ds.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-ds.yaml new file mode 100644 index 0000000..f6223f9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-ds.yaml @@ -0,0 +1,143 @@ +{{- if .Values.agent.enabled -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "jaeger.agent.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: agent +{{- if .Values.agent.annotations }} + annotations: + {{- toYaml .Values.agent.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: agent + {{- with .Values.agent.daemonset.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} + template: + metadata: +{{- if .Values.agent.podAnnotations }} + annotations: + {{- toYaml .Values.agent.podAnnotations | nindent 8 }} +{{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: agent +{{- if .Values.agent.podLabels }} + {{- toYaml .Values.agent.podLabels | nindent 8 }} +{{- end }} + spec: + securityContext: + {{- toYaml .Values.agent.podSecurityContext | nindent 8 }} + {{- if .Values.agent.useHostNetwork }} + hostNetwork: true + {{- end }} + dnsPolicy: {{ .Values.agent.dnsPolicy }} + {{- with .Values.agent.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + serviceAccountName: {{ template "jaeger.agent.serviceAccountName" . }} + {{- with .Values.agent.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.agent.initContainers }} + initContainers: + {{- toYaml .Values.agent.initContainers | nindent 8 }} + {{- end}} + containers: + - name: {{ template "jaeger.agent.name" . }} + securityContext: + {{- toYaml .Values.agent.securityContext | nindent 10 }} + image: {{ .Values.agent.image }}:{{- .Values.agent.tag | default (include "jaeger.image.tag" .) }} + imagePullPolicy: {{ .Values.agent.pullPolicy }} + args: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.agent.cmdlineParams ) | nindent 10 }} + env: + {{- if .Values.agent.extraEnv }} + {{- toYaml .Values.agent.extraEnv | nindent 10 }} + {{- end }} + {{- if not (hasKey .Values.agent.cmdlineParams "reporter.grpc.host-port") }} + - name: REPORTER_GRPC_HOST_PORT + value: {{ include "jaeger.collector.name" . 
}}:{{ .Values.collector.service.grpc.port }} + {{- end }} + ports: + - name: zipkin-compact + containerPort: {{ .Values.agent.service.zipkinThriftPort }} + protocol: UDP + {{- if .Values.agent.daemonset.useHostPort }} + hostPort: {{ .Values.agent.service.zipkinThriftPort }} + {{- end }} + - name: jaeger-compact + containerPort: {{ .Values.agent.service.compactPort }} + protocol: UDP + {{- if .Values.agent.daemonset.useHostPort }} + hostPort: {{ .Values.agent.service.compactPort }} + {{- end }} + - name: jaeger-binary + containerPort: {{ .Values.agent.service.binaryPort }} + protocol: UDP + {{- if .Values.agent.daemonset.useHostPort }} + hostPort: {{ .Values.agent.service.binaryPort }} + {{- end }} + - name: http + containerPort: {{ .Values.agent.service.samplingPort }} + protocol: TCP + {{- if .Values.agent.daemonset.useHostPort }} + hostPort: {{ .Values.agent.service.samplingPort }} + {{- end }} + - name: admin + containerPort: 14271 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: admin + readinessProbe: + httpGet: + path: / + port: admin + resources: + {{- toYaml .Values.agent.resources | nindent 10 }} + volumeMounts: + {{- range .Values.agent.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.agent.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + volumes: + {{- range .Values.agent.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.agent.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- with .Values.agent.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.agent.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.agent.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-sa.yaml new file mode 100644 index 0000000..1a8d7f4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-sa.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.agent.enabled .Values.agent.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.agent.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: agent + {{- with .Values.agent.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.agent.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-servicemonitor.yaml new file mode 100644 index 0000000..255a6a7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and (.Values.agent.enabled) (.Values.agent.serviceMonitor.enabled)}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "jaeger.agent.name" . 
}} + {{- if .Values.agent.serviceMonitor.namespace }} + namespace: {{ .Values.agent.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: agent + {{- if .Values.agent.serviceMonitor.additionalLabels }} + {{- toYaml .Values.agent.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.agent.serviceMonitor.annotations }} + annotations: + {{- toYaml .Values.agent.serviceMonitor.annotations | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: admin + path: /metrics + {{- if .Values.agent.serviceMonitor.interval }} + interval: {{ .Values.agent.serviceMonitor.interval }} + {{- end }} + {{- if .Values.agent.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.agent.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.agent.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.agent.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.agent.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.agent.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/component: agent + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-svc.yaml new file mode 100644 index 0000000..e4243f2 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/agent-svc.yaml @@ -0,0 +1,40 @@ +{{- if .Values.agent.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.agent.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: agent +{{- if .Values.agent.service.annotations }} + annotations: + {{- toYaml .Values.agent.service.annotations | nindent 4 }} +{{- end }} +spec: + ports: + - name: zipkin-compact + port: {{ .Values.agent.service.zipkinThriftPort }} + protocol: UDP + targetPort: zipkin-compact + - name: jaeger-compact + port: {{ .Values.agent.service.compactPort }} + protocol: UDP + targetPort: jaeger-compact + - name: jaeger-binary + port: {{ .Values.agent.service.binaryPort }} + protocol: UDP + targetPort: jaeger-binary + - name: http + port: {{ .Values.agent.service.samplingPort }} + protocol: TCP + targetPort: http + - name: admin + port: 14271 + protocol: TCP + targetPort: admin + type: {{ .Values.agent.service.type }} + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: agent +{{- template "loadBalancerSourceRanges" .Values.agent }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-agent-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-agent-svc.yaml new file mode 100644 index 0000000..bc3c8b6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-agent-svc.yaml @@ -0,0 +1,30 @@ +{{- if .Values.allInOne.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.agent.name" . }} + labels: + {{- include "jaeger.labels" . 
| nindent 4 }} + app.kubernetes.io/component: service-agent +spec: + clusterIP: None + ports: + - name: zk-compact-trft + port: 5775 + protocol: UDP + targetPort: 0 + - name: config-rest + port: 5778 + targetPort: 0 + - name: jg-compact-trft + port: 6831 + protocol: UDP + targetPort: 0 + - name: jg-binary-trft + port: 6832 + protocol: UDP + targetPort: 0 + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-collector-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-collector-svc.yaml new file mode 100644 index 0000000..1863ab2 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-collector-svc.yaml @@ -0,0 +1,33 @@ +{{- if .Values.allInOne.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.collector.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: service-collector +spec: + clusterIP: None + ports: + - name: http-zipkin + port: 9411 + targetPort: 0 + - name: grpc-http + port: 14250 + targetPort: 0 + - name: c-tchan-trft + port: 14267 + targetPort: 0 + - name: http-c-binary-trft + port: 14268 + targetPort: 0 + - name: otlp-grpc + port: 4317 + targetPort: 0 + - name: otlp-http + port: 4318 + targetPort: 0 + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-configmap.yaml new file mode 100644 index 0000000..99f93b6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.allInOne.samplingConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "jaeger.fullname" . }}-sampling-strategies + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one +data: + strategies.json: |- +{{ tpl .Values.allInOne.samplingConfig . | indent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-deploy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-deploy.yaml new file mode 100644 index 0000000..89ab723 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-deploy.yaml @@ -0,0 +1,115 @@ +{{- if .Values.allInOne.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "jaeger.fullname" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one + prometheus.io/port: "14269" + prometheus.io/scrape: "true" +spec: + replicas: 1 + strategy: + type: Recreate + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: all-in-one + template: + metadata: + labels: + {{- include "jaeger.selectorLabels" . 
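
In all-in-one mode the chart publishes headless Services (`clusterIP: None`) over a single Deployment, and `allInOne.samplingConfig` feeds the sampling-strategies ConfigMap rendered just above through `tpl`. A sketch using Jaeger's documented strategies-file format (the probabilistic strategy shown is an illustration, not a chart default):

allInOne:
  enabled: true
  samplingConfig: |-
    {
      "default_strategy": {
        "type": "probabilistic",
        "param": 1
      }
    }
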
| nindent 8 }} + app.kubernetes.io/component: all-in-one +{{- if .Values.allInOne.podLabels }} + {{- toYaml .Values.allInOne.podLabels | nindent 8 }} +{{- end }} + annotations: + prometheus.io/port: "14269" + prometheus.io/scrape: "true" + spec: + containers: + - env: + {{- if .Values.allInOne.extraEnv }} + {{- toYaml .Values.allInOne.extraEnv | nindent 12 }} + {{- end }} + - name: SPAN_STORAGE_TYPE + value: memory + - name: COLLECTOR_ZIPKIN_HOST_PORT + value: :9411 + - name: JAEGER_DISABLED + value: "false" + - name: COLLECTOR_OTLP_ENABLED + value: "true" + {{- if .Values.allInOne.samplingConfig }} + - name: SAMPLING_STRATEGIES_FILE + value: /etc/conf/strategies.json + {{- end }} + image: {{ .Values.allInOne.image }}:{{- .Values.allInOne.tag | default (include "jaeger.image.tag" .) }} + imagePullPolicy: {{ .Values.allInOne.pullPolicy }} + name: jaeger + args: + {{- range $arg := .Values.allInOne.args }} + - "{{ tpl $arg $ }}" + {{- end }} + ports: + - containerPort: 5775 + protocol: UDP + - containerPort: 6831 + protocol: UDP + - containerPort: 6832 + protocol: UDP + - containerPort: 5778 + protocol: TCP + - containerPort: 16686 + protocol: TCP + - containerPort: 16685 + protocol: TCP + - containerPort: 9411 + protocol: TCP + - containerPort: 4317 + protocol: TCP + - containerPort: 4318 + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: / + port: 14269 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 15 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 14269 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + {{- with .Values.allInOne.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + + {{- if .Values.allInOne.samplingConfig}} + volumeMounts: + - name: strategies + mountPath: /etc/conf/ + {{- end }} + serviceAccountName: {{ template "jaeger.fullname" . }} + {{- if .Values.allInOne.samplingConfig}} + volumes: + - name: strategies + configMap: + name: {{ include "jaeger.fullname" . }}-sampling-strategies + {{- end }} + {{- with .Values.allInOne.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-ing.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-ing.yaml new file mode 100644 index 0000000..0deb03b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-ing.yaml @@ -0,0 +1,33 @@ +{{- if and (.Values.allInOne.enabled) (.Values.allInOne.ingress.enabled) -}} +{{- $ingressSupportsIngressClassName := eq (include "common.ingress.supportsIngressClassname" .) "true" }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ template "jaeger.query.name" . }} + labels: + {{- include "jaeger.labels" . 
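
Note that the all-in-one Deployment hardcodes SPAN_STORAGE_TYPE=memory, runs a single replica with a Recreate strategy, and listens for OTLP on 4317/4318; traces vanish on every restart, which is fine for a playground. If memory growth is a concern, the trace cap can be tuned through `extraEnv`; MEMORY_MAX_TRACES is assumed here to be the env-var form of Jaeger's `--memory.max-traces` flag, it is not taken from this chart:

allInOne:
  extraEnv:
    - name: MEMORY_MAX_TRACES   # assumed: env form of --memory.max-traces
      value: "50000"
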
| nindent 4 }} + app.kubernetes.io/component: all-in-one + {{- if .Values.allInOne.ingress.annotations }} + annotations: + {{- toYaml .Values.allInOne.ingress.annotations | nindent 4 }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.allInOne.ingress.ingressClassName }} + ingressClassName: {{ .Values.allInOne.ingress.ingressClassName }} + {{- end }} + rules: + {{- range $host := .Values.allInOne.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: ImplementationSpecific + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "jaeger.query.name" $) "servicePort" 16686 "context" $) | nindent 14 }} + {{- end -}} + {{- if .Values.allInOne.ingress.tls }} + tls: + {{- toYaml .Values.allInOne.ingress.tls | nindent 4 }} + {{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-query-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-query-svc.yaml new file mode 100644 index 0000000..5f0c2f5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-query-svc.yaml @@ -0,0 +1,21 @@ +{{- if .Values.allInOne.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.query.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: service-query +spec: + clusterIP: None + ports: + - name: http-query + port: 16686 + targetPort: 16686 + - name: grpc-query + port: 16685 + targetPort: 16685 + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-sa.yaml new file mode 100644 index 0000000..599c476 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/allinone-sa.yaml @@ -0,0 +1,9 @@ +{{- if .Values.allInOne.enabled -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.fullname" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: all-in-one +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-job.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-job.yaml new file mode 100644 index 0000000..01c1e51 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-job.yaml @@ -0,0 +1,102 @@ +{{- if .Values.collector.enabled -}} +{{- if eq .Values.storage.type "cassandra" -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "jaeger.fullname" . }}-cassandra-schema + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: cassandra-schema +{{- if .Values.schema.annotations }} + annotations: + {{- toYaml .Values.schema.annotations | nindent 4 }} +{{- end }} +spec: + activeDeadlineSeconds: {{ .Values.schema.activeDeadlineSeconds }} + template: + metadata: + name: {{ include "jaeger.fullname" . 
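
The all-in-one Ingress above routes everything at `/` to the query Service on 16686, with class and TLS taken from values. A sketch with placeholder host and class:

allInOne:
  ingress:
    enabled: true
    ingressClassName: nginx    # assumed: an ingress class available in the cluster
    hosts:
      - jaeger.example.com     # placeholder hostname
    tls: []
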
}}-cassandra-schema +{{- if .Values.schema.podAnnotations }} + annotations: + {{- toYaml .Values.schema.podAnnotations | nindent 8 }} +{{- end }} +{{- if .Values.schema.podLabels }} + labels: + {{- toYaml .Values.schema.podLabels | nindent 8 }} +{{- end }} + spec: + securityContext: + {{- toYaml .Values.schema.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "jaeger.cassandraSchema.serviceAccountName" . }} + {{- with .Values.schema.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ include "jaeger.fullname" . }}-cassandra-schema + image: {{ .Values.schema.image }}:{{- include "jaeger.image.tag" . }} + imagePullPolicy: {{ .Values.schema.pullPolicy }} + securityContext: + {{- toYaml .Values.schema.securityContext | nindent 10 }} + env: + {{- if .Values.schema.extraEnv }} + {{- toYaml .Values.schema.extraEnv | nindent 10 }} + {{- end }} + {{ range $key, $value := .Values.schema.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{ end }} + {{- include "cassandra.env" . | nindent 10 }} + - name: CQLSH_HOST + value: {{ template "cassandra.host" . }} + {{ if .Values.storage.cassandra.tls.enabled }} + - name: CQLSH_SSL + value: "--ssl" + {{- end }} + - name: DATACENTER + value: {{ .Values.cassandra.config.dc_name | quote }} + {{- if .Values.storage.cassandra.keyspace }} + - name: KEYSPACE + value: {{ .Values.storage.cassandra.keyspace }} + {{- end }} + resources: + {{- toYaml .Values.schema.resources | nindent 10 }} + volumeMounts: + {{- range .Values.schema.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/root/.cassandra/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/root/.cassandra/client-cert.pem" + subPath: "client-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/root/.cassandra/client-key.pem" + subPath: "client-key.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/root/.cassandra/cqlshrc" + subPath: "cqlshrc" + readOnly: true + {{- end }} + restartPolicy: OnFailure + volumes: + {{- range .Values.schema.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + secret: + secretName: {{ .Values.storage.cassandra.tls.secretName }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-sa.yaml new file mode 100644 index 0000000..5106321 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-schema-sa.yaml @@ -0,0 +1,10 @@ +{{- if and (eq .Values.storage.type "cassandra") .Values.schema.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.cassandraSchema.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . 
| nindent 4 }} + app.kubernetes.io/component: cassandra-schema +automountServiceAccountToken: {{ .Values.schema.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-secret.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-secret.yaml new file mode 100644 index 0000000..aa3437f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/cassandra-secret.yaml @@ -0,0 +1,11 @@ +{{ if and (eq .Values.storage.type "cassandra") .Values.storage.cassandra.usePassword (not .Values.storage.cassandra.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "jaeger.fullname" . }}-cassandra + labels: + {{- include "jaeger.labels" . | nindent 4 }} +type: Opaque +data: + password: {{ .Values.storage.cassandra.password | b64enc | quote }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-configmap.yaml new file mode 100644 index 0000000..94c2b97 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-configmap.yaml @@ -0,0 +1,13 @@ +{{- if .Values.collector.samplingConfig }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "jaeger.fullname" . }}-sampling-strategies + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: collector +data: + strategies.json: |- +{{ tpl .Values.collector.samplingConfig . | indent 4 }} +{{- end }} + diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-deploy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-deploy.yaml new file mode 100644 index 0000000..6934884 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-deploy.yaml @@ -0,0 +1,211 @@ +{{- if .Values.collector.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "jaeger.collector.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: collector +{{- if .Values.collector.annotations }} + annotations: + {{- toYaml .Values.collector.annotations | nindent 4 }} +{{- end }} +spec: +{{- if not .Values.collector.autoscaling.enabled }} + replicas: {{ .Values.collector.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: collector + template: + metadata: + annotations: + checksum/config-env: {{ include (print $.Template.BasePath "/collector-configmap.yaml") . | sha256sum }} +{{- if .Values.collector.podAnnotations }} + {{- toYaml .Values.collector.podAnnotations | nindent 8 }} +{{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: collector +{{- if .Values.collector.podLabels }} + {{- toYaml .Values.collector.podLabels | nindent 8 }} +{{- end }} + spec: + {{- with .Values.collector.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + securityContext: + {{- toYaml .Values.collector.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "jaeger.collector.serviceAccountName" . }} + {{- with .Values.collector.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ template "jaeger.collector.name" . 
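
When `storage.type` is `cassandra`, the schema Job above runs the keyspace setup and the chart renders the Cassandra Secret unless `existingSecret` points at one you manage yourself. Sketch:

storage:
  type: cassandra
  cassandra:
    usePassword: true
    password: changeme         # placeholder; base64-encoded into the chart-managed Secret
    # existingSecret: my-cassandra-credentials   # set this instead to skip Secret creation
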
}} + securityContext: + {{- toYaml .Values.collector.securityContext | nindent 10 }} + image: {{ .Values.collector.image }}:{{- .Values.collector.tag | default (include "jaeger.image.tag" .) }} + imagePullPolicy: {{ .Values.collector.pullPolicy }} + args: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.collector.cmdlineParams ) | nindent 10 }} + {{- if not .Values.ingester.enabled -}} + {{- include "storage.cmdArgs" . | nindent 10 }} + {{- end }} + env: + {{- if .Values.collector.extraEnv }} + {{- toYaml .Values.collector.extraEnv | nindent 10 }} + {{- end }} + {{- if .Values.collector.service.zipkin }} + - name: COLLECTOR_ZIPKIN_HOST_PORT + value: {{ .Values.collector.service.zipkin.port | quote }} + {{- end }} + {{- if or .Values.collector.service.otlp.grpc .Values.collector.service.otlp.http }} + - name: COLLECTOR_OTLP_ENABLED + value: "true" + {{- end }} + {{- if ne (default 4317 .Values.collector.service.otlp.grpc.port | toString ) "4317" }} + - name: COLLECTOR_OTLP_GRPC_HOST_PORT + value: {{ .Values.collector.service.otlp.grpc.port | quote }} + {{- end }} + {{- if ne (default 4318 .Values.collector.service.otlp.http.port | toString) "4318" }} + - name: COLLECTOR_OTLP_HTTP_HOST_PORT + value: {{ .Values.collector.service.otlp.http.port | quote }} + {{- end }} + {{- if .Values.ingester.enabled }} + - name: SPAN_STORAGE_TYPE + value: kafka + {{- range $key, $value := .Values.storage.kafka.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- if .Values.storage.kafka.extraEnv }} + {{- toYaml .Values.storage.kafka.extraEnv | nindent 10 }} + {{- end }} + - name: KAFKA_PRODUCER_BROKERS + value: {{ tpl (include "helm-toolkit.utils.joinListWithComma" .Values.storage.kafka.brokers) . }} + - name: KAFKA_PRODUCER_TOPIC + value: {{ .Values.storage.kafka.topic }} + - name: KAFKA_PRODUCER_AUTHENTICATION + value: {{ .Values.storage.kafka.authentication }} + {{ else }} + - name: SPAN_STORAGE_TYPE + value: {{ .Values.storage.type }} + {{- include "storage.env" . 
| nindent 10 }} + {{- end }} + {{- if .Values.collector.samplingConfig}} + - name: SAMPLING_STRATEGIES_FILE + value: /etc/conf/strategies.json + {{- end }} + ports: + - containerPort: {{ .Values.collector.service.grpc.port }} + name: grpc + protocol: TCP + - containerPort: {{ .Values.collector.service.http.port }} + name: http + protocol: TCP + - containerPort: 14269 + name: admin + protocol: TCP + {{- if .Values.collector.service.zipkin }} + - containerPort: {{ .Values.collector.service.zipkin.port }} + name: zipkin + protocol: TCP + {{- end }} + {{- if or .Values.collector.service.otlp.grpc .Values.collector.service.otlp.http }} + - containerPort: {{ default 4317 .Values.collector.service.otlp.grpc.port }} + name: otlp-grpc + protocol: TCP + - containerPort: {{ default 4318 .Values.collector.service.otlp.http.port }} + name: otlp-http + protocol: TCP + {{- end }} + readinessProbe: + httpGet: + path: / + port: admin + livenessProbe: + httpGet: + path: / + port: admin + resources: + {{- toYaml .Values.collector.resources | nindent 10 }} + volumeMounts: + {{- range .Values.collector.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.collector.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-cert.pem" + subPath: "client-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-key.pem" + subPath: "client-key.pem" + readOnly: true + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + mountPath: "/es-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + {{- end }} + {{- if .Values.collector.samplingConfig}} + - name: strategies + mountPath: /etc/conf/ + {{- end }} + dnsPolicy: {{ .Values.collector.dnsPolicy }} + restartPolicy: Always + volumes: + {{- range .Values.collector.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.collector.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.collector.samplingConfig}} + - name: strategies + configMap: + name: {{ include "jaeger.fullname" . }}-sampling-strategies + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + secret: + secretName: {{ .Values.storage.cassandra.tls.secretName }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + secret: + secretName: {{ .Values.storage.elasticsearch.tls.secretName }} + {{- end }} + {{- with .Values.collector.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.collector.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.collector.tolerations }} + tolerations: + {{- toYaml . 
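
The collector turns OTLP on whenever either `collector.service.otlp.grpc` or `.http` is configured, and only exports the COLLECTOR_OTLP_*_HOST_PORT variables when the chosen port differs from the 4317/4318 defaults. The optional Zipkin listener works the same way, gated on the presence of the `zipkin` block:

collector:
  service:
    otlp:
      grpc:
        port: 4317
      http:
        port: 4318
    zipkin:
      port: 9411               # presence of this block enables the Zipkin listener
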
| nindent 8 }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-hpa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-hpa.yaml new file mode 100644 index 0000000..3a44174 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-hpa.yaml @@ -0,0 +1,43 @@ +{{- if .Values.collector.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "jaeger.collector.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: collector +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "jaeger.collector.name" . }} + minReplicas: {{ .Values.collector.autoscaling.minReplicas }} + maxReplicas: {{ .Values.collector.autoscaling.maxReplicas }} + metrics: + {{- if .Values.collector.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if eq "autoscaling/v2" (include "common.capabilities.hpa.apiVersion" ( dict "context" $ )) }} + target: + averageUtilization: {{ .Values.collector.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization + {{- else }} + targetAverageUtilization: {{ .Values.collector.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + - type: Resource + resource: + name: cpu + {{- if eq "autoscaling/v2" (include "common.capabilities.hpa.apiVersion" ( dict "context" $ )) }} + target: + averageUtilization: {{ .Values.collector.autoscaling.targetCPUUtilizationPercentage | default 80 }} + type: Utilization + {{- else }} + targetAverageUtilization: {{ .Values.collector.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} + {{- with .Values.collector.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-ing.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-ing.yaml new file mode 100644 index 0000000..4e15ab9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-ing.yaml @@ -0,0 +1,40 @@ +{{- if .Values.collector.ingress.enabled -}} +{{- $ingressSupportsIngressClassName := eq (include "common.ingress.supportsIngressClassname" .) "true" }} +{{- $defaultServicePort := .Values.collector.service.http.port -}} +{{- $basePath := .Values.collector.basePath -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" $ }} +kind: Ingress +metadata: + name: {{ template "jaeger.collector.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + {{- if .Values.collector.ingress.labels }} + {{- toYaml .Values.collector.ingress.labels | nindent 4 }} + {{- end }} + app.kubernetes.io/component: collector + {{- if .Values.collector.ingress.annotations }} + annotations: + {{- toYaml .Values.collector.ingress.annotations | nindent 4 }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.collector.ingress.ingressClassName }} + ingressClassName: {{ .Values.collector.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.collector.ingress.hosts }} + - host: {{ include "jaeger.collector.ingressHost" . 
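
The HPA above takes over replica management, which is why the collector Deployment omits `replicas` while autoscaling is enabled; it targets CPU utilization (default 80%) and, optionally, memory. Sketch:

collector:
  autoscaling:
    enabled: true              # collector.replicaCount is ignored while this is true
    minReplicas: 2
    maxReplicas: 10
    targetCPUUtilizationPercentage: 80
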
}} + http: + paths: + - path: {{ $basePath }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: ImplementationSpecific + {{- end }} + {{- $servicePortString := (include "jaeger.collector.ingressServicePort" (dict "defaultServicePort" $defaultServicePort "context" .)) }} + {{- $servicePort := default $servicePortString ($servicePortString | float64) }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "jaeger.collector.name" $) "servicePort" $servicePort "context" $) | nindent 14 }} + {{- end -}} + {{- if .Values.collector.ingress.tls }} + tls: + {{- toYaml .Values.collector.ingress.tls | nindent 4 }} + {{- end -}} + {{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-sa.yaml new file mode 100644 index 0000000..c344453 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-sa.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.collector.enabled .Values.collector.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.collector.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: collector + {{- with .Values.collector.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.collector.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-servicemonitor.yaml new file mode 100644 index 0000000..fbd94fe --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and (.Values.collector.enabled) (.Values.collector.serviceMonitor.enabled)}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "jaeger.collector.name" . }} + {{- if .Values.collector.serviceMonitor.namespace }} + namespace: {{ .Values.collector.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "jaeger.labels" . 
| nindent 4 }} + app.kubernetes.io/component: collector + {{- if .Values.collector.serviceMonitor.additionalLabels }} + {{- toYaml .Values.collector.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.collector.serviceMonitor.annotations }} + annotations: + {{- toYaml .Values.collector.serviceMonitor.annotations | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: admin + path: /metrics + {{- if .Values.collector.serviceMonitor.interval }} + interval: {{ .Values.collector.serviceMonitor.interval }} + {{- end }} + {{- if .Values.collector.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.collector.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.collector.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.collector.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.collector.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.collector.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/component: collector + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-svc.yaml new file mode 100644 index 0000000..b6bf739 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/collector-svc.yaml @@ -0,0 +1,65 @@ +{{- if .Values.collector.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.collector.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: collector +{{- if .Values.collector.service.annotations }} + annotations: + {{- toYaml .Values.collector.service.annotations | nindent 4 }} +{{- end }} +spec: + ports: + - name: grpc + port: {{ .Values.collector.service.grpc.port }} +{{- if and (eq .Values.collector.service.type "NodePort") (.Values.collector.service.grpc.nodePort) }} + nodePort: {{ .Values.collector.service.grpc.nodePort }} +{{- end }} + protocol: TCP + targetPort: grpc + - name: http + port: {{ .Values.collector.service.http.port }} +{{- if and (eq .Values.collector.service.type "NodePort") (.Values.collector.service.http.nodePort) }} + nodePort: {{ .Values.collector.service.http.nodePort }} +{{- end }} + protocol: TCP + targetPort: http +{{- if .Values.collector.service.zipkin }} + - name: zipkin + port: {{ .Values.collector.service.zipkin.port }} +{{- if and (eq .Values.collector.service.type "NodePort") (.Values.collector.service.zipkin.nodePort) }} + nodePort: {{ .Values.collector.service.zipkin.nodePort }} +{{- end }} + protocol: TCP + targetPort: zipkin +{{- end }} +{{- if or .Values.collector.service.otlp.grpc .Values.collector.service.otlp.http }} + - name: otlp-grpc + port: {{ default 4317 .Values.collector.service.otlp.grpc.port }} +{{- if and (eq .Values.collector.service.type "NodePort") (.Values.collector.service.otlp.grpc.nodePort) }} + nodePort: {{ .Values.collector.service.otlp.grpc.nodePort }} +{{- end }} + protocol: TCP + targetPort: otlp-grpc + - name: otlp-http + port: {{ default 4318 .Values.collector.service.otlp.http.port }} +{{- if and (eq .Values.collector.service.type "NodePort") (.Values.collector.service.otlp.http.nodePort) }} + nodePort: {{ .Values.collector.service.otlp.http.nodePort }} +{{- end }} + protocol: TCP + targetPort: 
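
In the collector Service, each `nodePort` renders only when `collector.service.type` is `NodePort` and an explicit value is given, so exposing gRPC and HTTP ingest outside the cluster looks like this (14250/14268 match the ports the all-in-one collector Service uses; the 30xxx values are assumed free ports in the default NodePort range):

collector:
  service:
    type: NodePort
    grpc:
      port: 14250
      nodePort: 30250          # assumed unallocated NodePort
    http:
      port: 14268
      nodePort: 30268          # assumed unallocated NodePort
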
otlp-http +{{- end }} + - name: admin + port: 14269 + targetPort: admin + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: collector + {{- if and (eq .Values.collector.service.type "ClusterIP") .Values.collector.service.clusterIP }} + clusterIP: {{ .Values.collector.service.clusterIP }} + {{- end }} + type: {{ .Values.collector.service.type }} +{{- template "loadBalancerSourceRanges" .Values.collector }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/elasticsearch-secret.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/elasticsearch-secret.yaml new file mode 100644 index 0000000..7b5b25c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/elasticsearch-secret.yaml @@ -0,0 +1,16 @@ +{{ if and (eq .Values.storage.type "elasticsearch") .Values.storage.elasticsearch.usePassword (not .Values.storage.elasticsearch.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "jaeger.fullname" . }}-elasticsearch + labels: + {{- include "jaeger.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-1" + "helm.sh/hook-delete-policy": before-hook-creation + "helm.sh/resource-policy": keep +type: Opaque +data: + password: {{ .Values.storage.elasticsearch.password | b64enc | quote }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-cronjob.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-cronjob.yaml new file mode 100644 index 0000000..c3dd1d4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-cronjob.yaml @@ -0,0 +1,112 @@ +{{- if .Values.esIndexCleaner.enabled -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ include "jaeger.fullname" . }}-es-index-cleaner + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: es-index-cleaner +{{- if .Values.esIndexCleaner.annotations }} + annotations: + {{- toYaml .Values.esIndexCleaner.annotations | nindent 4 }} +{{- end }} +spec: + concurrencyPolicy: {{ .Values.esIndexCleaner.concurrencyPolicy }} + schedule: {{ .Values.esIndexCleaner.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.esIndexCleaner.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.esIndexCleaner.failedJobsHistoryLimit }} + suspend: false + jobTemplate: + spec: + {{- if .Values.esIndexCleaner.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.esIndexCleaner.activeDeadlineSeconds }} + {{- end }} + {{- if .Values.esIndexCleaner.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.esIndexCleaner.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + {{- if .Values.esIndexCleaner.podAnnotations }} + annotations: + {{- toYaml .Values.esIndexCleaner.podAnnotations | nindent 12 }} + {{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 12 }} + app.kubernetes.io/component: es-index-cleaner + {{- if .Values.esIndexCleaner.podLabels }} + {{- toYaml .Values.esIndexCleaner.podLabels | nindent 12 }} + {{- end }} + spec: + serviceAccountName: {{ template "jaeger.esIndexCleaner.serviceAccountName" . }} + {{- with .Values.esIndexCleaner.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
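
The Elasticsearch Secret above mirrors the Cassandra one but is marked as a pre-install/pre-upgrade hook with `resource-policy: keep`, so it survives `helm uninstall`. Enabling the Elasticsearch backend:

storage:
  type: elasticsearch
  elasticsearch:
    usePassword: true
    password: changeme         # placeholder credential
    tls:
      enabled: false           # when true, ca-cert.pem is mounted from tls.secretName
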
| nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.esIndexCleaner.podSecurityContext | nindent 12 }} + containers: + - name: {{ include "jaeger.fullname" . }}-es-index-cleaner + securityContext: + {{- toYaml .Values.esIndexCleaner.securityContext | nindent 14 }} + image: {{ .Values.esIndexCleaner.image }}:{{- .Values.esIndexCleaner.tag | default (include "jaeger.image.tag" .) }} + imagePullPolicy: {{ .Values.esIndexCleaner.pullPolicy }} + args: + - {{ .Values.esIndexCleaner.numberOfDays | quote }} + - {{ include "elasticsearch.client.url" . }} + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.esIndexCleaner.cmdlineParams ) | nindent 14 }} + env: + {{- if .Values.esIndexCleaner.extraEnv }} + {{- toYaml .Values.esIndexCleaner.extraEnv | nindent 14 }} + {{- end }} + {{ include "elasticsearch.env" . | nindent 14 }} + resources: + {{- toYaml .Values.esIndexCleaner.resources | nindent 14 }} + volumeMounts: + {{- range .Values.esIndexCleaner.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.esIndexCleaner.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + mountPath: "/es-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + {{- end }} + restartPolicy: OnFailure + {{- with .Values.esIndexCleaner.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esIndexCleaner.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esIndexCleaner.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + volumes: + {{- range .Values.esIndexCleaner.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.esIndexCleaner.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + secret: + secretName: {{ .Values.storage.elasticsearch.tls.secretName }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-sa.yaml new file mode 100644 index 0000000..02e2f22 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-index-cleaner-sa.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.esIndexCleaner.enabled .Values.esIndexCleaner.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.esIndexCleaner.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . 
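
The es-index-cleaner CronJob passes `numberOfDays` and the Elasticsearch client URL as positional arguments, so retention is purely a values concern:

esIndexCleaner:
  enabled: true
  numberOfDays: 7              # delete Jaeger indices older than a week
  schedule: "55 23 * * *"      # assumed nightly schedule; any cron expression works
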
| nindent 4 }} + app.kubernetes.io/component: es-index-cleaner +automountServiceAccountToken: {{ .Values.esIndexCleaner.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-cronjob.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-cronjob.yaml new file mode 100644 index 0000000..39504e1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-cronjob.yaml @@ -0,0 +1,113 @@ +{{- if .Values.esLookback.enabled -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ include "jaeger.fullname" . }}-es-lookback + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: es-lookback +{{- if .Values.esLookback.annotations }} + annotations: + {{- toYaml .Values.esLookback.annotations | nindent 4 }} +{{- end }} +spec: + concurrencyPolicy: {{ .Values.esLookback.concurrencyPolicy }} + schedule: {{ .Values.esLookback.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.esLookback.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.esLookback.failedJobsHistoryLimit }} + suspend: false + jobTemplate: + spec: + {{- if .Values.esLookback.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.esLookback.activeDeadlineSeconds }} + {{- end }} + {{- if .Values.esLookback.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.esLookback.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + {{- if .Values.esLookback.podAnnotations }} + annotations: + {{- toYaml .Values.esLookback.podAnnotations | nindent 12 }} + {{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 12 }} + app.kubernetes.io/component: es-lookback + {{- if .Values.esLookback.podLabels }} + {{- toYaml .Values.esLookback.podLabels | nindent 12 }} + {{- end }} + spec: + serviceAccountName: {{ template "jaeger.esLookback.serviceAccountName" . }} + {{- with .Values.esLookback.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.esLookback.podSecurityContext | nindent 12 }} + restartPolicy: OnFailure + {{- with .Values.esLookback.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esLookback.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esLookback.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + + containers: + - name: {{ include "jaeger.fullname" . }}-es-lookback + securityContext: + {{- toYaml .Values.esLookback.securityContext | nindent 14 }} + image: "{{ .Values.esLookback.image }}:{{- include "jaeger.image.tag" . }}" + imagePullPolicy: {{ .Values.esLookback.pullPolicy }} + args: + - lookback + - {{ include "elasticsearch.client.url" . }} + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.esLookback.cmdlineParams ) | nindent 14 }} + env: + {{ include "elasticsearch.env" . 
| nindent 14 }} + {{- if .Values.esLookback.extraEnv }} + {{- toYaml .Values.esLookback.extraEnv | nindent 14 }} + {{- end }} + resources: + {{- toYaml .Values.esLookback.resources | nindent 14 }} + volumeMounts: + {{- range .Values.esLookback.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.esLookback.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + mountPath: "/es-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + {{- end }} + volumes: + {{- range .Values.esLookback.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.esLookback.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + secret: + secretName: {{ .Values.storage.elasticsearch.tls.secretName }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-sa.yaml new file mode 100644 index 0000000..1c5140d --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-lookback-sa.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.esLookback.enabled .Values.esLookback.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.esLookback.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: es-lookback +automountServiceAccountToken: {{ .Values.esLookback.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-cronjob.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-cronjob.yaml new file mode 100644 index 0000000..5136f52 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-cronjob.yaml @@ -0,0 +1,113 @@ +{{- if .Values.esRollover.enabled -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ include "jaeger.fullname" . }}-es-rollover + labels: + {{- include "jaeger.labels" . 
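
es-lookback and es-rollover share the same CronJob scaffold and differ only in the subcommand (`lookback` vs `rollover`) handed to the rollover image, so enabling one is symmetrical with the other:

esLookback:
  enabled: true
  schedule: "5 0 * * *"        # placeholder cron expression
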
| nindent 4 }} + app.kubernetes.io/component: es-rollover +{{- if .Values.esRollover.annotations }} + annotations: + {{- toYaml .Values.esRollover.annotations | nindent 4 }} +{{- end }} +spec: + concurrencyPolicy: {{ .Values.esRollover.concurrencyPolicy }} + schedule: {{ .Values.esRollover.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.esRollover.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.esRollover.failedJobsHistoryLimit }} + suspend: false + jobTemplate: + spec: + {{- if .Values.esRollover.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.esRollover.activeDeadlineSeconds }} + {{- end }} + {{- if .Values.esRollover.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.esRollover.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + {{- if .Values.esRollover.podAnnotations }} + annotations: + {{- toYaml .Values.esRollover.podAnnotations | nindent 12 }} + {{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 12 }} + app.kubernetes.io/component: es-rollover + {{- if .Values.esRollover.podLabels }} + {{- toYaml .Values.esRollover.podLabels | nindent 12 }} + {{- end }} + spec: + serviceAccountName: {{ template "jaeger.esRollover.serviceAccountName" . }} + {{- with .Values.esRollover.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml .Values.esRollover.podSecurityContext | nindent 12 }} + restartPolicy: OnFailure + {{- with .Values.esRollover.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esRollover.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.esRollover.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + + containers: + - name: {{ include "jaeger.fullname" . }}-es-rollover + securityContext: + {{- toYaml .Values.esRollover.securityContext | nindent 14 }} + image: "{{ .Values.esRollover.image }}:{{- include "jaeger.image.tag" . }}" + imagePullPolicy: {{ .Values.esRollover.pullPolicy }} + args: + - rollover + - {{ include "elasticsearch.client.url" . }} + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.esRollover.cmdlineParams ) | nindent 14 }} + env: + {{ include "elasticsearch.env" . 
| nindent 14 }} + {{- if .Values.esRollover.extraEnv }} + {{- toYaml .Values.esRollover.extraEnv | nindent 14 }} + {{- end }} + resources: + {{- toYaml .Values.esRollover.resources | nindent 14 }} + volumeMounts: + {{- range .Values.esRollover.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.esRollover.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + mountPath: "/es-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + {{- end }} + volumes: + {{- range .Values.esRollover.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.esRollover.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + secret: + secretName: {{ .Values.storage.elasticsearch.tls.secretName }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-hook.yml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-hook.yml new file mode 100644 index 0000000..48c59af --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-hook.yml @@ -0,0 +1,90 @@ +{{- if .Values.esRollover.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "jaeger.fullname" . }}-es-rollover-init + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: es-rollover-init + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if .Values.esRollover.initHook.annotations }} + {{- toYaml .Values.esRollover.initHook.annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.esRollover.initHook.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.esRollover.initHook.activeDeadlineSeconds }} + {{- end }} + {{- with .Values.esRollover.initHook.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ . }} + {{- end }} + + template: + metadata: + {{- with .Values.esRollover.initHook.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: es-rollover-init + {{- if .Values.esRollover.initHook.podLabels }} + {{- toYaml .Values.esRollover.initHook.podLabels | nindent 10 }} + {{- end }} + spec: + serviceAccountName: {{ template "jaeger.esRollover.serviceAccountName" . }} + {{- with .Values.esRollover.imagePullSecrets }} + imagePullSecrets: {{- toYaml . | nindent 8 }} + {{- end }} + securityContext: {{- toYaml .Values.esRollover.podSecurityContext | nindent 8 }} + restartPolicy: OnFailure + {{- with .Values.esRollover.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.esRollover.affinity }} + affinity: {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.esRollover.tolerations }} + tolerations: {{- toYaml . | nindent 8 }} + {{- end }} + + containers: + - name: {{ include "jaeger.fullname" . 
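
The rollover init Job above runs as a pre-install/pre-upgrade hook, so the rollover indices and write aliases exist before the CronJob's first tick; `initHook` carries the knobs specific to that one-shot Job:

esRollover:
  enabled: true
  initHook:
    ttlSecondsAfterFinished: 120   # garbage-collect the init Job shortly after success
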
}}-es-rollover-init + securityContext: {{- toYaml .Values.esRollover.securityContext | nindent 12 }} + image: "{{ .Values.esRollover.image }}:{{- .Values.esRollover.tag | default (include "jaeger.image.tag" .) }}" + imagePullPolicy: {{ .Values.esRollover.pullPolicy }} + args: + - init + - {{ include "elasticsearch.client.url" . }} + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.esRollover.cmdlineParams ) | nindent 12 }} + env: + {{ include "elasticsearch.env" . | nindent 12 }} + {{- with .Values.esRollover.initHook.extraEnv }} + {{- toYaml . | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.esRollover.resources | nindent 12 }} + volumeMounts: + {{- range .Values.esRollover.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.esRollover.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + volumes: + {{- range .Values.esRollover.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.esRollover.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-sa.yaml new file mode 100644 index 0000000..351906c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/es-rollover-sa.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.esRollover.enabled .Values.esRollover.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.esRollover.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: es-index-rollover + annotations: + "helm.sh/hook": pre-install,pre-upgrade + # Must be created before the rollover init hook + "helm.sh/hook-weight": "-10" + "helm.sh/hook-delete-policy": before-hook-creation +automountServiceAccountToken: {{ .Values.esRollover.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/extra-list.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/extra-list.yaml new file mode 100644 index 0000000..d1ca3ba --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraObjects }} +--- +{{ tpl (. | toYaml) $ }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-deploy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-deploy.yaml new file mode 100644 index 0000000..3e554b2 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-deploy.yaml @@ -0,0 +1,65 @@ +{{- if .Values.hotrod.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jaeger.fullname" . }}-hotrod + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: hotrod +spec: + replicas: {{ .Values.hotrod.replicaCount }} + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: hotrod + template: + metadata: + labels: + {{- include "jaeger.selectorLabels" . 
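
extra-list.yaml pipes each entry of `extraObjects` through `tpl`, so arbitrary manifests, including ones containing template expressions, can ride along with the release. A hypothetical extra ConfigMap:

extraObjects:
  - apiVersion: v1
    kind: ConfigMap
    metadata:
      name: '{{ include "jaeger.fullname" . }}-extra'   # template expressions are resolved by tpl
    data:
      note: "rendered as an additional manifest"
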
| nindent 8 }} + app.kubernetes.io/component: hotrod + spec: + securityContext: + {{- toYaml .Values.hotrod.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "jaeger.hotrod.serviceAccountName" . }} + {{- with .Values.hotrod.image.pullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ include "jaeger.fullname" . }}-hotrod + securityContext: + {{- toYaml .Values.hotrod.securityContext | nindent 12 }} + image: {{ .Values.hotrod.image.repository }}:{{- include "jaeger.image.tag" . }} + imagePullPolicy: {{ .Values.hotrod.image.pullPolicy }} + env: + - name: JAEGER_AGENT_HOST + value: {{ template "jaeger.hotrod.tracing.host" . }} + - name: JAEGER_AGENT_PORT + value: {{ .Values.hotrod.tracing.port | quote }} + ports: + - name: http + containerPort: 8080 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.hotrod.resources | nindent 12 }} + {{- with .Values.hotrod.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.hotrod.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.hotrod.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-ing.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-ing.yaml new file mode 100644 index 0000000..b60f211 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-ing.yaml @@ -0,0 +1,37 @@ +{{- if .Values.hotrod.enabled -}} + {{- if .Values.hotrod.ingress.enabled -}} + {{- $ingressSupportsIngressClassName := eq (include "common.ingress.supportsIngressClassname" .) "true" }} + {{- $serviceName := include "jaeger.fullname" . -}} + {{- $servicePort := .Values.hotrod.service.port -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" $ }} +kind: Ingress +metadata: + name: {{ include "jaeger.fullname" . }}-hotrod + labels: + {{- include "jaeger.labels" . 
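
The HotROD demo is wired to the agent purely through the two JAEGER_AGENT_* env vars above; the port below is the conventional jaeger-compact UDP port and is an assumption, not a default read from this diff:

hotrod:
  enabled: true
  replicaCount: 1
  tracing:
    port: 6831                 # assumed jaeger-compact UDP port
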
| nindent 4 }} + app.kubernetes.io/component: hotrod + {{- if .Values.hotrod.ingress.annotations }} + annotations: + {{- toYaml .Values.hotrod.ingress.annotations | nindent 4 }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.hotrod.ingress.ingressClassName }} + ingressClassName: {{ .Values.hotrod.ingress.ingressClassName }} + {{- end }} + rules: + {{- range $host := .Values.hotrod.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: / + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: ImplementationSpecific + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (printf "%s-hotrod" $serviceName) "servicePort" $servicePort "context" $) | nindent 14 }} + {{- end -}} + {{- if .Values.hotrod.ingress.tls }} + tls: + {{- toYaml .Values.hotrod.ingress.tls | nindent 4 }} + {{- end -}} + {{- end -}} + {{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-sa.yaml new file mode 100644 index 0000000..414b25f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-sa.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.hotrod.enabled .Values.hotrod.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.hotrod.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: hotrod +automountServiceAccountToken: {{ .Values.hotrod.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-svc.yaml new file mode 100644 index 0000000..01771a5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/hotrod-svc.yaml @@ -0,0 +1,24 @@ +{{- if .Values.hotrod.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "jaeger.fullname" . }}-hotrod + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: hotrod +{{- if .Values.hotrod.service.annotations }} + annotations: + {{- toYaml .Values.hotrod.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.hotrod.service.type }} + ports: + - name: http + port: {{ .Values.hotrod.service.port }} + protocol: TCP + targetPort: http + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: hotrod +{{- template "loadBalancerSourceRanges" .Values.hotrod }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-deploy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-deploy.yaml new file mode 100644 index 0000000..8c7608a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-deploy.yaml @@ -0,0 +1,134 @@ +{{- if .Values.ingester.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "jaeger.fullname" . }}-ingester + labels: + {{- include "jaeger.labels" . 
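
Turning on the ingester flips the collector into Kafka-producer mode (SPAN_STORAGE_TYPE=kafka, seen earlier in collector-deploy.yaml) while the ingester consumes the same topic and writes to the real backend; the broker and topic names below are placeholders:

ingester:
  enabled: true                # also switches the collector to produce to Kafka
storage:
  kafka:
    brokers:
      - kafka:9092             # placeholder broker address
    topic: jaeger-spans        # placeholder topic
    authentication: none
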
| nindent 4 }} + app.kubernetes.io/component: ingester +{{- if .Values.ingester.annotations }} + annotations: + {{- toYaml .Values.ingester.annotations | nindent 4 }} +{{- end }} +spec: +{{- if not .Values.ingester.autoscaling.enabled }} + replicas: {{ .Values.ingester.replicaCount }} +{{- end }} + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: ingester + template: + metadata: + annotations: +{{- if .Values.ingester.podAnnotations }} + {{- toYaml .Values.ingester.podAnnotations | nindent 8 }} +{{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: ingester +{{- if .Values.ingester.podLabels }} + {{- toYaml .Values.ingester.podLabels | nindent 8 }} +{{- end }} + spec: + serviceAccountName: {{ include "jaeger.ingester.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.ingester.podSecurityContext | nindent 8 }} + {{- with .Values.ingester.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.ingester.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ include "jaeger.fullname" . }}-ingester + securityContext: + {{- toYaml .Values.ingester.securityContext | nindent 10 }} + image: {{ .Values.ingester.image }}:{{- include "jaeger.image.tag" . }} + imagePullPolicy: {{ .Values.ingester.pullPolicy }} + args: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.ingester.cmdlineParams ) | nindent 10 }} + {{- include "storage.cmdArgs" . | nindent 10 }} + env: + {{- if .Values.ingester.extraEnv }} + {{- toYaml .Values.ingester.extraEnv | nindent 10 }} + {{- end }} + - name: SPAN_STORAGE_TYPE + value: {{ .Values.storage.type }} + {{- include "storage.env" . | nindent 10 }} + - name: KAFKA_CONSUMER_BROKERS + value: {{ tpl (include "helm-toolkit.utils.joinListWithComma" .Values.storage.kafka.brokers) . 
}} + - name: KAFKA_CONSUMER_TOPIC + value: {{ .Values.storage.kafka.topic }} + - name: KAFKA_CONSUMER_AUTHENTICATION + value: {{ .Values.storage.kafka.authentication }} + ports: + - containerPort: 14270 + name: admin + protocol: TCP + readinessProbe: + httpGet: + path: / + port: admin + livenessProbe: + httpGet: + path: / + port: admin + resources: + {{- toYaml .Values.ingester.resources | nindent 10 }} + volumeMounts: + {{- range .Values.ingester.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.ingester.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-cert.pem" + subPath: "client-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-key.pem" + subPath: "client-key.pem" + readOnly: true + {{- end }} + dnsPolicy: {{ .Values.ingester.dnsPolicy }} + restartPolicy: Always + volumes: + {{- range .Values.ingester.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.ingester.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + secret: + secretName: {{ .Values.storage.cassandra.tls.secretName }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-hpa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-hpa.yaml new file mode 100644 index 0000000..9ed66a7 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-hpa.yaml @@ -0,0 +1,43 @@ +{{- if .Values.ingester.autoscaling.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "jaeger.ingester.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: ingester +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "jaeger.ingester.name" . 
}} + minReplicas: {{ .Values.ingester.autoscaling.minReplicas }} + maxReplicas: {{ .Values.ingester.autoscaling.maxReplicas }} + metrics: + {{- if .Values.ingester.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + {{- if eq "autoscaling/v2" (include "common.capabilities.hpa.apiVersion" ( dict "context" $ )) }} + target: + averageUtilization: {{ .Values.ingester.autoscaling.targetMemoryUtilizationPercentage }} + type: Utilization + {{- else }} + targetAverageUtilization: {{ .Values.ingester.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- end }} + - type: Resource + resource: + name: cpu + {{- if eq "autoscaling/v2" (include "common.capabilities.hpa.apiVersion" ( dict "context" $ )) }} + target: + averageUtilization: {{ .Values.ingester.autoscaling.targetCPUUtilizationPercentage | default 80 }} + type: Utilization + {{- else }} + targetAverageUtilization: {{ .Values.ingester.autoscaling.targetCPUUtilizationPercentage | default 80 }} + {{- end }} + {{- with .Values.ingester.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-sa.yaml new file mode 100644 index 0000000..d642ef3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-sa.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.ingester.enabled .Values.ingester.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.ingester.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: ingester +automountServiceAccountToken: {{ .Values.ingester.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-servicemonitor.yaml new file mode 100644 index 0000000..957c5b6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and (.Values.ingester.enabled) (.Values.ingester.serviceMonitor.enabled)}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "jaeger.ingester.name" . }} + {{- if .Values.ingester.serviceMonitor.namespace }} + namespace: {{ .Values.ingester.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "jaeger.labels" . 
| nindent 4 }} + app.kubernetes.io/component: ingester + {{- if .Values.ingester.serviceMonitor.additionalLabels }} + {{- toYaml .Values.ingester.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.annotations }} + annotations: + {{- toYaml .Values.ingester.serviceMonitor.annotations | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: admin + path: /metrics + {{- if .Values.ingester.serviceMonitor.interval }} + interval: {{ .Values.ingester.serviceMonitor.interval }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.ingester.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.ingester.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.ingester.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.ingester.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/component: ingester + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-svc.yaml new file mode 100644 index 0000000..18a07e8 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/ingester-svc.yaml @@ -0,0 +1,23 @@ +{{- if .Values.ingester.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.ingester.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: ingester +{{- if .Values.ingester.service.annotations }} + annotations: + {{- toYaml .Values.ingester.service.annotations | nindent 4 }} +{{- end }} +spec: + ports: + - name: admin + port: 14270 + targetPort: admin + selector: + {{- include "jaeger.selectorLabels" . | nindent 4 }} + app.kubernetes.io/component: ingester + type: {{ .Values.ingester.service.type }} +{{- template "loadBalancerSourceRanges" .Values.ingester }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/oauth-sidecar-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/oauth-sidecar-configmap.yaml new file mode 100644 index 0000000..88e2464 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/oauth-sidecar-configmap.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.query.oAuthSidecar.enabled .Values.query.oAuthSidecar.config}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "jaeger.fullname" . }}-oauth-configuration + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: query +data: + oauth2-proxy.cfg: |- +{{ tpl .Values.query.oAuthSidecar.config . | indent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-configmap.yaml new file mode 100644 index 0000000..fa71bcb --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.query.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "jaeger.fullname" . }}-ui-configuration + labels: + {{- include "jaeger.labels" . 
| nindent 4 }} + app.kubernetes.io/component: query +data: + query-ui-config.json: |- +{{ tpl .Values.query.config . | indent 4 }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-deploy.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-deploy.yaml new file mode 100644 index 0000000..892172a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-deploy.yaml @@ -0,0 +1,293 @@ +{{- if .Values.query.enabled -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "jaeger.query.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: query +{{- if .Values.query.annotations }} + annotations: + {{- toYaml .Values.query.annotations | nindent 4 }} +{{- end }} +spec: + replicas: {{ .Values.query.replicaCount }} + selector: + matchLabels: + {{- include "jaeger.selectorLabels" . | nindent 6 }} + app.kubernetes.io/component: query + template: + metadata: +{{- if or .Values.query.config .Values.query.podAnnotations }} + annotations: + {{- if .Values.query.config }} + checksum/ui-config: {{ include (print $.Template.BasePath "/query-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.query.podAnnotations }} + {{- toYaml .Values.query.podAnnotations | nindent 8 }} + {{- end }} +{{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 8 }} + app.kubernetes.io/component: query +{{- if .Values.query.podLabels }} + {{- toYaml .Values.query.podLabels | nindent 8 }} +{{- end }} + spec: + {{- with .Values.query.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + securityContext: + {{- toYaml .Values.query.podSecurityContext | nindent 8 }} + serviceAccountName: {{ template "jaeger.query.serviceAccountName" . }} + {{- with .Values.query.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ template "jaeger.query.name" . }} + securityContext: + {{- toYaml .Values.query.securityContext | nindent 10 }} + image: {{ .Values.query.image }}:{{- .Values.query.tag | default (include "jaeger.image.tag" .) }} + imagePullPolicy: {{ .Values.query.pullPolicy }} + args: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.query.cmdlineParams ) | nindent 10 }} + {{- include "storage.cmdArgs" . | nindent 10 }} + env: + {{- if .Values.query.extraEnv }} + {{- toYaml .Values.query.extraEnv | nindent 10 }} + {{- end }} + - name: SPAN_STORAGE_TYPE + value: {{ .Values.storage.type }} + {{- include "storage.env" . 
| nindent 10 }} + {{- if .Values.query.basePath }} + - name: QUERY_BASE_PATH + value: {{ .Values.query.basePath | quote }} + {{- end }} + - name: JAEGER_AGENT_PORT + value: "6831" + {{- if .Values.query.config}} + - name: QUERY_UI_CONFIG + value: /etc/conf/query-ui-config.json + {{- end }} + ports: + - name: query + containerPort: 16686 + protocol: TCP + - name: grpc + containerPort: 16685 + protocol: TCP + - name: admin + containerPort: 16687 + protocol: TCP + resources: + {{- toYaml .Values.query.resources | nindent 10 }} + volumeMounts: + {{- range .Values.query.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.query.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-cert.pem" + subPath: "client-cert.pem" + readOnly: true + - name: {{ .Values.storage.cassandra.tls.secretName }} + mountPath: "/cassandra-tls/client-key.pem" + subPath: "client-key.pem" + readOnly: true + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + mountPath: "/es-tls/ca-cert.pem" + subPath: "ca-cert.pem" + readOnly: true + {{- end }} + {{- if .Values.query.config}} + - name: ui-configuration + mountPath: /etc/conf/ + {{- end }} + livenessProbe: + httpGet: + path: / + port: admin + readinessProbe: + httpGet: + path: / + port: admin +{{- if .Values.query.oAuthSidecar.enabled }} + - name: {{ template "jaeger.agent.name" . }}-oauth2-sidecar + image: {{ .Values.query.oAuthSidecar.image }} + imagePullPolicy: {{ .Values.query.oAuthSidecar.pullPolicy }} + args: + {{- range .Values.query.oAuthSidecar.args }} + - {{ . }} + {{- end }} + {{- if .Values.query.oAuthSidecar.extraEnv }} + env: + {{- toYaml .Values.query.oAuthSidecar.extraEnv | nindent 10 }} + {{- end }} + volumeMounts: + {{- range .Values.query.oAuthSidecar.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.query.oAuthSidecar.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.query.oAuthSidecar.config}} + - name: jaeger-oauth-configuration + mountPath: /etc/oauth2-proxy + {{- end }} + ports: + - containerPort: {{ .Values.query.oAuthSidecar.containerPort }} + name: oauth-proxy + {{- if .Values.query.oAuthSidecar.livenessProbe }} + livenessProbe: + {{- toYaml .Values.query.oAuthSidecar.livenessProbe | nindent 10 }} + {{- end }} + {{- if .Values.query.oAuthSidecar.readinessProbe }} + readinessProbe: + {{- toYaml .Values.query.oAuthSidecar.readinessProbe | nindent 10 }} + {{- end }} +{{- end }} +{{- if .Values.query.agentSidecar.enabled }} + - name: {{ template "jaeger.agent.name" . }}-sidecar + securityContext: + {{- toYaml .Values.query.securityContext | nindent 10 }} + image: {{ .Values.agent.image }}:{{- include "jaeger.image.tag" . 
}} + imagePullPolicy: {{ .Values.agent.pullPolicy }} + args: + {{- range $key, $value := .Values.agent.cmdlineParams }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + env: + {{- if not (hasKey .Values.agent.cmdlineParams "reporter.grpc.host-port") }} + - name: REPORTER_GRPC_HOST_PORT + value: {{ include "jaeger.collector.name" . }}:{{ .Values.collector.service.grpc.port }} + {{- end }} + ports: + - name: admin + containerPort: 14271 + protocol: TCP + resources: + {{- toYaml .Values.query.agentSidecar.resources | nindent 10 }} + volumeMounts: + {{- range .Values.agent.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.agent.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + livenessProbe: + httpGet: + path: / + port: admin + readinessProbe: + httpGet: + path: / + port: admin +{{- end }} + {{- if .Values.query.sidecars }} + {{- tpl (toYaml .Values.query.sidecars) . | nindent 6 }} + {{- end }} + dnsPolicy: {{ .Values.query.dnsPolicy }} + restartPolicy: Always + volumes: + {{- range .Values.query.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.query.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.query.config}} + - name: ui-configuration + configMap: + name: {{ include "jaeger.fullname" . }}-ui-configuration + {{- end }} + {{- if .Values.storage.cassandra.tls.enabled }} + - name: {{ .Values.storage.cassandra.tls.secretName }} + secret: + secretName: {{ .Values.storage.cassandra.tls.secretName }} + {{- end }} + {{- if .Values.storage.elasticsearch.tls.enabled }} + - name: {{ .Values.storage.elasticsearch.tls.secretName }} + secret: + secretName: {{ .Values.storage.elasticsearch.tls.secretName }} + {{- end }} +{{- if .Values.query.oAuthSidecar.enabled }} + {{- range .Values.query.oAuthSidecar.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.query.oAuthSidecar.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- if .Values.query.oAuthSidecar.config }} + - name: jaeger-oauth-configuration + configMap: + name: {{ include "jaeger.fullname" . }}-oauth-configuration + {{- end }} +{{- end }} +{{- if .Values.query.agentSidecar.enabled }} + {{- range .Values.agent.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- range .Values.agent.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} +{{- end }} + {{- if .Values.query.extraVolumes }} + {{- tpl (toYaml .Values.query.extraVolumes) . | nindent 8 }} + {{- end }} + {{- with .Values.query.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.query.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-ing.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-ing.yaml new file mode 100644 index 0000000..a35bbfe --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-ing.yaml @@ -0,0 +1,45 @@ +{{- if .Values.query.ingress.enabled -}} +{{- $ingressSupportsIngressClassName := eq (include "common.ingress.supportsIngressClassname" .) "true" }} + {{- $servicePort := .Values.query.service.port -}} + {{- $basePath := .Values.query.basePath -}} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" $ }} +kind: Ingress +metadata: + name: {{ template "jaeger.query.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + {{- if .Values.query.ingress.labels }} + {{- toYaml .Values.query.ingress.labels | nindent 4 }} + {{- end }} + app.kubernetes.io/component: query + {{- if .Values.query.ingress.annotations }} + annotations: + {{- toYaml .Values.query.ingress.annotations | nindent 4 }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.query.ingress.ingressClassName }} + ingressClassName: {{ .Values.query.ingress.ingressClassName }} + {{- end }} + rules: + {{- range $host := .Values.query.ingress.hosts }} + - host: {{ $host }} + http: + paths: + - path: {{ $basePath }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: ImplementationSpecific + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "jaeger.query.name" $) "servicePort" $servicePort "context" $) | nindent 14 }} + {{- end -}} + {{- if .Values.query.ingress.health.exposed }} + - path: /health + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: ImplementationSpecific + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "jaeger.query.name" $) "servicePort" 16687 "context" $) | nindent 14 }} + {{- end -}} + {{- if .Values.query.ingress.tls }} + tls: + {{- toYaml .Values.query.ingress.tls | nindent 4 }} + {{- end -}} + {{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-sa.yaml new file mode 100644 index 0000000..da6ac97 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-sa.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.query.enabled .Values.query.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.query.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: query + {{- with .Values.query.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.query.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-servicemonitor.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-servicemonitor.yaml new file mode 100644 index 0000000..e52086b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and (.Values.query.enabled) (.Values.query.serviceMonitor.enabled)}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "jaeger.query.name" . }} + {{- if .Values.query.serviceMonitor.namespace }} + namespace: {{ .Values.query.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: query + {{- if .Values.query.serviceMonitor.additionalLabels }} + {{- toYaml .Values.query.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.query.serviceMonitor.annotations }} + annotations: + {{- toYaml .Values.query.serviceMonitor.annotations | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: admin + path: /metrics + {{- if .Values.query.serviceMonitor.interval }} + interval: {{ .Values.query.serviceMonitor.interval }} + {{- end }} + {{- if .Values.query.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.query.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.query.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.query.serviceMonitor.relabelings | nindent 8 }} + {{- end }} + {{- if .Values.query.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml .Values.query.serviceMonitor.metricRelabelings | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + app.kubernetes.io/component: query + app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-svc.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-svc.yaml new file mode 100644 index 0000000..35c120a --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/query-svc.yaml @@ -0,0 +1,35 @@ +{{- if .Values.query.enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "jaeger.query.name" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: query +{{- if .Values.query.service.annotations }} + annotations: + {{- toYaml .Values.query.service.annotations | nindent 4 }} +{{- end }} +spec: + ports: + - name: query + port: {{ .Values.query.service.port }} + protocol: TCP + targetPort: {{ default (ternary "oauth-proxy" "query" .Values.query.oAuthSidecar.enabled) .Values.query.service.targetPort }} +{{- if and (eq .Values.query.service.type "NodePort") (.Values.query.service.nodePort) }} + nodePort: {{ .Values.query.service.nodePort }} +{{- end }} + - name: grpc + port: 16685 + protocol: TCP + targetPort: grpc + - name: admin + port: 16687 + protocol: TCP + targetPort: admin + selector: + {{- include "jaeger.selectorLabels" . 
| nindent 4 }} + app.kubernetes.io/component: query + type: {{ .Values.query.service.type }} +{{- template "loadBalancerSourceRanges" .Values.query }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-cronjob.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-cronjob.yaml new file mode 100644 index 0000000..7654f6c --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-cronjob.yaml @@ -0,0 +1,106 @@ +{{- if .Values.spark.enabled -}} +apiVersion: {{ include "common.capabilities.cronjob.apiVersion" $ }} +kind: CronJob +metadata: + name: {{ include "jaeger.fullname" . }}-spark + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: spark +{{- if .Values.spark.annotations }} + annotations: + {{- toYaml .Values.spark.annotations | nindent 4 }} +{{- end }} +spec: + schedule: {{ .Values.spark.schedule | quote }} + successfulJobsHistoryLimit: {{ .Values.spark.successfulJobsHistoryLimit }} + failedJobsHistoryLimit: {{ .Values.spark.failedJobsHistoryLimit }} + concurrencyPolicy: {{ .Values.spark.concurrencyPolicy }} + jobTemplate: + spec: + {{- if .Values.spark.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ .Values.spark.activeDeadlineSeconds }} + {{- end}} + {{- if .Values.spark.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ .Values.spark.ttlSecondsAfterFinished }} + {{- end }} + template: + metadata: + {{- if .Values.spark.podAnnotations }} + annotations: + {{- toYaml .Values.spark.podAnnotations | nindent 12 }} + {{- end }} + labels: + {{- include "jaeger.selectorLabels" . | nindent 12 }} + app.kubernetes.io/component: spark + {{- if .Values.spark.podLabels }} + {{- toYaml .Values.spark.podLabels | nindent 12 }} + {{- end }} + spec: + serviceAccountName: {{ template "jaeger.spark.serviceAccountName" . }} + {{- with .Values.spark.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 12 }} + {{- end }} + containers: + - name: {{ include "jaeger.fullname" . }}-spark + image: {{ .Values.spark.image }}:{{ .Values.spark.tag }} + imagePullPolicy: {{ .Values.spark.pullPolicy }} + args: + {{ include "extra.cmdArgs" ( dict "cmdlineParams" .Values.spark.cmdlineParams ) | nindent 14 }} + env: + - name: STORAGE + value: {{ .Values.storage.type }} + {{- include "storage.env" . | nindent 14 }} + {{- if .Values.spark.extraEnv }} + {{- toYaml .Values.spark.extraEnv | nindent 14 }} + {{- end }} + {{- if eq .Values.storage.type "cassandra" }} + - name: CASSANDRA_CONTACT_POINTS + value: {{ include "cassandra.contact_points" . }} + {{- end }} + {{- if eq .Values.storage.type "elasticsearch" }} + - name: ES_NODES + value: {{ include "elasticsearch.client.url" . 
}} + - name: ES_NODES_WAN_ONLY + value: {{ .Values.storage.elasticsearch.nodesWanOnly | quote }} + {{- end }} + resources: + {{- toYaml .Values.spark.resources | nindent 14 }} + volumeMounts: + {{- range .Values.spark.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.spark.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + restartPolicy: OnFailure + volumes: + {{- range .Values.spark.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.spark.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- end }} + {{- with .Values.spark.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.spark.affinity }} + affinity: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.spark.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-sa.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-sa.yaml new file mode 100644 index 0000000..c694d68 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/templates/spark-sa.yaml @@ -0,0 +1,10 @@ +{{- if and .Values.spark.enabled .Values.spark.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "jaeger.spark.serviceAccountName" . }} + labels: + {{- include "jaeger.labels" . | nindent 4 }} + app.kubernetes.io/component: spark +automountServiceAccountToken: {{ .Values.spark.serviceAccount.automountServiceAccountToken }} +{{- end -}} diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/values.yaml new file mode 100644 index 0000000..9ceccc5 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/jaeger/values.yaml @@ -0,0 +1,828 @@ +# Default values for jaeger. +# This is a YAML-formatted file. +# Jaeger values are grouped by component. Cassandra values override subchart values + +provisionDataStore: + cassandra: true + elasticsearch: false + kafka: false + +# Overrides the image tag where default is the chart appVersion. +tag: "" + +nameOverride: "" +fullnameOverride: "" + +allInOne: + enabled: false + image: jaegertracing/all-in-one + pullPolicy: IfNotPresent + extraEnv: [] + # command line arguments / CLI flags + # See https://www.jaegertracing.io/docs/cli/ + args: [] + # samplingConfig: |- + # { + # "default_strategy": { + # "type": "probabilistic", + # "param": 1 + # } + # } + ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + annotations: {} + labels: {} + # Used to create an Ingress record. + # hosts: + # - chart-example.local + # annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # labels: + # app: jaeger + # tls: + # # Secrets must be manually created in the namespace. 
+ # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # resources: + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + nodeSelector: {} + +storage: + # allowed values (cassandra, elasticsearch) + type: cassandra + cassandra: + host: cassandra + port: 9042 + tls: + enabled: false + secretName: cassandra-tls-secret + user: user + usePassword: true + password: password + keyspace: jaeger_v1_test + ## Use existing secret (ignores previous password) + # existingSecret: + ## Cassandra related env vars to be configured on the concerned components + extraEnv: [] + # - name: CASSANDRA_SERVERS + # value: cassandra + # - name: CASSANDRA_PORT + # value: "9042" + # - name: CASSANDRA_KEYSPACE + # value: jaeger_v1_test + # - name: CASSANDRA_TLS_ENABLED + # value: "false" + ## Cassandra related cmd line opts to be configured on the concerned components + cmdlineParams: {} + # cassandra.servers: cassandra + # cassandra.port: 9042 + # cassandra.keyspace: jaeger_v1_test + # cassandra.tls.enabled: "false" + elasticsearch: + scheme: http + host: elasticsearch-master + port: 9200 + anonymous: false + user: elastic + usePassword: true + password: changeme + # indexPrefix: test + ## Use existing secret (ignores previous password) + # existingSecret: + # existingSecretKey: + nodesWanOnly: false + extraEnv: [] + ## ES related env vars to be configured on the concerned components + # - name: ES_SERVER_URLS + # value: http://elasticsearch-master:9200 + # - name: ES_USERNAME + # value: elastic + # - name: ES_INDEX_PREFIX + # value: test + ## ES related cmd line opts to be configured on the concerned components + cmdlineParams: {} + # es.server-urls: http://elasticsearch-master:9200 + # es.username: elastic + # es.index-prefix: test + tls: + enabled: false + secretName: es-tls-secret + kafka: + brokers: + - kafka:9092 + topic: jaeger_v1_test + authentication: none + extraEnv: [] + grpcPlugin: + extraEnv: [] + +# Begin: Override values on the Cassandra subchart to customize for Jaeger +cassandra: + persistence: + # To enable persistence, please see the documentation for the Cassandra chart + enabled: false + config: + cluster_name: jaeger + seed_size: 1 + dc_name: dc1 + rack_name: rack1 + endpoint_snitch: GossipingPropertyFileSnitch +# End: Override values on the Cassandra subchart to customize for Jaeger + +# Begin: Override values on the Kafka subchart to customize for Jaeger +kafka: + replicaCount: 1 + autoCreateTopicsEnable: true + zookeeper: + replicaCount: 1 + serviceAccount: + create: true + +# End: Override values on the Kafka subchart to customize for Jaeger + +# Begin: Default values for the various components of Jaeger +# This chart has been based on the Kubernetes integration found in the following repo: +# https://github.com/jaegertracing/jaeger-kubernetes/blob/main/production/jaeger-production-template.yml +# +# This is the jaeger-cassandra-schema Job which sets up the Cassandra schema for +# use by Jaeger +schema: + annotations: {} + image: jaegertracing/jaeger-cassandra-schema + imagePullSecrets: [] + pullPolicy: IfNotPresent + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: true + name: + podAnnotations: {} + podLabels: {} + securityContext: {} + podSecurityContext: {} + ## Deadline for cassandra schema creation job + activeDeadlineSeconds: 300 + extraEnv: [] 
+ # - name: MODE + # value: prod + # - name: TRACE_TTL + # value: "172800" + # - name: DEPENDENCIES_TTL + # value: "0" + +# For configurable values of the elasticsearch if provisioned, please see: +# https://github.com/elastic/helm-charts/tree/master/elasticsearch#configuration +elasticsearch: {} + +ingester: + enabled: false + podSecurityContext: {} + securityContext: {} + annotations: {} + image: jaegertracing/jaeger-ingester + imagePullSecrets: [] + pullPolicy: IfNotPresent + dnsPolicy: ClusterFirst + cmdlineParams: {} + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + behavior: {} + # targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + service: + annotations: {} + # List of IP ranges that are allowed to access the load balancer (if supported) + loadBalancerSourceRanges: [] + type: ClusterIP + resources: {} + # limits: + # cpu: 1 + # memory: 1Gi + # requests: + # cpu: 500m + # memory: 512Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + extraSecretMounts: [] + extraConfigmapMounts: [] + + serviceMonitor: + enabled: false + additionalLabels: {} + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor metric relabel configs to apply to samples before ingestion + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + metricRelabelings: [] + +agent: + podSecurityContext: {} + securityContext: {} + enabled: true + annotations: {} + image: jaegertracing/jaeger-agent + # tag: 1.22 + imagePullSecrets: [] + pullPolicy: IfNotPresent + cmdlineParams: {} + extraEnv: [] + daemonset: + useHostPort: false + updateStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + service: + annotations: {} + # List of IP ranges that are allowed to access the load balancer (if supported) + loadBalancerSourceRanges: [] + type: ClusterIP + # zipkinThriftPort :accept zipkin.thrift over compact thrift protocol + zipkinThriftPort: 5775 + # compactPort: accept jaeger.thrift over compact thrift protocol + compactPort: 6831 + # binaryPort: accept jaeger.thrift over binary thrift protocol + binaryPort: 6832 + # samplingPort: (HTTP) serve configs, sampling strategies + samplingPort: 5778 + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + extraSecretMounts: [] + # - name: jaeger-tls + # mountPath: /tls + # subPath: "" + # secretName: jaeger-tls + # readOnly: true + extraConfigmapMounts: [] + # - name: jaeger-config + # mountPath: /config + # subPath: "" + # configMap: jaeger-config + # readOnly: true + useHostNetwork: false + dnsPolicy: ClusterFirst + priorityClassName: "" + initContainers: [] + + serviceMonitor: + enabled: false + additionalLabels: {} + # 
https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor metric relabel configs to apply to samples before ingestion + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + metricRelabelings: [] + +collector: + podSecurityContext: {} + securityContext: {} + enabled: true + annotations: {} + image: jaegertracing/jaeger-collector + # tag: 1.22 + imagePullSecrets: [] + pullPolicy: IfNotPresent + dnsPolicy: ClusterFirst + extraEnv: [] + cmdlineParams: {} + basePath: / + replicaCount: 1 + autoscaling: + enabled: false + minReplicas: 2 + maxReplicas: 10 + behavior: {} + # targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + service: + annotations: {} + # The IP to be used by the load balancer (if supported) + loadBalancerIP: '' + # List of IP ranges that are allowed to access the load balancer (if supported) + loadBalancerSourceRanges: [] + type: ClusterIP + # Cluster IP address to assign to service. Set to None to make service headless + clusterIP: "" + grpc: + port: 14250 + # nodePort: + # httpPort: can accept spans directly from clients in jaeger.thrift format + http: + port: 14268 + # nodePort: + # can accept Zipkin spans in JSON or Thrift + zipkin: {} + # port: 9411 + # nodePort: + otlp: + grpc: {} + # port: 4317 + # nodePort: + http: {} + # port: 4318 + # nodePort: + ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + annotations: {} + labels: {} + # Used to create an Ingress record. + # The 'hosts' variable accepts two formats: + # hosts: + # - chart-example.local + # or: + # hosts: + # - host: chart-example.local + # servicePort: grpc + # annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # labels: + # app: jaeger-collector + # tls: + # Secrets must be manually created in the namespace. 
+ # - secretName: chart-example-tls + # hosts: + # - chart-example.local + resources: {} + # limits: + # cpu: 1 + # memory: 1Gi + # requests: + # cpu: 500m + # memory: 512Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + extraSecretMounts: [] + # - name: jaeger-tls + # mountPath: /tls + # subPath: "" + # secretName: jaeger-tls + # readOnly: true + extraConfigmapMounts: [] + # - name: jaeger-config + # mountPath: /config + # subPath: "" + # configMap: jaeger-config + # readOnly: true + # samplingConfig: |- + # { + # "service_strategies": [ + # { + # "service": "foo", + # "type": "probabilistic", + # "param": 0.8, + # "operation_strategies": [ + # { + # "operation": "op1", + # "type": "probabilistic", + # "param": 0.2 + # }, + # { + # "operation": "op2", + # "type": "probabilistic", + # "param": 0.4 + # } + # ] + # }, + # { + # "service": "bar", + # "type": "ratelimiting", + # "param": 5 + # } + # ], + # "default_strategy": { + # "type": "probabilistic", + # "param": 1 + # } + # } + priorityClassName: "" + serviceMonitor: + enabled: false + additionalLabels: {} + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor metric relabel configs to apply to samples before ingestion + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + metricRelabelings: [] + +query: + enabled: true + basePath: / + oAuthSidecar: + enabled: false + image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.0 + pullPolicy: IfNotPresent + containerPort: 4180 + args: [] + extraEnv: [] + extraConfigmapMounts: [] + extraSecretMounts: [] + # config: |- + # provider = "oidc" + # https_address = ":4180" + # upstreams = ["http://localhost:16686"] + # redirect_url = "https://jaeger-svc-domain/oauth2/callback" + # client_id = "jaeger-query" + # oidc_issuer_url = "https://keycloak-svc-domain/auth/realms/Default" + # cookie_secure = "true" + # email_domains = "*" + # oidc_groups_claim = "groups" + # user_id_claim = "preferred_username" + # skip_provider_button = "true" + podSecurityContext: {} + securityContext: {} + agentSidecar: + enabled: true +# resources: +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 256m +# memory: 128Mi + annotations: {} + image: jaegertracing/jaeger-query + # tag: 1.22 + imagePullSecrets: [] + pullPolicy: IfNotPresent + dnsPolicy: ClusterFirst + cmdlineParams: {} + extraEnv: [] + replicaCount: 1 + service: + annotations: {} + type: ClusterIP + # List of IP ranges that are allowed to access the load balancer (if supported) + loadBalancerSourceRanges: [] + port: 80 + # Specify a custom target port (e.g. port of auth proxy) + # targetPort: 8080 + # Specify a specific node port when type is NodePort + # nodePort: 32500 + ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + annotations: {} + labels: {} + # Used to create an Ingress record. 
+ # hosts: + # - chart-example.local + # annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + # labels: + # app: jaeger-query + # tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + health: + exposed: false + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + annotations: {} + nodeSelector: {} + tolerations: [] + affinity: {} + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + extraConfigmapMounts: [] + # - name: jaeger-config + # mountPath: /config + # subPath: "" + # configMap: jaeger-config + # readOnly: true + extraVolumes: [] + sidecars: [] + ## - name: your-image-name + ## image: your-image + ## ports: + ## - name: portname + ## containerPort: 1234 + priorityClassName: "" + serviceMonitor: + enabled: false + additionalLabels: {} + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor metric relabel configs to apply to samples before ingestion + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + metricRelabelings: [] + # config: |- + # { + # "dependencies": { + # "dagMaxNumServices": 200, + # "menuEnabled": true + # }, + # "archiveEnabled": true, + # "tracking": { + # "gaID": "UA-000000-2", + # "trackErrors": true + # } + # } + +spark: + enabled: false + annotations: {} + image: jaegertracing/spark-dependencies + imagePullSecrets: [] + tag: latest + pullPolicy: IfNotPresent + cmdlineParams: {} + extraEnv: [] + schedule: "49 23 * * *" + successfulJobsHistoryLimit: 5 + failedJobsHistoryLimit: 5 + concurrencyPolicy: Forbid + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + extraSecretMounts: [] + extraConfigmapMounts: [] + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + # ttlSecondsAfterFinished: 120 + +esIndexCleaner: + enabled: false + securityContext: + runAsUser: 1000 + podSecurityContext: + runAsUser: 1000 + annotations: {} + image: jaegertracing/jaeger-es-index-cleaner + imagePullSecrets: [] + pullPolicy: IfNotPresent + cmdlineParams: {} + extraEnv: [] + # - name: ROLLOVER + # value: 'true' + schedule: "55 23 * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + concurrencyPolicy: Forbid + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + numberOfDays: 7 + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + extraSecretMounts: [] + extraConfigmapMounts: [] + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + # ttlSecondsAfterFinished: 120 + +esRollover: + enabled: false + 
securityContext: {} + podSecurityContext: + runAsUser: 1000 + annotations: {} + image: jaegertracing/jaeger-es-rollover + imagePullSecrets: [] + tag: latest + pullPolicy: IfNotPresent + cmdlineParams: {} + extraEnv: + - name: CONDITIONS + value: '{"max_age": "1d"}' + schedule: "10 0 * * *" + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + concurrencyPolicy: Forbid + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + extraSecretMounts: [] + extraConfigmapMounts: [] + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + # ttlSecondsAfterFinished: 120 + initHook: + extraEnv: [] + # - name: SHARDS + # value: "3" + annotations: {} + podAnnotations: {} + podLabels: {} + ttlSecondsAfterFinished: 120 + +esLookback: + enabled: false + securityContext: {} + podSecurityContext: + runAsUser: 1000 + annotations: {} + image: jaegertracing/jaeger-es-rollover + imagePullSecrets: [] + tag: latest + pullPolicy: IfNotPresent + cmdlineParams: {} + extraEnv: + - name: UNIT + value: days + - name: UNIT_COUNT + value: '7' + schedule: '5 0 * * *' + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 3 + concurrencyPolicy: Forbid + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 256m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + extraSecretMounts: [] + extraConfigmapMounts: [] + podAnnotations: {} + ## Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + # ttlSecondsAfterFinished: 120 +# End: Default values for the various components of Jaeger + +hotrod: + enabled: false + podSecurityContext: {} + securityContext: {} + replicaCount: 1 + image: + repository: jaegertracing/example-hotrod + pullPolicy: IfNotPresent + pullSecrets: [] + service: + annotations: {} + name: hotrod + type: ClusterIP + # List of IP ranges that are allowed to access the load balancer (if supported) + loadBalancerSourceRanges: [] + port: 80 + ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Used to create Ingress record (should be used with service.type: ClusterIP). + hosts: + - chart-example.local + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + tls: + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + serviceAccount: + create: true + # Explicitly mounts the API credentials for the Service Account + automountServiceAccountToken: false + name: + nodeSelector: {} + tolerations: [] + affinity: {} + tracing: + host: null + port: 6831 + +# Array with extra yaml objects to install alongside the chart. Values are evaluated as a template. +extraObjects: [] diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/.helmignore b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/CONTRIBUTING.md b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/CONTRIBUTING.md new file mode 100644 index 0000000..e2dd7d2 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/CONTRIBUTING.md @@ -0,0 +1,8 @@ +# Collector Chart Contributing Guide + +## Bumping Default Collector Version + +1. Increase the minor version of the chart by one and set the patch version to zero. +2. Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default. +3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly. +4. Run `make generate-examples`. 
\ No newline at end of file diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/Chart.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/Chart.yaml new file mode 100644 index 0000000..19aabf3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/Chart.yaml @@ -0,0 +1,14 @@ +apiVersion: v2 +appVersion: 0.72.0 +description: OpenTelemetry Collector Helm chart for Kubernetes +home: https://opentelemetry.io/ +icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png +maintainers: +- name: dmitryax +- name: TylerHelmuth +name: opentelemetry-collector +sources: +- https://github.com/open-telemetry/opentelemetry-collector +- https://github.com/open-telemetry/opentelemetry-collector-contrib +type: application +version: 0.49.1 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/README.md new file mode 100644 index 0000000..a15197f --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/README.md @@ -0,0 +1,217 @@ +# OpenTelemetry Collector Helm Chart + +The helm chart installs the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) +in a Kubernetes cluster. + +## Prerequisites + +- Kubernetes 1.23+ +- Helm 3.9+ + +## Installing the Chart + +Add the OpenTelemetry Helm repository: + +```console +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +``` + +To install the chart with the release name my-opentelemetry-collector, run the following command: + +```console +helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector +``` + +## Upgrading + +See [UPGRADING.md](UPGRADING.md). + +## Security Considerations + +OpenTelemetry Collector recommends binding receivers' servers to addresses that limit connections to authorized users. +For this reason, by default the chart binds all the Collector's endpoints to the pod's IP. + +More info is available in the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) + +Some care must be taken when using `hostNetwork: true`, as the OpenTelemetry Collector will then listen on all the addresses in the host network namespace. + +## Configuration + +### Default configuration + +By default this chart deploys an OpenTelemetry Collector as a daemonset with three pipelines (logs, metrics, and traces) +and the logging exporter enabled. Besides the daemonset (agent) mode, it can also be installed as a deployment. + +*Example*: Install the collector as a deployment, and do not run it as an agent. + +```yaml +mode: deployment +``` + +By default the collector has the following receivers enabled: + +- **metrics**: OTLP and prometheus. Prometheus is configured only for scraping the collector's own metrics. +- **traces**: OTLP, zipkin and jaeger (thrift and grpc). +- **logs**: OTLP (to enable container logs, see [Configuration for Kubernetes container logs](#configuration-for-kubernetes-container-logs)). + +There are two ways to configure collector pipelines, which can also be used together. + +### Basic top level configuration + +Default components can be removed with `null`. When changing a pipeline, you must explicitly list all the components that are in the pipeline, including any default components.
+
+*Example*: Disable metrics and logging pipelines and non-otlp receivers:
+
+```yaml
+config:
+  receivers:
+    jaeger: null
+    prometheus: null
+    zipkin: null
+  service:
+    pipelines:
+      traces:
+        receivers:
+          - otlp
+      metrics: null
+      logs: null
+```
+
+*Example*: Add host metrics receiver:
+
+```yaml
+mode: daemonset
+
+presets:
+  hostMetrics:
+    enabled: true
+```
+
+### Configuration for Kubernetes container logs
+
+The collector can be used to collect logs sent to standard output by Kubernetes containers.
+This feature is disabled by default. It has the following requirements:
+
+- It requires the agent collector to be deployed.
+- It requires the [contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) version
+of the collector image.
+
+To enable this feature, set the `presets.logsCollection.enabled` property to `true`.
+Here is an example `values.yaml`:
+
+```yaml
+mode: daemonset
+
+presets:
+  logsCollection:
+    enabled: true
+    includeCollectorLogs: true
+```
+
+This feature works by adding a `filelog` receiver to the `logs` pipeline. This receiver is preconfigured
+to read the files where the Kubernetes container runtime writes all containers' console output.
+
+#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion"
+
+The container logs pipeline uses the `logging` console exporter by default.
+Paired with the default `filelog` receiver that receives all containers' console output,
+it is easy to accidentally feed the exported logs back into the receiver.
+
+Also note that using the `--log-level=debug` option for the `logging` exporter causes it to output
+multiple lines per single received log, which, when looped, would amplify the logs exponentially.
+
+To prevent the looping, the default configuration of the receiver excludes logs from the collector's containers.
+
+If you want to include the collector's logs, make sure to replace the `logging` exporter
+with an exporter that does not send logs to the collector's standard output.
+
+Here's an example `values.yaml` file that replaces the default `logging` exporter on the `logs` pipeline
+with an `otlphttp` exporter that sends the container logs to the `https://example.com:55681` endpoint.
+It also clears the `filelog` receiver's `exclude` property, so that the collector's own logs are included in the pipeline.
+
+```yaml
+mode: daemonset
+
+presets:
+  logsCollection:
+    enabled: true
+    includeCollectorLogs: true
+
+config:
+  exporters:
+    otlphttp:
+      endpoint: https://example.com:55681
+  service:
+    pipelines:
+      logs:
+        exporters:
+          - otlphttp
+```
+
+### Configuration for Kubernetes attributes processor
+
+The collector can be configured to add Kubernetes metadata to logs, metrics and traces.
+
+This feature is disabled by default. It has the following requirements:
+
+- It requires the [k8sattributesprocessor](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/processor/k8sattributesprocessor) processor to be included in the collector, such as the [contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) version of the collector image.
+
+To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`.
+Here is an example `values.yaml`:
+
+```yaml
+mode: daemonset
+presets:
+  kubernetesAttributes:
+    enabled: true
+```
+
+### Configuration for Kubernetes Cluster Metrics
+
+The collector can be configured to collect cluster-level metrics from the Kubernetes API server.
A single instance of this receiver can be used to monitor a cluster.
+
+This feature is disabled by default. It has the following requirements:
+
+- It requires the [k8sclusterreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/k8sclusterreceiver) to be included in the collector, such as the [contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) version of the collector image.
+- It requires the statefulset or deployment mode with a single replica.
+
+To enable this feature, set the `presets.clusterMetrics.enabled` property to `true`.
+
+Here is an example `values.yaml`:
+
+```yaml
+mode: deployment
+replicaCount: 1
+presets:
+  clusterMetrics:
+    enabled: true
+```
+
+### Configuration for retrieving Kubelet metrics
+
+The collector can be configured to collect Kubelet metrics.
+
+This feature is disabled by default. It has the following requirements:
+
+- It requires the [kubeletstats](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/kubeletstatsreceiver) receiver to be included in the collector, such as the [contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib) version of the collector image.
+
+To enable this feature, set the `presets.kubeletMetrics.enabled` property to `true`.
+Here is an example `values.yaml`:
+
+```yaml
+mode: daemonset
+presets:
+  kubeletMetrics:
+    enabled: true
+```
+
+### CRDs
+
+At this time, Prometheus CRDs are supported but other CRDs are not.
+
+### Other configuration options
+
+The [values.yaml](./values.yaml) file contains information about all other configuration
+options for this chart.
+
+For more examples see [Examples](examples). diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/UPGRADING.md b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/UPGRADING.md new file mode 100644 index 0000000..e9e39a3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/UPGRADING.md @@ -0,0 +1,256 @@ +# Upgrade guidelines
+
+## 0.46.0 to 0.47.0
+
+[Update Collector Endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/603)
+
+The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks.
+To comply with the Collector's security best practices, the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`.
+
+The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If the pod IP does not suit your needs, you can use `config` to set something different.
+
+See the [Security Best Practices documentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details.
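+
+*Example*: a minimal sketch of using `config` to bind the OTLP receiver back to `0.0.0.0` (only do this if you have weighed the security trade-offs above):
+
+```yaml
+config:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+          endpoint: 0.0.0.0:4317
+        http:
+          endpoint: 0.0.0.0:4318
+```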
+
+## 0.40.7 to 0.41.0
+
+[Require Kubernetes version 1.23 or later](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/541)
+
+If you enable use of a _HorizontalPodAutoscaler_ for the collector when running in the "deployment" mode by way of `.Values.autoscaling.enabled`, the manifest now uses the "autoscaling/v2" API group version, which [is available only as recently as Kubernetes version 1.23](https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/#horizontalpodautoscaler-v2-graduates-to-ga). As [all previous versions of this API group are deprecated and removed as of Kubernetes version 1.26](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#horizontalpodautoscaler-v126), we don't offer support for Kubernetes versions older than 1.23.
+
+## 0.33.x to 0.34.0
+
+[config supports templating](TBD)
+
+The chart now supports templating in `.Values.config`. If you are currently using any `{{ }}` syntax in your values.yaml, it will now be rendered. To escape existing instances of `{{ }}`, use ``` {{` `}} ```. For example, `{{ REDACTED_EMAIL }}` becomes ``` {{` {{ REDACTED_EMAIL }} `}} ```.
+
+## 0.28.0 to 0.29.0
+
+[Reduce requested resources](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/273)
+
+Resource `limits` have been reduced. Upgrades/installs of chart 0.29.0 will now use fewer resources. In order to set the resources back to what they were, you will need to override the `resources` section in the `values.yaml`.
+
+*Example*:
+
+```yaml
+resources:
+  limits:
+    cpu: 1
+    memory: 2Gi
+```
+
+## 0.23.1 to 0.24.0
+
+[Remove containerLogs in favor of presets.logsCollection]()
+
+The ability to enable logs collection from the collector has been moved from `containerLogs.enabled` to `presets.logsCollection.enabled`. If you are currently using `containerLogs.enabled`, you should instead use the preset:
+
+```yaml
+presets:
+  logsCollection:
+    enabled: true
+```
+
+If you are using `containerLogs.enabled` and also enabling collection of the collector logs, you can use `includeCollectorLogs`:
+
+```yaml
+presets:
+  logsCollection:
+    enabled: true
+    includeCollectorLogs: true
+```
+
+You no longer need to update `config.service.pipelines.logs` to include the filelog receiver yourself, as the preset automatically updates the logs pipeline to include the filelog receiver.
+
+The filelog receiver's preset configuration can be modified via `config.receivers`, but preset configuration cannot be removed. If you need to remove any filelog receiver configuration generated by the preset, you should not use the preset. Instead, configure the filelog receiver manually in `config.receivers` and set any other necessary fields in the values.yaml.
+
+See the [daemonset-collector-logs example](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector/examples/daemonset-collector-logs) for an example of the preset in action.
+
+## 0.18.0 to 0.19.0
+
+[Remove agentCollector and standaloneCollector settings](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/216)
+
+The `agentCollector` and `standaloneCollector` config sections have been removed. Upgrades/installs of chart 0.19.0 will fail if `agentCollector` or `standaloneCollector` are in the values.yaml. See the [Migrate to mode](#migrate-to-mode) steps for instructions on how to replace `agentCollector` and `standaloneCollector` with `mode`.
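+
+In short, a minimal migration sketch (assuming the agent was previously enabled with default settings) looks like this; the full steps follow below:
+
+```yaml
+# Before (rejected as of chart 0.19.0):
+# agentCollector:
+#   enabled: true
+
+# After:
+mode: daemonset
+```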
+
+## 0.13.0 to 0.14.0
+
+[Remove two-deployment mode](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/159)
+
+The ability to install both the agent and standalone collectors simultaneously with the chart has been removed. Installs/upgrades where both `.Values.agentCollector.enabled` and `.Values.standaloneCollector.enabled` are true will fail. `agentCollector` and `standaloneCollector` have also been deprecated, but backward compatibility has been maintained.
+
+### To run both a deployment and daemonset
+
+Install a deployment version of the collector. This is done by setting `.Values.mode` to `deployment`:
+
+```yaml
+mode: deployment
+```
+
+Next, install a daemonset version of the collector that is configured to send traffic to the previously installed deployment. This is done by setting `.Values.mode` to `daemonset` and updating `.Values.config` so that data is exported to the deployment.
+
+```yaml
+mode: daemonset
+
+config:
+  exporters:
+    otlp:
+      endpoint: example-opentelemetry-collector:4317
+      tls:
+        insecure: true
+  service:
+    pipelines:
+      logs:
+        exporters:
+          - otlp
+          - logging
+      metrics:
+        exporters:
+          - otlp
+          - logging
+      traces:
+        exporters:
+          - otlp
+          - logging
+```
+
+See the [daemonset-and-deployment](examples/daemonset-and-deployment) example to see the rendered config.
+
+### Migrate to `mode`
+
+The `agentCollector` and `standaloneCollector` sections in values.yaml have been deprecated. Instead there is a new field, `mode`, that determines whether the collector is installed as a daemonset or a deployment.
+
+```yaml
+# Valid values are "daemonset" and "deployment".
+# If set, agentCollector and standaloneCollector are ignored.
+mode:
+```
+
+The following fields have also been added at the root level to replace the deprecated `agentCollector` and `standaloneCollector` settings.
+
+```yaml
+containerLogs:
+  enabled: false
+
+resources:
+  limits:
+    cpu: 1
+    memory: 2Gi
+
+podAnnotations: {}
+
+podLabels: {}
+
+# Host networking requested for this pod. Use the host's network namespace.
+hostNetwork: false
+
+# only used with deployment mode
+replicaCount: 1
+
+annotations: {}
+```
+
+When using `mode`, these settings should be used instead of their counterparts in `agentCollector` and `standaloneCollector`.
+
+Set `mode` to `daemonset` if `agentCollector` was being used. Move all `agentCollector` settings to the corresponding root-level settings. If `agentCollector.configOverride` was being used, merge the settings with `.Values.config`.
+
+Example agentCollector values.yaml:
+
+```yaml
+agentCollector:
+  resources:
+    limits:
+      cpu: 3
+      memory: 6Gi
+  configOverride:
+    receivers:
+      hostmetrics:
+        scrapers:
+          cpu:
+          disk:
+          filesystem:
+    service:
+      pipelines:
+        metrics:
+          receivers: [otlp, prometheus, hostmetrics]
+```
+
+Example mode values.yaml:
+
+```yaml
+mode: daemonset
+
+resources:
+  limits:
+    cpu: 3
+    memory: 6Gi
+
+config:
+  receivers:
+    hostmetrics:
+      scrapers:
+        cpu:
+        disk:
+        filesystem:
+  service:
+    pipelines:
+      metrics:
+        receivers: [otlp, prometheus, hostmetrics]
```
+
+Set `mode` to `deployment` if `standaloneCollector` was being used. Move all `standaloneCollector` settings to the corresponding root-level settings. If `standaloneCollector.configOverride` was being used, merge the settings with `.Values.config`.
+
+Example standaloneCollector values.yaml:
+
+```yaml
+standaloneCollector:
+  enabled: true
+  replicaCount: 2
+  configOverride:
+    receivers:
+      podman_stats:
+        endpoint: unix://run/podman/podman.sock
+        timeout: 10s
+        collection_interval: 10s
+    service:
+      pipelines:
+        metrics:
+          receivers: [otlp, prometheus, podman_stats]
+```
+
+Example mode values.yaml:
+
+```yaml
+mode: deployment
+
+replicaCount: 2
+
+config:
+  receivers:
+    podman_stats:
+      endpoint: unix://run/podman/podman.sock
+      timeout: 10s
+      collection_interval: 10s
+  service:
+    pipelines:
+      metrics:
+        receivers: [otlp, prometheus, podman_stats]
+```
+
+Default configuration in `.Values.config` can now be removed with `null`. When changing a pipeline, you must explicitly list all the components that are in the pipeline, including any default components.
+
+*Example*: Disable metrics and logging pipelines and non-otlp receivers:
+
+```yaml
+config:
+  receivers:
+    jaeger: null
+    prometheus: null
+    zipkin: null
+  service:
+    pipelines:
+      traces:
+        receivers:
+          - otlp
+      metrics: null
+      logs: null
+``` diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/clusterrole-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/clusterrole-values.yaml new file mode 100644 index 0000000..ac39cc6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/clusterrole-values.yaml @@ -0,0 +1,20 @@ +mode: daemonset
+clusterRole:
+  create: true
+  name: "testing-clusterrole"
+  rules:
+  - apiGroups:
+    - ''
+    resources:
+    - 'pods'
+    - 'nodes'
+    verbs:
+    - 'get'
+    - 'list'
+    - 'watch'
+  clusterRoleBinding:
+    name: "testing-clusterrolebinding"
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/config-override-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/config-override-values.yaml new file mode 100644 index 0000000..ff0edfd --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/config-override-values.yaml @@ -0,0 +1,19 @@ +mode: daemonset
+config:
+  receivers:
+    jaeger: null
+    otlp: null
+    zipkin: null
+    hostmetrics:
+      scrapers:
+        cpu:
+        disk:
+        filesystem:
+  service:
+    pipelines:
+      metrics:
+        receivers:
+          - prometheus
+          - hostmetrics
+      traces: null
+      logs: null diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/daemonset-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/daemonset-values.yaml new file mode 100644 index 0000000..1ccac8b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/daemonset-values.yaml @@ -0,0 +1,5 @@ +mode: daemonset
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/deployment-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/deployment-values.yaml new file mode 100644 index 0000000..c20e347 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/deployment-values.yaml @@ -0,0 +1,5 @@ +mode: deployment
+resources:
+  limits:
+    cpu: 100m
+    memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/disabling-protocols-values.yaml
b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/disabling-protocols-values.yaml new file mode 100644 index 0000000..62cbb30 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/disabling-protocols-values.yaml @@ -0,0 +1,14 @@ +mode: deployment +ports: + jaeger-compact: + enabled: false + jaeger-thrift: + enabled: false + jaeger-grpc: + enabled: false + zipkin: + enabled: false +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/multiple-ingress-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/multiple-ingress-values.yaml new file mode 100644 index 0000000..1972b45 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/multiple-ingress-values.yaml @@ -0,0 +1,43 @@ +mode: deployment + +resources: + limits: + cpu: 100m + memory: 200M + +ingress: + enabled: true + + ingressClassName: nginx + annotations: + test.io/collector: default + hosts: + - host: defaultcollector.example.com + paths: + - path: / + pathType: Prefix + port: 4318 + + additionalIngresses: + - name: additional-basic + hosts: + - host: additional-basic.example.com + paths: + - path: / + pathType: Prefix + port: 4318 + + - name: additional-advanced + ingressClassName: nginx + annotations: + test.io/ingress: additional-advanced + hosts: + - host: additional-advanced.example.com + paths: + - path: / + pathType: Exact + port: 4318 + tls: + - secretName: somesecret + hosts: + - additional-advanced.example.com diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-clustermetrics-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-clustermetrics-values.yaml new file mode 100644 index 0000000..503e832 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-clustermetrics-values.yaml @@ -0,0 +1,10 @@ +mode: deployment + +presets: + clusterMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-hostmetrics-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-hostmetrics-values.yaml new file mode 100644 index 0000000..62567e4 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-hostmetrics-values.yaml @@ -0,0 +1,10 @@ +mode: daemonset + +presets: + hostMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml new file mode 100644 index 0000000..8c97ffc --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml @@ -0,0 +1,10 @@ +mode: daemonset + +presets: + kubeletMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml new file mode 100644 index 
0000000..4f895c9 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml @@ -0,0 +1,10 @@ +mode: daemonset + +presets: + kubernetesAttributes: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-logscollection-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-logscollection-values.yaml new file mode 100644 index 0000000..737b291 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/preset-logscollection-values.yaml @@ -0,0 +1,11 @@ +mode: daemonset + +presets: + logsCollection: + enabled: true + includeCollectorLogs: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/statefulset-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/statefulset-values.yaml new file mode 100644 index 0000000..f4b2942 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/ci/statefulset-values.yaml @@ -0,0 +1,6 @@ +mode: statefulset +replicaCount: 2 +resources: + limits: + cpu: 100m + memory: 200M diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/README.md b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/README.md new file mode 100644 index 0000000..090dcf3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/README.md @@ -0,0 +1,12 @@ +# Examples of chart configuration + +Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts. + +- [Daemonset only](daemonset-only) +- [Deployment only](deployment-only) +- [Daemonset and deployment](daemonset-and-deployment) +- [Log collection, including collector logs](daemonset-collector-logs) +- [Add component (hostmetrics)](daemonset-hostmetrics) + +The manifests are rendered using the `helm template` command and the specific example folder's values.yaml. 
+ diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml new file mode 100644 index 0000000..92772e1 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml @@ -0,0 +1,28 @@ +mode: daemonset + +config: + exporters: + otlp: + endpoint: example-opentelemetry-collector:4317 + tls: + insecure: true + service: + pipelines: + logs: + exporters: + - otlp + - logging + metrics: + exporters: + - otlp + - logging + traces: + exporters: + - otlp + - logging + +resources: + limits: + cpu: 100m + memory: 200M + diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml new file mode 100644 index 0000000..4618a2e --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml @@ -0,0 +1,7 @@ +mode: deployment + +resources: + limits: + cpu: 100m + memory: 200M + diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml new file mode 100644 index 0000000..9b58581 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml @@ -0,0 +1,93 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm +data: + relay: | + exporters: + logging: {} + otlp: + endpoint: example-opentelemetry-collector:4317 + tls: + insecure: true + extensions: + health_check: {} + memory_ballast: + size_in_percentage: 40 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${MY_POD_IP}:14250 + thrift_compact: + endpoint: ${MY_POD_IP}:6831 + thrift_http: + endpoint: ${MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${MY_POD_IP}:4317 + http: + endpoint: ${MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${MY_POD_IP}:8888 + zipkin: + endpoint: ${MY_POD_IP}:9411 + service: + extensions: + - health_check + - memory_ballast + pipelines: + logs: + exporters: + - otlp + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - otlp + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - otlp + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${MY_POD_IP}:8888 diff 
--git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml new file mode 100644 index 0000000..fbba533 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm +data: + relay: | + exporters: + logging: {} + extensions: + health_check: {} + memory_ballast: + size_in_percentage: 40 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${MY_POD_IP}:14250 + thrift_compact: + endpoint: ${MY_POD_IP}:6831 + thrift_http: + endpoint: ${MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${MY_POD_IP}:4317 + http: + endpoint: ${MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${MY_POD_IP}:8888 + zipkin: + endpoint: ${MY_POD_IP}:9411 + service: + extensions: + - health_check + - memory_ballast + pipelines: + logs: + exporters: + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - logging + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${MY_POD_IP}:8888 diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml new file mode 100644 index 0000000..476bda3 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml @@ -0,0 +1,98 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 69408c31e98cdd67127e6c2a52fd93a99249d46080dc7457741ed2444e3b255f + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-contrib + - --config=/conf/relay.yaml + 
securityContext: + {} + image: "otel/opentelemetry-collector-contrib:0.72.0" + imagePullPolicy: IfNotPresent + ports: + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + hostPort: 14268 + - name: otlp + containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml new file mode 100644 index 0000000..6f57f5b --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml @@ -0,0 +1,94 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 6d757498456339ef2fa85e278c05813d1a80c6b0e4e07e9716e8bee86d4ddc9d + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-contrib + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-contrib:0.72.0" + imagePullPolicy: IfNotPresent + ports: + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector 
+ items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml new file mode 100644 index 0000000..1b72f23 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml @@ -0,0 +1,46 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml new file mode 100644 index 0000000..5222e98 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml @@ -0,0 +1,12 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm diff --git a/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml new file mode 100644 index 0000000..6ddd6b6 --- /dev/null +++ b/sreplayground-app/charts/opentelemetry-demo/charts/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml @@ -0,0 +1,150 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + labels: + helm.sh/chart: opentelemetry-collector-0.49.1 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.72.0" + app.kubernetes.io/managed-by: Helm +data: + relay: | + exporters: + logging: {} + extensions: + health_check: {} + memory_ballast: + size_in_percentage: 40 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + 
spike_limit_percentage: 25 + receivers: + filelog: + exclude: [] + include: + - /var/log/pods/*/*/*.log + include_file_name: false + include_file_path: true + operators: + - id: get-format + routes: + - expr: body matches "^\\{" + output: parser-docker + - expr: body matches "^[^ Z]+ " + output: parser-crio + - expr: body matches "^[^ Z]+Z" + output: parser-containerd + type: router + - id: parser-crio + output: extract_metadata_from_filepath + regex: ^(?P