From 647d051ccbe9de55786c90c8ba3dc961fe09e168 Mon Sep 17 00:00:00 2001 From: Kiran Babu Date: Tue, 28 May 2024 18:16:50 +0530 Subject: [PATCH] Initial hdp official helm chart project --- charts/hybriddatapipeline/Chart.yaml | 31 +++ charts/hybriddatapipeline/ReadMe.txt | 92 +++++++++ .../config/hdpdeploy.properties | 189 ++++++++++++++++++ .../secrets/hdp-secrets.yaml | 11 + .../secrets/postgres-secrets.yaml | 12 ++ charts/hybriddatapipeline/templates/NOTES.txt | 17 ++ .../hybriddatapipeline/templates/_helpers.tpl | 82 ++++++++ .../templates/configmap.yaml | 7 + .../templates/haproxy-ingress.yaml | 48 +++++ .../templates/hdp-service.yaml | 28 +++ .../templates/notification-service.yaml | 25 +++ .../templates/opa-service.yaml | 25 +++ .../templates/postgres-init-sql.yaml | 24 +++ charts/hybriddatapipeline/templates/pvc.yaml | 13 ++ .../templates/statefulset.yaml | 103 ++++++++++ charts/hybriddatapipeline/values.yaml | 172 ++++++++++++++++ 16 files changed, 879 insertions(+) create mode 100644 charts/hybriddatapipeline/Chart.yaml create mode 100644 charts/hybriddatapipeline/ReadMe.txt create mode 100644 charts/hybriddatapipeline/config/hdpdeploy.properties create mode 100644 charts/hybriddatapipeline/secrets/hdp-secrets.yaml create mode 100644 charts/hybriddatapipeline/secrets/postgres-secrets.yaml create mode 100644 charts/hybriddatapipeline/templates/NOTES.txt create mode 100644 charts/hybriddatapipeline/templates/_helpers.tpl create mode 100644 charts/hybriddatapipeline/templates/configmap.yaml create mode 100644 charts/hybriddatapipeline/templates/haproxy-ingress.yaml create mode 100644 charts/hybriddatapipeline/templates/hdp-service.yaml create mode 100644 charts/hybriddatapipeline/templates/notification-service.yaml create mode 100644 charts/hybriddatapipeline/templates/opa-service.yaml create mode 100644 charts/hybriddatapipeline/templates/postgres-init-sql.yaml create mode 100644 charts/hybriddatapipeline/templates/pvc.yaml create mode 100644 
charts/hybriddatapipeline/templates/statefulset.yaml create mode 100644 charts/hybriddatapipeline/values.yaml diff --git a/charts/hybriddatapipeline/Chart.yaml b/charts/hybriddatapipeline/Chart.yaml new file mode 100644 index 0000000..d24e09f --- /dev/null +++ b/charts/hybriddatapipeline/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + category: DataConnectivity + +apiVersion: v2 + +name: hybriddatapipeline + +description: Progress® DataDirect® Hybrid Data Pipeline is a light-weight software service that provides simple, secure access to cloud and on-premises data for business intelligence tools and applications. Client applications can use ODBC, JDBC, or OData to access data from a broad range of data stores. Requests from client applications are translated into the format supported by the underlying data store – SQL, NoSQL, Big Data, cloud – and returned in the format accepted by the client. Communications in HTTP and HTTPS are supported. In addition to supporting connectivity to a wide range of relational and non-relational database management systems, Hybrid Data Pipeline supports connectivity to REST sources with a built-in Autonomous REST Connector. Moreover, third-party connectors can be integrated with Hybrid Data Pipeline, expanding the range of data stores to which the service can connect. 
+ +type: application + +appVersion: "4.6.1" + +icon: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOEAAADhCAMAAAAJbSJIAAAAmVBMVEX///9c4wD///1P4gD2/u5W4gCe7XpR4wD//f/G87L9///+//xe4wH///tZ5QBb5QBB4QDC9abT+MCs8Yn1/en4/PeI6ktg4QPf+MxQ4ADr/OLj+dTZ+MOM61po5gCY7Wy68phy5i3N9att5yOW7Was8oF/6UKi63G88Zx+5yed73Ww8Y/P9LR76jyY7mme62zq+tmv74mL6E/dky40AAAJZ0lEQVR4nO2da3faOBCGJQtFjixLkEAcc0tCbt0mzSb9/z9uZwRtARuwTfGFnedTT5ty9EYjzUXSwBhBEARBEARBEARBEARBEARBEARBEARBEARBEARB/A8JdBDp1Z+1Dhody2nQDGVNZ/P53YSxKGp6PH+fAPSNfwrFpUr/7TU9mlMQsdFFaq3hxlip7psezl8kBm2awQQOjOIrjBHzKzDc2P9D9wlAI7t9EPw31nGV3oyXptt5IhQxnPWV+aMwAWPlUlyMYBqbHt/xgMD4WiljjFvTiKbqBB80Pbpj0QF6iAVPeT5WPE5ZyEJwlB31Hqhv8iDkDoE4j+lsyFhX9SHDJ2F36uPOOCfFM65FffCj2sk9LEBn9mg0hsNyXLDueY0IDe99rnZrW0c83LLVrtsVAnB0kxeBO2ghiVI8DTu1GgPNRq9CQoS2x0DXAD+i5H23XOO9VNZYmMFCcwi/CNBorpoe9WEiFqJ9suk8dYUmbxPx49N/TIsnExx8qNn4Bg20EuKjx+KwaRl7iJY5knLg6KoItE6pb02L2E/EBlzB7lJw/W1jEuPUvM3LMdL/9MGJV5q/3yT9m08s5bQ074i+REEvv3MewTumrz1f2GlaTZYgOhSJFpHoIFi1kFeFrVQYgHVtpPOlcdyXcjCvanEUt3AYcPPkmKm04maI4V9LS6ujZ8EtP2rHSZxMn8FU40i3M14d/qzs9lcKcRrNAlZj1ML1GOs4YNPHY5bj0qdKBXlVCwV6sELKpXSmaAaVj0xnPXS0TcvJQWMK9V1w547acOCXpN6CVhZWwzgG+xq/HLkcEWHem1aTRxT6aWRX8+MiAIMuUvyYNK1nDwFkw5hrbFeESyHFxyjEA7oWWisMafSvL5nKY/yjUfIeP62NyaOOAjb5AaZ63I7DnXi8amXVEQ0LK4uJOsZKV5XVl8+m1UCIpXfUWaI3If2Z0zHTKMVr3Zo20Zi47rQjyKuO0oeRnOnXqSdLuNSXuxtEsT+lOcpz2IQ3rBCygM/B/WXuPQTvHNmC7z6IOgzYuKhZ0hoBSug9CaFS9ca82kzW409LwXM4U3lBNqkwYuEb+j0Yez95z93XA7w0NJ6lsvqO06BCiM+cWFoSuAXh62XbPwERNP7l9HHXoXCrFUKMnTjufR6EaE6J1+xyRIUhzu2AV63INaBQ6yiGPOkuc5Yt5TdM7HTuJTbIq+wqqm69QgbpPPsmM3MCu4lIpv7KTNZaYQ8a3whry2fHDSiMcF318yIy4yxeCwqDTIyzdJvTRJTfcZqYQzxqyrE250NJKZ5H2f8SwSxCPA55Ven0uEaFAYPlF2PdcH9WpMwAI1UMv7Pxau+7kOWinDoV+tW14Co5NA3iYYo3a3L9I572lzqnqlEhnircPmKlaf9ZEyT3SsyGeC84W9kNdMgWRpXYUWtUqNlwBjuFTNz+vM84WI5KXeelVJpFWCf8UsWD1ToUBmCf6AKu99zmyqLcYnkomPeRvZnAQK6ItdahMPbX1S5hzKW8tRQPk6XzzOP2IbVtUYi3EbDu4kzJuoux6VMvfw7RVAdOFdlVa1AIEzj0IVrJ6qA1lkt1nzuHARZz4kJmX4vCL/CA1apKsOXMfV6VE6pCPjmeHd5Ua7HS97nglW8jKPEy8ZtVViILe4cLO
XUohF//W4ntfR1jE2OkeB3l5McaHCwoPLS467BSCNZY7ymtohFN23GrZM61oAC2WVB4yDZq8viw993+ELzywaAT8+nysuVmnNMOK0WCGKKtSwPbu61UqsecA/IqjbfYtxS2Zw5hHQXXqap2uwuvW/rlGLDtOWyJQl9QAscPkWmlLcf4Wo7KPLdokcIVGrMLhxfZqk2leLxtuUL0HAsrbcUAgDspW65Q+xvd12nVAMBI2wGFvmzW55VOXCC0bbnC3/jbQRUch+RdUQjJj1QVLLUzCoMwWFWxz1Vh5BPb8UvpM97OKPzFFeRVvExxo3MKmb5XpQ6VOqcQIoDeR5mLbJ1T6OPoSYlr3t1TyHzyvjDKmIQXCOU6qRB31ei5YLGqkwqXKd9wlhZZjp1UGAVBFEb+1Oaw2+ikwj8MYDkeGHC3FWoWXRw4Pe22Qjz2ZeOf+wOATivEOj0sx7u9hZyOK2S4sV6QwnVIISmsh42nZmepcONe0FkqDNcfmp2lwqv197tnpVCHWOPvzfqXa395Xgp1xOLr1Kbnq9Dn9tyos1W4vEVk3DkqjLHBU++jvxwCKDyznSaALcbf9l1etThDhViWuZpL51ZNoc5PYcA+X/BMJlk92j4fhb75KGyg2NRrfQhnpFD7Z3h43X5jVOejkIU6YFePqd0a0/koBH3jG+HP0jZKv2eiED1g/Jx7XeiUCp01NSnEFHBg828tn1KhcUbVo9DfPN/R0uOkCrlM69HXm+FQ8gdzUivllm+P5QTo0ZeQu6+WnnankRc1CAwe+/uulZxUoZQ5L+FOgG/d2YRC4eroGoGtt3p36c5pPJ3COrvxL6PtZe+4YxQaU+xuIj6JcuKpzm9UwGYlxr8czYyolELuCiq06UOtXYaWjfDuU8mzPrGUwoJWasXq5VudGjFuG36I7DF2KYUqKaLQNzXPfUp0eiYP+IatUvbkF7C83vi4rEKDEzgbh3nPiOpA+4fAtlJugf/Lqk33llGYJA67mYfN9WvDR3XP6cZZfWErNbAKt3aPbYXOKjnwjRmaai2oQ3zWg00gK1ip7D9HbNP6thXKFF/zR2HzbQWX17yNLKrQtyL4Oc58zi+FWNVyxuHLodZ0LxtYufr1F1HoeP9hmtPd5ZdCWKOO+9dfbenPhhn/95WpFlHo3wPlfBXCL4WwCfkXfGHQmu6s2OZqfOPfJB5W6N906dXl8A3+rMP+XQ89YKta7MKecQXL0ajFboXYmseIl+wCXOHfHyZt6DyXD0zJNyXE/rMnMd/TqdPPofPNPNs0eSsC39xjdNe/XLursK3QJ0C7vTfOoUq/cHdpYdNZjb2DYVyTqd5lpRJbDuxbXD0h+089306qLTvMIVYK3fJh7Mvtgei51291J9Y8VgqNhfiZv2Pn6v0KF3UN7K/xS6GT6VfgH9futb4WLr5DrBRKbMET+i/qPPDjccdEgp4LafC5bxu/5+BvEMTsQkl7yZrJz2sA4pwLbGfWXH53asC3D8b4xbFBvVUkgiAIgiAIgiAIgiAIgiAIgiAIgiAIgiAIgiAIgiAa4z93MIewqptIzgAAAABJRU5ErkJggg== + +sources: + - https://github.com/progress/hdp-kubernetes-preview + - https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-46 + +dependencies: + - name: postgresql + repository: https://charts.bitnami.com/bitnami + version: "13.2.30" + condition: postgres.enabled + - name: kubernetes-ingress + alias: kubernetesingress + repository: https://haproxytech.github.io/helm-charts/ + version: "1.38.2" + 
condition: haproxy.kubernetesIngress.enabled + +version: 0.1.0 diff --git a/charts/hybriddatapipeline/ReadMe.txt b/charts/hybriddatapipeline/ReadMe.txt new file mode 100644 index 0000000..b0a499f --- /dev/null +++ b/charts/hybriddatapipeline/ReadMe.txt @@ -0,0 +1,92 @@ +# Hybrid Data Pipeline Kubernetes Helm Chart + +This repository contains a Helm Chart that can be used to deploy Hybrid Data Pipeline on a Kubernetes cluster. Below is a brief description of how to easily create a Hybrid Data Pipeline StatefulSet for development and testing. + +## Getting Started + +### Prerequisites + +[Helm](https://helm.sh/docs/intro/install/), [Kubectl](https://kubernetes.io/docs/tasks/tools/), [AzureCLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) must be installed locally in order to use this chart. + +In this release, the Helm chart is supported in Azure Kubernetes Service. + +### Kubernetes Version + +This Helm-chart currently supports Kubernetes 1.27 or later. + +### Installing Hybrid Data Pipeline Helm Chart + +This below example Helm Chart installation will create a two-node Hybrid Data Pipeline cluster with a "Default" group. A 1GB persistent volume, 2 vCPUs, and 4GB of RAM will be allocated for the pod. + +1. Add Hybrid Data Pipeline Repo to Helm: +``` +helm repo add hybriddatapipeline https://github.com/progress/hdp-kubernetes-preview +``` +2. Create a Kubernetes namespace: +``` +kubectl create namespace hybriddatapipeline +``` +3. The Hybrid Data Pipeline Helm Chart relies on two sub-charts, namely PostgreSQL and HAProxy. + +To download the dependencies of the chart, execute: +``` +helm dependency build +``` +This action will retrieve the chart to /charts. + +4. When installing the Helm Chart, the secrets for PostgreSQL and Hybrid Data Pipeline are required to be created. To create secrets: +``` +kubectl apply -f secrets/hdp-secrets.yaml +kubectl apply -f secrets/postgres-secrets.yaml +``` + +5. 
Adjust the settings in the values.yaml file to create a two-node Hybrid Data Pipeline cluster with a minimum resource allocation of 2 vCPUs, 8 GB RAM, and 100 GB storage for Hybrid Data Pipeline Server. For detailed guidance, refer to the Hybrid Data Pipeline Product Requirements Documentation. + +Utilize the latest Hybrid Data Pipeline Server Docker image for the new implementation as specified in the values.yaml file. To access the most recent image available, consult Progress ESD. + +If necessary, push the Hybrid Data Pipeline Server Docker image to a container registry and update the image.repository and image.tag values accordingly. + +Should you desire HAProxy Load Balancer service for serving Hybrid Data Pipeline Server pod containers, perform these actions: + Specify an FQDN name in hdp.loadbalancer.hostName. + Set haproxy.kubernetesIngress.enabled to true (default is true). + +``` +## Progress DataDirect Hybrid Data Pipeline Server parameters +hdp: + + # Number of HDP nodes + replicaCount: 2 + + ## Progress DataDirect Hybrid Data Pipeline Server image parameters + image: + repository: + tag: + pullPolicy: IfNotPresent + + ## Progress DataDirect Hybrid Data Pipeline Server Container persistence parameters + persistence: + mountPath: /hdpshare + size: 1Gi + storageClassName: azurefile-csi + + ## Progress DataDirect Hybrid Data Pipeline Server Container resources parameters + resources: + requests: + memory: "4096Mi" + cpu: "2000m" + limits: + memory: "8192Mi" + cpu: "4000m" + + ## Progress DataDirect Hybrid Data Pipeline Server License parameters + licenseKey: + + ## Progress DataDirect Hybrid Data Pipeline Server Container load balancer parameters + loadbalancer: + hostName: +``` +6. Install the Hybrid Data Pipeline Helm Chart with the above custom settings. 
+``` +helm install my-release hybriddatapipeline/hybriddatapipeline --values values.yaml --namespace=hybriddatapipeline +``` +Once the installation is complete and the pod is in a running state, the Hybrid Data Pipeline can be accessed using hostname as configured for the hdp.loadbalancer.hostName in values.yaml diff --git a/charts/hybriddatapipeline/config/hdpdeploy.properties b/charts/hybriddatapipeline/config/hdpdeploy.properties new file mode 100644 index 0000000..918e43d --- /dev/null +++ b/charts/hybriddatapipeline/config/hdpdeploy.properties @@ -0,0 +1,189 @@ +; This properties file is used to configure a Docker deployment of Hybrid Data +; Pipeline (HDP). Specify values for each property to suit your environment. A +; number of values can be set using environment variables. Environment variables +; must be specified in the Docker run command used to deploy the service. For +; example: +; docker run -dt -p 8080:8080 -p 8443:8443 -e "ACCEPT_EULA=true" +; -v /opt/hdpshare:/hdpshare --hostname hdp1 --name hdp1 +; --network hdp-net %HDP_DOCKER_IMAGE% +; -e HDP_USER_PASSWORD=MyHDPUserPassword +; -e HDP_ADMIN_PASSWORD=MyHDPAdminPassword +; The names of supported environment variables match the names of the properties +; in this file. For more information about using environment variables, refer to +; "Using environment variables for server and system database credentials": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/Using-environment-variables-for-server-and-system-database-credentials.html + +; HDP server hostname +; ------------------- +; The fully qualified hostname of the HDP server. In a Docker deployment, the +; hostname is the name of the Docker container host. If a hostname is not +; supplied, the Docker image will attempt to detect it. 
+HDP_SERVER={{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }} + +; HDP SSL certificate +; ------------------- +; The name of the SSL certificate file used by the HDP server to establish SSL +; connections with ODBC and JDBC client applications. The certificate file must +; be placed in the shared volume. (NOTE: The default location of the shared +; volume for each demo is C:\hdpshare.) The value provided depends on whether +; you are deploying HDP as a cluster or as a standalone node. +; For cluster deployments, the specified file must be the root certificate used +; to sign the certificate for the load balancer server. PEM, DER, and Base64 +; encodings are supported. +; For standalone deployments, you have two options. +; (1) Leave blank to use the self-signed certificate included with the +; deployment. +; (2) Specify the SSL certificate in PEM file format. The PEM file must consist +; of a private key, a public key certificate issued by a certificate authority +; (CA), and additional certificates that make up the trust chain. +; For more information about creating a PEM file, refer to "The PEM file": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/The-PEM-file.html. +HDP_SERVER_CERT_FILE= + +; System database properties +; ========================== +; An external system database is required for this Docker deployment of HDP. +; For supported databases and requirements, refer to "External system +; databases": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/External-system-databases.html. +; The following properties are used to configure the external database. Several +; properties are specific to the database vendor. + +; Required. The hostname of the system database. +; NOTE: If you are locally deploying the system database as a Docker container, +; the hostname does not resolve correctly if set to 'localhost'. 
Instead, you +; must use the Docker-specific lookup name 'host.docker.internal'. +HDP_DATABASE_HOSTNAME={{ .Release.Name }}-{{.Values.hdp.database.hostName}} + +; Required. The port of the system database. +HDP_DATABASE_PORT={{.Values.hdp.database.port}} + +; Required for PostgreSQL and MySQL databases. The name of the system database. +HDP_DATABASE_NAME={{.Values.hdp.database.name}} + +; Required. The name of the system database vendor. +; Valid values include: oracle | mssqlserver | postgresql | mysqlcommunity +HDP_DATABASE_VENDOR=postgresql + +; Required if you are using a MySQL Community Edition system database. The +; MySQL Connector/J jar file must be placed in the hdpshare volume, and the name +; of the jar file must be specified with this property. +HDP_MYSQL_DRIVER= + +; Required if you are using an Oracle system database with the Oracle System +; Identifier (SID). Specify the SID for this property. Otherwise, leave this +; property blank. +HDP_ORACLE_SID= + +; Required if you are using an Oracle system database with the service name. For +; this property, provide the database service name that specifies the database +; that is used for the connection. The service name is a string that is the +; global database name. The global database name typically comprises the +; database name and domain name. Otherwise, leave this property blank. +HDP_ORACLE_SERVICE_NAME= + +; Required if you are using a SQL Server system database. Provide the name of +; the SQL Server database schema. +HDP_SQLSERVER_SCHEMA= + +; Additional connection parameters may be included in the database connection +; URL. For supported parameters, refer to the data store topic associated with +; your system database in the HDP user's guide: +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-46/page/Supported-data-stores.html +; Parameters should be provided as a list of key-value pairs. 
For a MySQL +; Community database, the key-value pairs should be separated by an ampersand. +; For other databases, the key-value pairs should be separated by a semicolon. +HDP_DATABASE_ADVANCED_OPTIONS={{.Values.hdp.database.advancedOptions}} + +; Required but may be set as an environment variable. The name of the database +; user without administrator privileges. Required privileges depend on the type +; of database you are using. For details on required privileges, refer to +; "External system databases": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/External-system-databases.html. +HDP_DATABASE_USER={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpDbUser") | trim }} + +; Required but may be set as an environment variable. The password of the +; database user without administrator privileges. +HDP_DATABASE_USER_PASSWORD={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpDbUserPwd") | trim }} + +; Required but may be set as an environment variable. The name of the database +; user with administrator privileges. Required privileges depend on the type of +; database you are using. For details on required privileges, refer to "External +; system databases": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/External-system-databases.html. +HDP_DATABASE_ADMIN={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpDbAdmin") | trim }} + +; Required but may be set as an environment variable. The password of the +; database user with administrator privileges. +HDP_DATABASE_ADMIN_PASSWORD={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpDbAdminPwd") | trim }} + +; On-Premises Connector +; --------------------- +; Specify 'yes' if you plan to deploy the On-Premises Connector. +; Specify 'no' if you are not deploying the On-Premises Connector. 
+; If 'yes' is specified, the HDP server uses the On-Premises Access Port (40501) +; and the Notification Server Port (11443) to communicate with the On-Premises +; Connector. These ports must be made available across the firewall. The default +; value is 'yes'. +HDP_ENABLE_ONPREMISE_CONNECTOR=yes + +; HDP Logon credentials +; ===================== +; HDP provides the default d2cadmin and d2cuser accounts. You must specify +; passwords for each of these accounts with the following properties. + +; Required but may be set as an environment variable. The password for the +; default administrator account. +HDP_ADMIN_PASSWORD={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpAdminPwd") | trim }} + +; Required but may be set as an environment variable. The password for the +; default user account. +HDP_USER_PASSWORD={{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "hdpUserPwd") | trim }} + +; Load balancer +; ------------- +; Required. Indicates whether or not a load balancer is being used. +; Valid values: none | tunnel | websocket +; * 'none' should be specified for standalone deployments. +; * 'tunnel' should be specified for load balancer routing using tunneling. +; Tunneling is generally used for network load balancing. +; * 'websocket' should be specified for load balancer routing using the +; WebSocket protocol. The WebSocket protocol is generally used for load +; balancing when using a hosted service such as AWS or Azure. +HDP_LOAD_BALANCER_TYPE=websocket + +; Load balancer name +; ------------------ +; Required if a load balancer is being used. The hostname or IP address of the +; machine where the load balancer is running. +HDP_LOAD_BALANCER_HOST_NAME={{ .Values.hdp.loadbalancer.hostName }} + +; The name of the SSL certificate file used by the load balancer to establish SSL +; connections with ODBC and JDBC client applications. The certificate file must be +; placed in the shared volume. 
This must be the root certificate used in the +; wildcard certificate chain. +; This certificate must be provided in a Base64 encoding, such as a +; PEM or DER file format. +; For more information about creating a PEM file, refer to "The PEM file": +; https://docs.progress.com/bundle/datadirect-hybrid-data-pipeline-installation-46/page/The-PEM-file.html. +HDP_LOAD_BALANCER_CERT_FILE={{ .Values.haproxy.tls.certFileName }} + +; License key +; ----------- +; Leave blank to deploy an evaluation version of the product. +; Provide a valid license key to deploy a licensed version of the product. +HDP_LICENSE_KEY={{ .Values.hdp.licenseKey }} + +; JVM heap size +; ============= +; JVM heap size properties may be specified if you need to limit the memory +; allocated to HDP nodes. Specifying heap size may be particularly useful when +; running multiple Docker nodes on a single host machine. + +; The initial JVM heap size in megabytes. The default is 4096 megabytes. +HDP_HEAP_INITIAL={{- .Values.hdp.resources.requests.memory | trunc 63 | trimSuffix "Mi" }} + +; The maximum JVM heap size in megabytes. This value cannot be less than the +; initial JVM heap size. The default value is the value specified for the +; HDP_HEAP_INITIAL property. 
+HDP_HEAP_MAX={{- .Values.hdp.resources.limits.memory | trunc 63 | trimSuffix "Mi" }} diff --git a/charts/hybriddatapipeline/secrets/hdp-secrets.yaml b/charts/hybriddatapipeline/secrets/hdp-secrets.yaml new file mode 100644 index 0000000..18cfe01 --- /dev/null +++ b/charts/hybriddatapipeline/secrets/hdp-secrets.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: hdp-secrets +data: + hdpDbUser: cGd1c2Vy + hdpDbUserPwd: cGd1c3Jwd2QhQCMxMjM= + hdpDbAdmin: cGdhZG1pbg== + hdpDbAdminPwd: cGdhZG1wd2QhQCMxMjM= + hdpAdminPwd: ZDJjYWRtIUAjMTIz + hdpUserPwd: ZDJjdXNyIUAjMTIz diff --git a/charts/hybriddatapipeline/secrets/postgres-secrets.yaml b/charts/hybriddatapipeline/secrets/postgres-secrets.yaml new file mode 100644 index 0000000..8f3fb73 --- /dev/null +++ b/charts/hybriddatapipeline/secrets/postgres-secrets.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Secret +metadata: + name: postgres-secrets +data: + postgres.db.name: cG9zdGdyZXM= + postgres.db.username: cG9zdGdyZXM= + postgres.db.password: ZG9ja2Vy + postgres.hdp.pgadmin.user: cGdhZG1pbg== + postgres.hdp.pgadmin.pwd: cGdhZG1wd2QhQCMxMjM= + postgres.hdp.pguser.user: cGd1c2Vy + postgres.hdp.pguser.pwd: cGd1c3Jwd2QhQCMxMjM= diff --git a/charts/hybriddatapipeline/templates/NOTES.txt b/charts/hybriddatapipeline/templates/NOTES.txt new file mode 100644 index 0000000..3504fa7 --- /dev/null +++ b/charts/hybriddatapipeline/templates/NOTES.txt @@ -0,0 +1,17 @@ +Thank you for installing {{ .Chart.Name }}. + +Your release is named {{ .Release.Name }}. 
+ +To get pods in the {{ .Release.Namespace }} namespace, try: + + $ kubectl get pods --namespace={{ .Release.Namespace }} + +To get all services in the {{ .Release.Namespace }} namespace, try: + + $ kubectl get services --namespace={{ .Release.Namespace }} + +To get ingress in the {{ .Release.Namespace }} namespace, try: + + $ kubectl get ingress --namespace={{ .Release.Namespace }} + +Hybrid Data Pipeline can be accessed at https://{{ .Values.hdp.loadbalancer.hostName }}/ diff --git a/charts/hybriddatapipeline/templates/_helpers.tpl b/charts/hybriddatapipeline/templates/_helpers.tpl new file mode 100644 index 0000000..186927a --- /dev/null +++ b/charts/hybriddatapipeline/templates/_helpers.tpl @@ -0,0 +1,82 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "hybriddatapipeline.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "hybriddatapipeline.namespace" -}} + {{- printf "%s" .Release.Namespace -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "hybriddatapipeline.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "hybriddatapipeline.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "hybriddatapipeline.labels" -}} +helm.sh/chart: {{ include "hybriddatapipeline.chart" . }} +{{ include "hybriddatapipeline.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "hybriddatapipeline.selectorLabels" -}} +app.kubernetes.io/name: {{ include "hybriddatapipeline.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "hybriddatapipeline.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "hybriddatapipeline.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +Helper function to retrieve a value from a Kubernetes Secret +Arguments in Order - namespace, secretname, secretkey +Usage: {{ include "hybriddatapipeline.secretValue" (list "default" "hdp-secrets" "userpwd") | trim | b64dec}} +*/}} +{{- define "hybriddatapipeline.secretValue" -}} +{{- $namespace := index . 0 }} +{{- $secretName := index . 1 }} +{{- $key := index . 
2 }} +{{- $existingSecret := (lookup "v1" "Secret" $namespace $secretName) }} +{{- if $existingSecret.data }} +{{- $userPassword := index $existingSecret.data $key}} +{{ $userPassword | b64dec}} +{{- end -}} +{{- end }} diff --git a/charts/hybriddatapipeline/templates/configmap.yaml b/charts/hybriddatapipeline/templates/configmap.yaml new file mode 100644 index 0000000..e3d6c83 --- /dev/null +++ b/charts/hybriddatapipeline/templates/configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-configmap +data: + hdpdeploy.properties: |- +{{ tpl (.Files.Get (printf "config/%s" .Values.hdp.configurationFileName)) . | indent 4 }} diff --git a/charts/hybriddatapipeline/templates/haproxy-ingress.yaml b/charts/hybriddatapipeline/templates/haproxy-ingress.yaml new file mode 100644 index 0000000..69028e1 --- /dev/null +++ b/charts/hybriddatapipeline/templates/haproxy-ingress.yaml @@ -0,0 +1,48 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + haproxy.org/affinity: "true" + haproxy.org/cookie-persistence: HDP-SESSION + haproxy.org/forwarded-for: "true" + haproxy.org/load-balance: roundrobin + haproxy.org/timeout-check: 5m + haproxy.org/timeout-server: 5m + {{- end}} + kubernetes.io/ingress.class: {{ .Values.kubernetesingress.controller.ingressClass }} + name: {{ .Release.Name }}-{{ .Values.haproxy.kubernetesIngress.ingressName }} + namespace: {{ .Release.Namespace }} +spec: + ingressClassName: {{ .Values.kubernetesingress.controller.ingressClass }} + rules: + - host: {{ .Values.hdp.loadbalancer.hostName }} + http: + paths: + - backend: + service: + name: {{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }} + port: + number: {{ .Values.hdp.ports.hdpServer.port }} + path: / + pathType: Prefix + - backend: + service: + name: {{ .Release.Name }}-{{ .Values.hdp.services.notificationService.name }} + port: + number: {{ 
.Values.hdp.ports.notificationServer.port }} + path: {{ .Values.hdp.services.notificationService.aclPath }} + pathType: Prefix + - backend: + service: + name: {{ .Release.Name }}-{{ .Values.hdp.services.opAccessorService.name }} + port: + number: {{ .Values.hdp.ports.opAccessor.port }} + path: {{ .Values.hdp.services.opAccessorService.aclPath }}_{{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }}_{{ .Values.hdp.ports.opAccessor.port }} + pathType: Prefix + {{- if .Values.haproxy.tls.enabled }} + tls: + - hosts: + - {{ .Values.hdp.loadbalancer.hostName }} + secretName: tls-cert + {{- end -}} diff --git a/charts/hybriddatapipeline/templates/hdp-service.yaml b/charts/hybriddatapipeline/templates/hdp-service.yaml new file mode 100644 index 0000000..4d373d3 --- /dev/null +++ b/charts/hybriddatapipeline/templates/hdp-service.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + annotations: + haproxy.org/check: "{{ .Values.hdp.services.hdpService.check }}" + haproxy.org/check-http: HEAD {{ .Values.hdp.services.hdpService.checkPath }} + haproxy.org/check-interval: {{ .Values.hdp.services.hdpService.checkInterval }} + {{- end}} + name: {{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + type: ClusterIP + {{ else }} + type: NodePort + {{- end}} + ports: + - name: hdpserver-port + port: {{.Values.hdp.ports.hdpServer.port}} + protocol: TCP + targetPort: {{.Values.hdp.ports.hdpServer.targetPort}} + - name: internal-api-port + port: 8190 + protocol: TCP + targetPort: 8190 + selector: + app: hdp-server-app diff --git a/charts/hybriddatapipeline/templates/notification-service.yaml b/charts/hybriddatapipeline/templates/notification-service.yaml new file mode 100644 index 0000000..4d3b9fe --- /dev/null +++ b/charts/hybriddatapipeline/templates/notification-service.yaml @@ -0,0 +1,25 @@ 
+apiVersion: v1 +kind: Service +metadata: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + annotations: + haproxy.org/check: "{{ .Values.hdp.services.notificationService.check }}" + haproxy.org/check-http: HEAD {{ .Values.hdp.services.notificationService.checkPath }} + haproxy.org/check-interval: {{ .Values.hdp.services.notificationService.checkInterval }} + haproxy.org/route-acl: path_end -i {{ .Values.hdp.services.notificationService.aclPath }} + {{- end }} + name: {{ .Release.Name }}-{{ .Values.hdp.services.notificationService.name }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + type: ClusterIP + {{ else }} + type: NodePort + {{- end}} + ports: + - name: notificationserver-port + port: {{ .Values.hdp.ports.notificationServer.port }} + protocol: TCP + targetPort: {{ .Values.hdp.ports.notificationServer.targetPort }} + selector: + app: hdp-server-app diff --git a/charts/hybriddatapipeline/templates/opa-service.yaml b/charts/hybriddatapipeline/templates/opa-service.yaml new file mode 100644 index 0000000..5963773 --- /dev/null +++ b/charts/hybriddatapipeline/templates/opa-service.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + annotations: + haproxy.org/check: "{{ .Values.hdp.services.opAccessorService.check }}" + haproxy.org/check-http: HEAD {{ .Values.hdp.services.opAccessorService.checkPath }} + haproxy.org/check-interval: {{ .Values.hdp.services.opAccessorService.checkInterval }} + haproxy.org/route-acl: path_end -i {{ .Values.hdp.services.opAccessorService.aclPath }}_{{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }}_{{ .Values.hdp.ports.opAccessor.port }} + {{- end }} + name: {{ .Release.Name }}-{{ .Values.hdp.services.opAccessorService.name }} + namespace: {{ .Release.Namespace }} +spec: + {{- if .Values.haproxy.kubernetesIngress.enabled }} + type: ClusterIP + {{ else }} + type: NodePort + {{- end}} + ports: + - 
name: opaccessor-port
+      port: {{ .Values.hdp.ports.opAccessor.port }}
+      protocol: TCP
+      targetPort: {{ .Values.hdp.ports.opAccessor.targetPort }}
+  selector:
+    app: hdp-server-app
diff --git a/charts/hybriddatapipeline/templates/postgres-init-sql.yaml b/charts/hybriddatapipeline/templates/postgres-init-sql.yaml
new file mode 100644
index 0000000..63eabd9
--- /dev/null
+++ b/charts/hybriddatapipeline/templates/postgres-init-sql.yaml
@@ -0,0 +1,24 @@
+{{- if (.Values.postgres.enabled) }}
+# Creates the initial script to setup the PostgreSQL Database for HDP as an external Account Database
+# Expects the secrets to be created prior to setup the ConfigMap
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Values.postgresql.primary.initdb.scriptsConfigMap }}
+data:
+  init.sh: |-
+    set -e
+    export PGPASSWORD='{{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.db.password") | trim }}'
+    psql -v ON_ERROR_STOP=1 --username {{.Values.postgres.init.postgres_user}} --dbname {{.Values.postgres.init.dbname}} <<-EOSQL
+      create database {{.Values.postgres.hdp.dbname}};
+      create user {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pgadmin.user") | trim }} with PASSWORD '{{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pgadmin.pwd") | trim }}';
+      create user {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pguser.user") | trim }} with PASSWORD '{{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pguser.pwd") | trim }}';
+    EOSQL
+    psql -v ON_ERROR_STOP=1 --username {{.Values.postgres.init.postgres_user}} --dbname {{.Values.postgres.hdp.dbname}} <<-EOSQL
+      create schema {{.Values.postgres.hdp.schemaname}} authorization {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pgadmin.user") | trim }};
+      create extension citext with
schema {{.Values.postgres.hdp.schemaname}};
+      grant usage on schema {{.Values.postgres.hdp.schemaname}} to {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pguser.user") | trim }};
+      alter user {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pgadmin.user") | trim }} SET search_path TO {{.Values.postgres.hdp.schemaname}};
+      alter user {{ include "hybriddatapipeline.secretValue" (list "default" "postgres-secrets" "postgres.hdp.pguser.user") | trim }} SET search_path TO {{.Values.postgres.hdp.schemaname}};
+    EOSQL
+{{- end }}
diff --git a/charts/hybriddatapipeline/templates/pvc.yaml b/charts/hybriddatapipeline/templates/pvc.yaml
new file mode 100644
index 0000000..1efd5e9
--- /dev/null
+++ b/charts/hybriddatapipeline/templates/pvc.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ .Release.Name }}-shared-storage-volume
+  annotations:
+    "helm.sh/resource-policy": keep
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: {{.Values.hdp.persistence.size}}
+  storageClassName: {{.Values.hdp.persistence.storageClassName}}
diff --git a/charts/hybriddatapipeline/templates/statefulset.yaml b/charts/hybriddatapipeline/templates/statefulset.yaml
new file mode 100644
index 0000000..972a0a3
--- /dev/null
+++ b/charts/hybriddatapipeline/templates/statefulset.yaml
@@ -0,0 +1,103 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: {{ .Release.Name }}-{{ .Values.hdp.services.hdpService.name }}
+  namespace: {{ .Release.Namespace }}
+spec:
+  replicas: {{ .Values.hdp.replicaCount }}
+  updateStrategy:
+    type: {{ .Values.hdp.updateStrategy.type }}
+  selector:
+    matchLabels:
+      app: hdp-server-app
+  template:
+    metadata:
+      annotations:
+        rollme: {{ randAlphaNum 5 | quote }}
+      labels:
+        app: hdp-server-app
+        date: "{{ now | unixEpoch }}"
+    spec:
+      initContainers:
+        - name: init-properties
+          image:
"{{.Values.hdp.image.repository}}:{{.Values.hdp.image.tag}}"
+          imagePullPolicy: {{.Values.hdp.image.pullPolicy}}
+          {{- if .Values.haproxy.tls.enabled }}
+          command: ["/bin/sh", "-c", "mkdir -p /hdpshare && cp /hdpdeployprops/{{.Values.hdp.configurationFileName}} /hdpshare/hdpdeploy.properties && echo 'Config Map mounted and properties file copied to /hdpshare!'; cat /certs/tls.crt /certs/tls.key > /hdpshare/{{.Values.haproxy.tls.certFileName}} && echo 'Certificate copied to /hdpshare!'"]
+          {{- else }}
+          command: ["/bin/sh", "-c", "mkdir -p /hdpshare && cp /hdpdeployprops/{{.Values.hdp.configurationFileName}} /hdpshare/hdpdeploy.properties && echo 'Config Map mounted and properties file copied to /hdpshare!'"]
+          {{- end }}
+          volumeMounts:
+            - name: properties
+              mountPath: /hdpdeployprops
+          {{- if .Values.haproxy.tls.enabled }}
+            - name: certs
+              mountPath: /certs
+          {{- end }}
+            - name: shared-storage-volume
+              mountPath: /hdpshare
+      containers:
+        - name: hdp-server
+          image: "{{.Values.hdp.image.repository}}:{{.Values.hdp.image.tag}}"
+          imagePullPolicy: {{.Values.hdp.image.pullPolicy}}
+          ports:
+            - name: hdpserver
+              containerPort: {{.Values.hdp.ports.hdpServer.port}}
+            - name: opaccessor
+              containerPort: {{.Values.hdp.ports.opAccessor.port}}
+            - name: notification
+              containerPort: {{.Values.hdp.ports.notificationServer.port}}
+            - name: internal
+              containerPort: 8190
+          env:
+            - name: ACCEPT_EULA
+              value: "true"
+          volumeMounts:
+            - name: properties
+              mountPath: /hdpdeployprops
+            - name: shared-storage-volume
+              mountPath: {{ .Values.hdp.persistence.mountPath }}
+          {{- if .Values.hdp.livenessProbe.enabled }}
+          livenessProbe:
+            httpGet:
+              path: /
+              port: {{.Values.hdp.ports.hdpServer.port}}
+            initialDelaySeconds: {{ .Values.hdp.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.hdp.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.hdp.livenessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.hdp.livenessProbe.failureThreshold }}
+
successThreshold: {{ .Values.hdp.livenessProbe.successThreshold }}
+          {{- end }}
+          {{- if .Values.hdp.readinessProbe.enabled }}
+          readinessProbe:
+            httpGet:
+              path: /
+              port: {{.Values.hdp.ports.hdpServer.port}}
+            initialDelaySeconds: {{ .Values.hdp.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.hdp.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.hdp.readinessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.hdp.readinessProbe.failureThreshold }}
+            successThreshold: {{ .Values.hdp.readinessProbe.successThreshold }}
+          {{- end }}
+          resources:
+            requests:
+              memory: {{.Values.hdp.resources.requests.memory}}
+              cpu: {{.Values.hdp.resources.requests.cpu}}
+            limits:
+              memory: {{.Values.hdp.resources.limits.memory}}
+              cpu: {{.Values.hdp.resources.limits.cpu}}
+      volumes:
+        - name: shared-storage-volume
+          persistentVolumeClaim:
+            claimName: {{ .Release.Name }}-shared-storage-volume
+        - name: properties
+          configMap:
+            name: {{ .Release.Name }}-configmap
+      {{- if .Values.haproxy.tls.enabled }}
+        - name: certs
+          secret:
+            secretName: {{ .Values.haproxy.tls.secretName }}
+      {{- end }}
+      {{- if .Values.imagePullSecrets }}
+      imagePullSecrets: {{- toYaml .Values.imagePullSecrets | nindent 8 }}
+      {{- end }}
diff --git a/charts/hybriddatapipeline/values.yaml b/charts/hybriddatapipeline/values.yaml
new file mode 100644
index 0000000..98c90d6
--- /dev/null
+++ b/charts/hybriddatapipeline/values.yaml
@@ -0,0 +1,172 @@
+## Values for Progress DataDirect Hybrid Data Pipeline(HDP) Server Deployment
+
+## Values used for Hybrid Data Pipeline(HDP) installation
+hdp:
+  ## Controls number of nodes to be deployed
+  replicaCount: 2
+  ## HDP Docker image details
+  image:
+    repository:
+    tag:
+    pullPolicy: IfNotPresent
+    imagePullSecrets: []
+
+  ## HDP Server License Key, leave this blank to install HDP in Evaluation Mode
+  licenseKey:
+
+  ## Hybrid Data Pipeline Server Container load balancer hostname
+  loadbalancer:
+    hostName:
+
+  ## Controls On-Premise
Connector components configuration on the HDP Server side
+  ## This should be enabled to establish On-premise datastore connections
+  onPremiseConnector:
+    enabled: true
+
+  ## HDP Server ports
+  ports:
+    hdpServer:
+      port: 8080
+      targetPort: 8080
+    opAccessor:
+      port: 40501
+      targetPort: 40501
+    notificationServer:
+      port: 11280
+      targetPort: 11280
+
+  ## HDP deploy properties file name
+  configurationFileName: hdpdeploy.properties
+
+  ## HDP Account database details, by default PostgreSQL is configured as account database
+  database:
+    hostName: postgresql
+    port: 5432
+    name: hdp
+    advancedOptions:
+
+  ## HDP Server Container persistence parameters
+  persistence:
+    mountPath: /hdpshare
+    size: 1Gi
+    storageClassName: azurefile-csi
+
+  ## Update strategy for HDP Server and Helm chart upgrades
+  ## It is recommended to use OnDelete updateStrategy as HDP Server
+  ## bootstrap host (pod-0) needs to be upgraded first in the cluster and
+  ## OnDelete allows more control over the upgrade and recovery in case of failure.
+  updateStrategy:
+    type: OnDelete
+
+  ## HDP Server Container resources parameters
+  resources:
+    requests:
+      memory: "4096Mi"
+      cpu: "2000m"
+    limits:
+      memory: "4096Mi"
+      cpu: "2000m"
+
+  ## HDP Server Container probe parameters
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 300
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 15
+    successThreshold: 1
+
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 10
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 3
+    successThreshold: 1
+
+  services:
+    hdpService:
+      name: hdpserver
+      check: "true"
+      checkInterval: 5m
+      checkPath: /api/healthcheck
+    opAccessorService:
+      name: opaccessor
+      check: "true"
+      checkInterval: 5m
+      checkPath: /
+      aclPath: /connect/opa
+    notificationService:
+      name: notificationserver
+      check: "true"
+      checkInterval: 5m
+      checkPath: /
+      aclPath: /connect/X_DataDirect_Notification_Server
+
+## HAProxy Kubernetes Ingress Helm Chart Configuration
+haproxy:
+  kubernetesIngress:
+    enabled: true
+    ingressName: "hdp-ingress"
+  ## Configure TLS for HAProxy
+  ## Put the PEM-formatted SSL certificate into a secret and provide the secret name in the secretName field.
+  ## The PEM-formatted SSL certificate should contain the private key and the certificate. For example: cat certificate.pem private-key.pem > mycert.pem
+  ## To generate the secret in Kubernetes: kubectl create secret generic tls-cert --from-file=mycert.pem
+  tls:
+    enabled: false
+    secretName: "" # tls-cert
+    ## The name of the certificate file in the secret.
+    certFileName: "" # mycert.pem
+
+## Kubernetes Ingress Controller Configuration
+kubernetesingress:
+  controller:
+    ingressClass: haproxy
+    service:
+      type: "LoadBalancer"
+      externalTrafficPolicy: Local
+      annotations:
+        service.beta.kubernetes.io/azure-load-balancer-health-probe-request-path: "/healthz"
+
+## PostgreSQL Helm Chart Configuration
+## This will setup a new PostgreSQL database with required database and accounts needed for HDP Account Database
+postgres:
+  enabled: true
+  init:
+    postgres_user: postgres
+    dbname: postgres
+  hdp:
+    schemaname: hdp
+    dbname: hdp
+postgresql:
+  global:
+    postgresql:
+      auth:
+        existingSecret: "postgres-secrets"
+        secretKeys:
+          adminPasswordKey: "postgres.db.password"
+  primary:
+    fullname: "postgresql"
+    initdb:
+      scriptsConfigMap: "postgres-initsql"
+    persistence:
+      size: 1Gi
+    resources:
+      limits: {}
+      requests:
+        memory: 256Mi
+        cpu: 250m
+    livenessProbe:
+      enabled: true
+      initialDelaySeconds: 30
+      periodSeconds: 10
+      timeoutSeconds: 5
+      failureThreshold: 6
+      successThreshold: 1
+    readinessProbe:
+      enabled: true
+      initialDelaySeconds: 5
+      periodSeconds: 10
+      timeoutSeconds: 5
+      failureThreshold: 6
+      successThreshold: 1
\ No newline at end of file