diff --git a/.changes/v3.12.0/1195-deprecations.md b/.changes/v3.12.0/1195-deprecations.md new file mode 100644 index 000000000..bb3f5776d --- /dev/null +++ b/.changes/v3.12.0/1195-deprecations.md @@ -0,0 +1,2 @@ +* Resource `vcd_cse_kubernetes_cluster` deprecates the Container Service Extension cluster management guide, + so users should no longer use `vcd_rde` resources to create Kubernetes clusters [GH-1195] diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md new file mode 100644 index 000000000..39903b4c7 --- /dev/null +++ b/.changes/v3.12.0/1195-features.md @@ -0,0 +1,5 @@ +* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension + 4.2.1, 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] +* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension + 4.2.1, 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] +* **New Data Source:** `vcd_version` to get the VCD version and perform additional checks with version constraints [GH-1195] \ No newline at end of file diff --git a/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf b/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf index a449fd426..adb0f3c9f 100644 --- a/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf +++ b/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.1 TKGm cluster creation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_cluster_management +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_cluster_management # before applying this configuration. # # * Please make sure to have CSE v4.1 installed in your VCD appliance and the CSE Server is correctly running. diff --git a/examples/container-service-extension/v4.1/cluster/variables.tf b/examples/container-service-extension/v4.1/cluster/variables.tf index ac19d6f49..fa9dd7f1e 100644 --- a/examples/container-service-extension/v4.1/cluster/variables.tf +++ b/examples/container-service-extension/v4.1/cluster/variables.tf @@ -36,7 +36,7 @@ variable "k8s_cluster_name" { description = "The name of the Kubernetes cluster. Name must contain only lowercase alphanumeric characters or '-' start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters (Example: 'MyCluster')" type = string validation { - condition = can(regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$", var.k8s_cluster_name)) + condition = can(regex("^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$", var.k8s_cluster_name)) error_message = "Name must contain only lowercase alphanumeric characters or '-', start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters."
} } diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf index ba3ef6103..6d7a6ac1d 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } } } @@ -32,3 +32,15 @@ provider "vcd" { logging = true logging_file = "cse_install_step1.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} + +# There are some special rights and elements introduced in VCD 10.5.1 +data "vcd_version" "gte_1051" { + condition = ">= 10.5.1" + fail_if_not_match = false +} diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 2da8e4909..2d1d6adca 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
# # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be @@ -94,7 +94,7 @@ resource "vcd_role" "cse_admin_role" { org = var.administrator_org name = "CSE Admin Role" description = "Used for administrative purposes" - rights = [ + rights = concat([ "API Tokens: Manage", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", @@ -106,7 +106,7 @@ resource "vcd_role" "cse_admin_role" { "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View" - ] + ], data.vcd_version.gte_1051.matches_condition ? ["Organization: Traversal"] : []) } # This will allow to have a user with a limited set of rights that can access the Provider area of VCD. diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf index 306284e13..da8184288 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf index f3d622de6..74ca0790b 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 2: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. 
It must be at least v3.12.0 and configured with a System administrator account. terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } time = { source = "hashicorp/time" @@ -40,3 +40,9 @@ provider "vcd" { logging = true logging_file = "cse_install_step2.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf index a5e2d6cde..8bf1614e0 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf index 6e4112601..d9526f3cc 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf index 41622cefe..a845605d5 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. 
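The new `vcd_version` data source added to the provider configurations above is what ties these snippets to a concrete VCD release: with `fail_if_not_match = true` the plan aborts when the constraint is not met, while `matches_condition` can be consumed elsewhere in the configuration, as the CSE Admin Role does for the "Organization: Traversal" right. The following is a minimal sketch of that pattern, not part of this changeset; the local and output names are illustrative.

# Illustrative only: gate optional configuration on the running VCD version.
# `condition`, `fail_if_not_match` and `matches_condition` are the same attributes used in the files above.
data "vcd_version" "gte_1051" {
  condition         = ">= 10.5.1"
  fail_if_not_match = false # do not abort the plan, just expose the result
}

locals {
  # Rights that only exist from VCD 10.5.1 onwards, appended conditionally as in the CSE Admin Role above
  extra_cse_admin_rights = data.vcd_version.gte_1051.matches_condition ? ["Organization: Traversal"] : []
}

output "vcd_is_at_least_10_5_1" {
  value = data.vcd_version.gte_1051.matches_condition
}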
diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf index 79fe239b2..48530284f 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template b/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template new file mode 100644 index 000000000..3d2b0c5ac --- /dev/null +++ b/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template @@ -0,0 +1,94 @@ +{ + "profiles": [ + { + "name": "production", + "active": true, + "serverConfig": { + "rdePollIntervalInMin": 1, + "heartbeatWatcherTimeoutInMin": 10, + "staleHeartbeatIntervalInMin": 30 + }, + "vcdKeInstances": [ + { + "name": "vcd-container-service-extension" + } + ], + "K8Config": { + "certificateAuthorities": [ + ${k8s_cluster_certificates} + ], + "cni": { + "name": "antrea", + "version": "" + }, + "cpi": { + "name": "cpi for cloud director", + "version": "${cpi_version}" + }, + "csi": [ + { + "name": "csi for cloud director", + "version": "${csi_version}" + } + ], + "mhc": { + "maxUnhealthyNodes": ${max_unhealthy_node_percentage}, + "nodeStartupTimeout": "${node_startup_timeout}", + "nodeNotReadyTimeout": "${node_not_ready_timeout}", + "nodeUnknownTimeout": "${node_unknown_timeout}" + }, + "rdeProjectorVersion": "${rde_projector_version}" + }, + "vcdConfig": { + "sysLogger": { + "host": "${syslog_host}", + "port": "${syslog_port}" + } + }, + "githubConfig": { + "githubPersonalAccessToken": "" + }, + "bootstrapClusterConfig": { + "capiEcosystem": { + "infraProvider": { + "name": "capvcd", + "version": "v${capvcd_version}", + "capvcdRde": { + "nss": "capvcdCluster", + "vendor": "vmware", + "version": "1.3.0" + } + }, + "coreCapiVersion": "v1.4.0", + "bootstrapProvider": { + "name": "CAPBK", + "version": "v1.4.0" + }, + "controlPlaneProvider": { + "name": "KCP", + "version": "v1.4.0" + }, + "certManagerVersion": "v1.11.1" + }, + "certificateAuthorities": [ + ${bootstrap_vm_certificates} + ], + "clusterctl": { + "version": "v1.4.0", + "clusterctlyaml": "" + }, + "dockerVersion": "", + "kindVersion": "v0.19.0", + "kindestNodeVersion": "v1.27.1", + "kubectlVersion": "", + "proxyConfig": { + "noProxy": "${no_proxy}", + "httpProxy": "${http_proxy}", + "httpsProxy": "${https_proxy}" + }, + "sizingPolicy": "${bootstrap_vm_sizing_policy}" + }, + "containerRegistryUrl": "${container_registry_url}" + } + ] +} diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf new file mode 100644 index 000000000..a76207bc3 --- /dev/null +++ 
b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf @@ -0,0 +1,46 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation, step 1: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# ------------------------------------------------------------------------------------------------------------ + +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. +terraform { + required_providers { + vcd = { + source = "vmware/vcd" + version = ">= 3.12" + } + } +} + +provider "vcd" { + url = "${var.vcd_url}/api" + user = var.administrator_user + password = var.administrator_password + auth_type = "integrated" + sysorg = var.administrator_org + org = var.administrator_org + allow_unverified_ssl = var.insecure_login + logging = true + logging_file = "cse_install_step1.log" +} + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} + +# There are some special rights and elements introduced in VCD 10.5.1 +data "vcd_version" "gte_1051" { + condition = ">= 10.5.1" + fail_if_not_match = false +} diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf new file mode 100644 index 000000000..1d79a488a --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -0,0 +1,281 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation, step 1: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * This file contains the same resources created by the "Configure Settings for CSE Server > Set Up Prerequisites" step in the +# UI wizard. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# This is the RDE Interface required to create the "VCDKEConfig" RDE Type. +# This should not be changed. +resource "vcd_rde_interface" "vcdkeconfig_interface" { + vendor = "vmware" + nss = "VCDKEConfig" + version = "1.0.0" + name = "VCDKEConfig" +} + +# This resource will manage the "VCDKEConfig" RDE Type required to instantiate the CSE Server configuration. 
+# The schema URL points to the JSON schema hosted in the terraform-provider-vcd repository. +# This should not be changed. +resource "vcd_rde_type" "vcdkeconfig_type" { + vendor = "vmware" + nss = "VCDKEConfig" + version = "1.1.0" + name = "VCD-KE RDE Schema" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json" + interface_ids = [vcd_rde_interface.vcdkeconfig_interface.id] +} + +# This RDE Interface already exists in VCD, so it must be fetched with an RDE Interface data source. It is required to +# create the "capvcdCluster" RDE Type. +# This should not be changed. +data "vcd_rde_interface" "kubernetes_interface" { + vendor = "vmware" + nss = "k8s" + version = "1.0.0" +} + +# This is the interface required to create the "CAPVCD" Runtime Defined Entity Type. +# This should not be changed. +resource "vcd_rde_interface" "cse_interface" { + vendor = "cse" + nss = "capvcd" + version = "1.0.0" + name = "cseInterface" +} + +# This RDE Interface Behavior is required to obtain the Kubeconfig and other important information. +# This should not be changed. +resource "vcd_rde_interface_behavior" "capvcd_behavior" { + rde_interface_id = vcd_rde_interface.cse_interface.id + name = "getFullEntity" + execution = { + "type" : "noop" + "id" : "getFullEntity" + } +} + +# This resource creates the "capvcdCluster" RDE Type, which is required to create Kubernetes clusters. +# The schema URL points to the JSON schema hosted in the terraform-provider-vcd repository. +# This should not be changed. +resource "vcd_rde_type" "capvcdcluster_type" { + vendor = "vmware" + nss = "capvcdCluster" + version = "1.3.0" + name = "CAPVCD Cluster" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json" + interface_ids = [data.vcd_rde_interface.kubernetes_interface.id] + + depends_on = [vcd_rde_interface_behavior.capvcd_behavior] # Interface Behaviors must be created before any RDE Type +} + +# Access Level for the CAPVCD Type Behavior +# This should not be changed. +resource "vcd_rde_type_behavior_acl" "capvcd_behavior_acl" { + rde_type_id = vcd_rde_type.capvcdcluster_type.id + behavior_id = vcd_rde_interface_behavior.capvcd_behavior.id + access_level_ids = ["urn:vcloud:accessLevel:FullControl"] +} + +# This role has only the minimum set of rights required for the CSE Server to function. +# It is created in the "System" provider organization scope. +# This should not be changed.
+resource "vcd_role" "cse_admin_role" { + org = var.administrator_org + name = "CSE Admin Role" + description = "Used for administrative purposes" + rights = concat([ + "API Tokens: Manage", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Full Access", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Modify", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View" + ], data.vcd_version.gte_1051.matches_condition ? ["Organization: Traversal"] : []) +} + +# This will allow to have a user with a limited set of rights that can access the Provider area of VCD. +# This user will be used by the CSE Server, with an API token that must be created in Step 2. +# This should not be changed. +resource "vcd_org_user" "cse_admin" { + org = var.administrator_org + name = var.cse_admin_username + password = var.cse_admin_password + role = vcd_role.cse_admin_role.name +} + +# This resource manages the Rights Bundle required by tenants to create and consume Kubernetes clusters. +# This should not be changed. 
+resource "vcd_rights_bundle" "k8s_clusters_rights_bundle" { + name = "Kubernetes Clusters Rights Bundle" + description = "Rights bundle with required rights for managing Kubernetes clusters" + rights = [ + "API Tokens: Manage", + "Access All Organization VDCs", + "Catalog: View Published Catalogs", + "Certificate Library: Manage", + "Certificate Library: View", + "General: Administrator View", + "Organization vDC Gateway: Configure Load Balancer", + "Organization vDC Gateway: Configure NAT", + "Organization vDC Gateway: View Load Balancer", + "Organization vDC Gateway: View NAT", + "Organization vDC Gateway: View", + "Organization vDC Named Disk: Create", + "Organization vDC Named Disk: Edit Properties", + "Organization vDC Named Disk: View Properties", + "Organization vDC Shared Named Disk: Create", + "vApp: Allow All Extra Config", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "vmware:tkgcluster: Full Access", + "vmware:tkgcluster: Modify", + "vmware:tkgcluster: View", + "vmware:tkgcluster: Administrator View", + "vmware:tkgcluster: Administrator Full access", + ] + publish_to_all_tenants = true # This needs to be published to all the Organizations +} + + +# With the Rights Bundle specified above, we need also a new Role for tenant users who want to create and manage +# Kubernetes clusters. +# This should not be changed. 
+resource "vcd_global_role" "k8s_cluster_author" { + name = "Kubernetes Cluster Author" + description = "Role to create Kubernetes clusters" + rights = [ + "API Tokens: Manage", + "Access All Organization VDCs", + "Catalog: Add vApp from My Cloud", + "Catalog: View Private and Shared Catalogs", + "Catalog: View Published Catalogs", + "Certificate Library: View", + "Organization vDC Compute Policy: View", + "Organization vDC Disk: View IOPS", + "Organization vDC Gateway: Configure Load Balancer", + "Organization vDC Gateway: Configure NAT", + "Organization vDC Gateway: View", + "Organization vDC Gateway: View Load Balancer", + "Organization vDC Gateway: View NAT", + "Organization vDC Named Disk: Create", + "Organization vDC Named Disk: Delete", + "Organization vDC Named Disk: Edit Properties", + "Organization vDC Named Disk: View Encryption Status", + "Organization vDC Named Disk: View Properties", + "Organization vDC Network: View Properties", + "Organization vDC Shared Named Disk: Create", + "Organization vDC: VM-VM Affinity Edit", + "Organization: View", + "UI Plugins: View", + "VAPP_VM_METADATA_TO_VCENTER", + "vApp Template / Media: Copy", + "vApp Template / Media: Edit", + "vApp Template / Media: View", + "vApp Template: Checkout", + "vApp: Allow All Extra Config", + "vApp: Copy", + "vApp: Create / Reconfigure", + "vApp: Delete", + "vApp: Download", + "vApp: Edit Properties", + "vApp: Edit VM CPU", + "vApp: Edit VM Compute Policy", + "vApp: Edit VM Hard Disk", + "vApp: Edit VM Memory", + "vApp: Edit VM Network", + "vApp: Edit VM Properties", + "vApp: Manage VM Password Settings", + "vApp: Power Operations", + "vApp: Sharing", + "vApp: Snapshot Operations", + "vApp: Upload", + "vApp: Use Console", + "vApp: VM Boot Options", + "vApp: View ACL", + "vApp: View VM and VM's Disks Encryption Status", + "vApp: View VM metrics", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "vmware:tkgcluster: Full Access", + "vmware:tkgcluster: Modify", + "vmware:tkgcluster: View", + ] + + publish_to_all_tenants = true # This needs to be published to all the Organizations + + # As we use rights created by the CAPVCD Type created previously, we need to depend on it + depends_on = [ + vcd_rights_bundle.k8s_clusters_rights_bundle + ] +} + +# The VM Sizing Policies defined below MUST be created as they are specified in this HCL. These are the default +# policies required by CSE to create TKGm clusters. +# This should not be changed. 
+resource "vcd_vm_sizing_policy" "tkg_xl" { + name = "TKG extra-large" + description = "Extra-large VM sizing policy for a Kubernetes cluster node (8 CPU, 32GB memory)" + cpu { + count = 8 + } + memory { + size_in_mb = "32768" + } +} + +resource "vcd_vm_sizing_policy" "tkg_l" { + name = "TKG large" + description = "Large VM sizing policy for a Kubernetes cluster node (4 CPU, 16GB memory)" + cpu { + count = 4 + } + memory { + size_in_mb = "16384" + } +} + +resource "vcd_vm_sizing_policy" "tkg_m" { + name = "TKG medium" + description = "Medium VM sizing policy for a Kubernetes cluster node (2 CPU, 8GB memory)" + cpu { + count = 2 + } + memory { + size_in_mb = "8192" + } +} + +resource "vcd_vm_sizing_policy" "tkg_s" { + name = "TKG small" + description = "Small VM sizing policy for a Kubernetes cluster node (2 CPU, 4GB memory)" + cpu { + count = 2 + } + memory { + size_in_mb = "4048" + } +} diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf new file mode 100644 index 000000000..11ddb0e56 --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -0,0 +1,46 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation, step 1: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * This file contains the same resources created by the "Configure Settings for CSE Server > Set Configuration Parameters" step in the +# UI wizard. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on the resource for context. +# ------------------------------------------------------------------------------------------------------------ + +# This RDE configures the CSE Server. It can be customised through variables, and the bootstrap_cluster_sizing_policy +# can also be changed. +# Other than that, this should be applied as it is. +resource "vcd_rde" "vcdkeconfig_instance" { + org = var.administrator_org + name = "vcdKeConfig" + rde_type_id = vcd_rde_type.vcdkeconfig_type.id + resolve = true + input_entity = templatefile(var.vcdkeconfig_template_filepath, { + capvcd_version = var.capvcd_version + cpi_version = var.cpi_version + csi_version = var.csi_version + rde_projector_version = var.rde_projector_version + github_personal_access_token = var.github_personal_access_token + bootstrap_vm_sizing_policy = vcd_vm_sizing_policy.tkg_s.name # References the small VM Sizing Policy, it can be changed. 
+ no_proxy = var.no_proxy + http_proxy = var.http_proxy + https_proxy = var.https_proxy + syslog_host = var.syslog_host + syslog_port = var.syslog_port + node_startup_timeout = var.node_startup_timeout + node_not_ready_timeout = var.node_not_ready_timeout + node_unknown_timeout = var.node_unknown_timeout + max_unhealthy_node_percentage = var.max_unhealthy_node_percentage + container_registry_url = var.container_registry_url + k8s_cluster_certificates = join(",", var.k8s_cluster_certificates) + bootstrap_vm_certificates = join(",", var.bootstrap_vm_certificates) + }) +} diff --git a/examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example b/examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example new file mode 100644 index 000000000..2d1d3f7ec --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example @@ -0,0 +1,61 @@ +# Change configuration to your needs and rename to 'terraform.tfvars' +# For more details about the variables specified here, please read the guide first: +# https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install + +# ------------------------------------------------ +# VCD Provider config +# ------------------------------------------------ + +vcd_url = "https://vcd.my-awesome-corp.com" +administrator_user = "administrator" +administrator_password = "change-me" +administrator_org = "System" +insecure_login = "false" + +# ------------------------------------------------ +# CSE Server Pre-requisites +# ------------------------------------------------ + +# This user will be created by the Terraform configuration, so you can +# customise what its username and password will be. +# This user will have an API token that must be consumed by the CSE Server. +cse_admin_username = "cse_admin" +cse_admin_password = "change-me" + +# ------------------------------------------------ +# CSE Server Settings +# ------------------------------------------------ + +# These are required to create the Runtime Defined Entity that will contain the CSE Server configuration (vcdKeConfig) +# To know more about the specific versions, please refer to the CSE documentation. +# The values set here correspond to CSE 4.2: +vcdkeconfig_template_filepath = "../../entities/vcdkeconfig.json.template" +capvcd_version = "1.2.0" +cpi_version = "1.5.0" +csi_version = "1.5.0" +rde_projector_version = "0.7.0" + +# Optional but recommended to avoid rate limiting when configuring the TKGm clusters. +# Create this one in https://github.com/settings/tokens +github_personal_access_token = "" + +# Node will be considered unhealthy and remediated if joining the cluster takes longer than this timeout (seconds) +node_startup_timeout = "900" +# A newly joined node will be considered unhealthy and remediated if it cannot host workloads for longer than this timeout (seconds) +node_not_ready_timeout = "300" +# A healthy node will be considered unhealthy and remediated if it is unreachable for longer than this timeout (seconds) +node_unknown_timeout = "300" +# Remediation will be suspended when the number of unhealthy nodes exceeds this percentage. 
+# (100% means that unhealthy nodes will always be remediated, while 0% means that unhealthy nodes will never be remediated) +max_unhealthy_node_percentage = 100 + +# URL from where TKG clusters will fetch container images +container_registry_url = "projects.registry.vmware.com" + +# Certificate(s) to allow the ephemeral VM (created during cluster creation) to authenticate with. +# For example, when pulling images from a container registry. (Copy and paste .cert file contents) +k8s_cluster_certificates = [] + +# Certificate(s) to allow clusters to authenticate with. +# For example, when pulling images from a container registry. (Copy and paste .cert file contents) +bootstrap_vm_certificates = [] diff --git a/examples/container-service-extension/v4.2/install/step1/variables.tf b/examples/container-service-extension/v4.2/install/step1/variables.tf new file mode 100644 index 000000000..123d9db5f --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step1/variables.tf @@ -0,0 +1,164 @@ +# ------------------------------------------------ +# Provider config +# ------------------------------------------------ + +variable "vcd_url" { + description = "The VCD URL (Example: 'https://vcd.my-company.com')" + type = string +} + +variable "insecure_login" { + description = "Allow unverified SSL connections when operating with VCD" + type = bool + default = false +} + +variable "administrator_user" { + description = "The VCD administrator user (Example: 'administrator')" + default = "administrator" + type = string +} + +variable "administrator_password" { + description = "The VCD administrator password" + type = string + sensitive = true +} + +variable "administrator_org" { + description = "The VCD administrator organization (Example: 'System')" + type = string + default = "System" +} + +# ------------------------------------------------ +# CSE Server Pre-requisites +# ------------------------------------------------ + +variable "cse_admin_username" { + description = "The CSE administrator user that will be created (Example: 'cse-admin')" + type = string +} + +variable "cse_admin_password" { + description = "The password to set for the CSE administrator to be created" + type = string + sensitive = true +} + +# ------------------------------------------------ +# CSE Server Settings +# ------------------------------------------------ + +variable "vcdkeconfig_template_filepath" { + type = string + description = "Path to the VCDKEConfig JSON template" + default = "../../entities/vcdkeconfig.json.template" +} + +variable "capvcd_version" { + type = string + description = "Version of CAPVCD" + default = "1.2.0" +} + +variable "cpi_version" { + type = string + description = "VCDKEConfig: Cloud Provider Interface version" + default = "1.5.0" +} + +variable "csi_version" { + type = string + description = "VCDKEConfig: Container Storage Interface version" + default = "1.5.0" +} + +variable "rde_projector_version" { + type = string + description = "VCDKEConfig: RDE Projector version" + default = "0.7.0" +} + +variable "github_personal_access_token" { + type = string + description = "VCDKEConfig: Prevents potential github rate limiting errors during cluster creation and deletion" + default = "" + sensitive = true +} + +variable "no_proxy" { + type = string + description = "VCDKEConfig: List of comma-separated domains without spaces" + default = "localhost,127.0.0.1,cluster.local,.svc" +} + +variable "http_proxy" { + type = string + description = "VCDKEConfig: Address of your HTTP proxy server" + 
default = "" +} + +variable "https_proxy" { + type = string + description = "VCDKEConfig: Address of your HTTPS proxy server" + default = "" +} + +variable "syslog_host" { + type = string + description = "VCDKEConfig: Domain for system logs" + default = "" +} + +variable "syslog_port" { + type = string + description = "VCDKEConfig: Port for system logs" + default = "" +} + +variable "node_startup_timeout" { + type = string + description = "VCDKEConfig: Node will be considered unhealthy and remediated if joining the cluster takes longer than this timeout (seconds)" + default = "900" +} + +variable "node_not_ready_timeout" { + type = string + description = "VCDKEConfig: A newly joined node will be considered unhealthy and remediated if it cannot host workloads for longer than this timeout (seconds)" + default = "300" +} + +variable "node_unknown_timeout" { + type = string + description = "VCDKEConfig: A healthy node will be considered unhealthy and remediated if it is unreachable for longer than this timeout (seconds)" + default = "300" +} + +variable "max_unhealthy_node_percentage" { + type = number + description = "VCDKEConfig: Remediation will be suspended when the number of unhealthy nodes exceeds this percentage. (100% means that unhealthy nodes will always be remediated, while 0% means that unhealthy nodes will never be remediated)" + default = 100 + validation { + condition = var.max_unhealthy_node_percentage >= 0 && var.max_unhealthy_node_percentage <= 100 + error_message = "The value must be a percentage, hence between 0 and 100" + } +} + +variable "container_registry_url" { + type = string + description = "VCDKEConfig: URL from where TKG clusters will fetch container images" + default = "projects.registry.vmware.com" +} + +variable "bootstrap_vm_certificates" { + type = list(string) + description = "VCDKEConfig: Certificate(s) to allow the ephemeral VM (created during cluster creation) to authenticate with. For example, when pulling images from a container registry. (Copy and paste .cert file contents)" + default = [] +} + +variable "k8s_cluster_certificates" { + type = list(string) + description = "VCDKEConfig: Certificate(s) to allow clusters to authenticate with. For example, when pulling images from a container registry. (Copy and paste .cert file contents)" + default = [] +} diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf new file mode 100644 index 000000000..0decbddfa --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf @@ -0,0 +1,48 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation, step 2: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# ------------------------------------------------------------------------------------------------------------ + +# VCD Provider configuration. 
It must be at least v3.12.0 and configured with a System administrator account. +terraform { + required_providers { + vcd = { + source = "vmware/vcd" + version = ">= 3.12" + } + time = { + source = "hashicorp/time" + version = "0.9.1" + } + local = { + source = "hashicorp/local" + version = "2.4.0" + } + } +} + +provider "vcd" { + url = "${var.vcd_url}/api" + user = var.administrator_user + password = var.administrator_password + auth_type = "integrated" + sysorg = var.administrator_org + org = var.administrator_org + allow_unverified_ssl = var.insecure_login + logging = true + logging_file = "cse_install_step2.log" +} + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf new file mode 100644 index 000000000..102bd7f02 --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf @@ -0,0 +1,449 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# +# * Please review this file carefully, as it shapes the structure of your organization, hence you should customise +# it to your needs. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# The two resources below will create the two Organizations mentioned in the CSE documentation: +# https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/index.html + +# The Solutions Organization will host the CSE Server and it is intended to be used by CSE Administrators only. +# The TKGm clusters are NOT placed here. The attributes related to lease are set to unlimited, as the CSE +# Server should always be up and running in order to process requests. +resource "vcd_org" "solutions_organization" { + name = "solutions_org" + full_name = "Solutions Organization" + is_enabled = true + delete_force = true + delete_recursive = true + + vapp_lease { + maximum_runtime_lease_in_sec = 0 + power_off_on_runtime_lease_expiration = false + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } + + vapp_template_lease { + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } +} + +# The Tenant Organization will host the TKGm clusters and it is intended to be used by tenants. +# The TKGm clusters must be placed here. The attributes related to lease are set to unlimited, as the TKGm clusters vApps +# should not be powered off.
+resource "vcd_org" "tenant_organization" { + name = "tenant_org" + full_name = "Tenant Organization" + is_enabled = true + delete_force = true + delete_recursive = true + + vapp_lease { + maximum_runtime_lease_in_sec = 0 + power_off_on_runtime_lease_expiration = false + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } + + vapp_template_lease { + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } +} + +# This section will create one VDC per organization. To create the VDCs we need to fetch some elements like +# Provider VDC, Edge Clusters, etc. +data "vcd_provider_vdc" "nsxt_pvdc" { + name = var.provider_vdc_name +} + +data "vcd_nsxt_edge_cluster" "nsxt_edgecluster" { + org = vcd_org.tenant_organization.name + provider_vdc_id = data.vcd_provider_vdc.nsxt_pvdc.id + name = var.nsxt_edge_cluster_name +} + +# Fetch the VM Sizing Policies created in step 1 +data "vcd_vm_sizing_policy" "tkg_s" { + name = "TKG small" +} + +data "vcd_vm_sizing_policy" "tkg_m" { + name = "TKG medium" +} + +data "vcd_vm_sizing_policy" "tkg_l" { + name = "TKG large" +} + +data "vcd_vm_sizing_policy" "tkg_xl" { + name = "TKG extra-large" +} + +# The VDC that will host the Kubernetes clusters. +resource "vcd_org_vdc" "tenant_vdc" { + name = "tenant_vdc" + description = "Tenant VDC" + org = vcd_org.tenant_organization.name + + allocation_model = "AllocationVApp" # You can use other models. + network_pool_name = var.network_pool_name + provider_vdc_name = data.vcd_provider_vdc.nsxt_pvdc.name + edge_cluster_id = data.vcd_nsxt_edge_cluster.nsxt_edgecluster.id + + # You can tune these arguments to your fit your needs. + network_quota = 50 + compute_capacity { + cpu { + allocated = 0 + } + + memory { + allocated = 0 + } + } + + # You can tune these arguments to your fit your needs. + storage_profile { + name = "*" + limit = 0 + default = true + } + + # You can tune these arguments to your fit your needs. + enabled = true + enable_thin_provisioning = true + enable_fast_provisioning = true + delete_force = true + delete_recursive = true + + # Make sure you specify the required VM Sizing Policies managed by the data sources specified above. + default_compute_policy_id = data.vcd_vm_sizing_policy.tkg_s.id + vm_sizing_policy_ids = [ + data.vcd_vm_sizing_policy.tkg_xl.id, + data.vcd_vm_sizing_policy.tkg_l.id, + data.vcd_vm_sizing_policy.tkg_m.id, + data.vcd_vm_sizing_policy.tkg_s.id, + ] +} + +# The VDC that will host the CSE server and other provider-level items +resource "vcd_org_vdc" "solutions_vdc" { + name = "solutions_vdc" + description = "Solutions VDC" + org = vcd_org.solutions_organization.name + + allocation_model = "AllocationVApp" # You can use other models + network_pool_name = var.network_pool_name + provider_vdc_name = data.vcd_provider_vdc.nsxt_pvdc.name + edge_cluster_id = data.vcd_nsxt_edge_cluster.nsxt_edgecluster.id + + # You can tune these arguments to your fit your needs + network_quota = 10 + compute_capacity { + cpu { + allocated = 0 + } + + memory { + allocated = 0 + } + } + + # You can tune these arguments to your fit your needs + storage_profile { + name = "*" + limit = 0 + default = true + } + + # You can tune these arguments to your fit your needs + enabled = true + enable_thin_provisioning = true + enable_fast_provisioning = true + delete_force = true + delete_recursive = true +} + +# The networking setup specified below will configure one Provider Gateway + Edge Gateway + Routed network per +# organization. 
You can customise this section according to your needs. + +data "vcd_nsxt_manager" "cse_nsxt_manager" { + name = var.nsxt_manager_name +} + +data "vcd_nsxt_tier0_router" "solutions_tier0_router" { + name = var.solutions_nsxt_tier0_router_name + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id +} + +resource "vcd_external_network_v2" "solutions_tier0" { + name = "solutions_tier0" + + nsxt_network { + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id + nsxt_tier0_router_id = data.vcd_nsxt_tier0_router.solutions_tier0_router.id + } + + ip_scope { + gateway = var.solutions_provider_gateway_gateway_ip + prefix_length = var.solutions_provider_gateway_gateway_prefix_length + + dynamic "static_ip_pool" { + for_each = var.solutions_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +data "vcd_nsxt_tier0_router" "tenant_tier0_router" { + name = var.tenant_nsxt_tier0_router_name + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id +} + +resource "vcd_external_network_v2" "tenant_tier0" { + name = "tenant_tier0" + + nsxt_network { + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id + nsxt_tier0_router_id = data.vcd_nsxt_tier0_router.tenant_tier0_router.id + } + + ip_scope { + gateway = var.tenant_provider_gateway_gateway_ip + prefix_length = var.tenant_provider_gateway_gateway_prefix_length + + dynamic "static_ip_pool" { + for_each = var.tenant_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# This Edge Gateway will consume automatically the available IPs from the Provider Gateway. +resource "vcd_nsxt_edgegateway" "solutions_edgegateway" { + org = vcd_org.solutions_organization.name + owner_id = vcd_org_vdc.solutions_vdc.id + + name = "solutions_edgegateway" + external_network_id = vcd_external_network_v2.solutions_tier0.id + + subnet { + gateway = var.solutions_provider_gateway_gateway_ip + prefix_length = var.solutions_provider_gateway_gateway_prefix_length + primary_ip = var.solutions_provider_gateway_static_ip_ranges[0][0] + + dynamic "allocated_ips" { + for_each = var.solutions_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# This Edge Gateway will consume automatically the available IPs from the Provider Gateway. +resource "vcd_nsxt_edgegateway" "tenant_edgegateway" { + org = vcd_org.tenant_organization.name + owner_id = vcd_org_vdc.tenant_vdc.id + + name = "tenant_edgegateway" + external_network_id = vcd_external_network_v2.tenant_tier0.id + + subnet { + gateway = var.tenant_provider_gateway_gateway_ip + prefix_length = var.tenant_provider_gateway_gateway_prefix_length + primary_ip = var.tenant_provider_gateway_static_ip_ranges[0][0] + + dynamic "allocated_ips" { + for_each = var.tenant_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# CSE requires ALB to be configured to support the LoadBalancers that are deployed by the CPI of VMware Cloud Director. 
+resource "vcd_nsxt_alb_controller" "cse_avi_controller" { + name = "cse_alb_controller" + username = var.alb_controller_username + password = var.alb_controller_password + url = var.alb_controller_url +} + +data "vcd_nsxt_alb_importable_cloud" "cse_importable_cloud" { + name = var.alb_importable_cloud_name + controller_id = vcd_nsxt_alb_controller.cse_avi_controller.id +} + +resource "vcd_nsxt_alb_cloud" "cse_nsxt_alb_cloud" { + name = "cse_nsxt_alb_cloud" + + controller_id = vcd_nsxt_alb_controller.cse_avi_controller.id + importable_cloud_id = data.vcd_nsxt_alb_importable_cloud.cse_importable_cloud.id + network_pool_id = data.vcd_nsxt_alb_importable_cloud.cse_importable_cloud.network_pool_id +} + +resource "vcd_nsxt_alb_service_engine_group" "cse_alb_seg" { + name = "cse_alb_seg" + alb_cloud_id = vcd_nsxt_alb_cloud.cse_nsxt_alb_cloud.id + importable_service_engine_group_name = "Default-Group" + reservation_model = "SHARED" +} + +# We introduce a sleep to wait for the provider part of ALB to be ready before the assignment to the Edge gateways +resource "time_sleep" "cse_alb_wait" { + depends_on = [vcd_nsxt_alb_service_engine_group.cse_alb_seg] + create_duration = "30s" +} + +## ALB for solutions edge gateway +resource "vcd_nsxt_alb_settings" "solutions_alb_settings" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + is_active = true + + # This dependency is required to make sure that provider part of operations is done + depends_on = [time_sleep.cse_alb_wait] +} + +resource "vcd_nsxt_alb_edgegateway_service_engine_group" "solutions_assignment" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_alb_settings.solutions_alb_settings.edge_gateway_id + service_engine_group_id = vcd_nsxt_alb_service_engine_group.cse_alb_seg.id + reserved_virtual_services = 50 + max_virtual_services = 50 +} + +resource "vcd_nsxt_alb_edgegateway_service_engine_group" "tenant_assignment" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_alb_settings.tenant_alb_settings.edge_gateway_id + service_engine_group_id = vcd_nsxt_alb_service_engine_group.cse_alb_seg.id + reserved_virtual_services = 50 + max_virtual_services = 50 +} + +## ALB for tenant edge gateway +resource "vcd_nsxt_alb_settings" "tenant_alb_settings" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + is_active = true + + # This dependency is required to make sure that provider part of operations is done + depends_on = [time_sleep.cse_alb_wait] +} + +# We create a Routed network in the Solutions organization that will be used by the CSE Server. +resource "vcd_network_routed_v2" "solutions_routed_network" { + org = vcd_org.solutions_organization.name + name = "solutions_routed_network" + description = "Solutions routed network" + + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + gateway = var.solutions_routed_network_gateway_ip + prefix_length = var.solutions_routed_network_prefix_length + + static_ip_pool { + start_address = var.solutions_routed_network_ip_pool_start_address + end_address = var.solutions_routed_network_ip_pool_end_address + } + + dns1 = var.solutions_routed_network_dns + dns_suffix = var.solutions_routed_network_dns_suffix +} + +# We create a Routed network in the Tenant organization that will be used by the Kubernetes clusters. 
+resource "vcd_network_routed_v2" "tenant_routed_network" { + org = vcd_org.tenant_organization.name + name = "tenant_net_routed" + description = "Routed network for the K8s clusters" + + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + gateway = var.tenant_routed_network_gateway_ip + prefix_length = var.tenant_routed_network_prefix_length + + static_ip_pool { + start_address = var.tenant_routed_network_ip_pool_start_address + end_address = var.tenant_routed_network_ip_pool_end_address + } + + dns1 = var.tenant_routed_network_dns + dns_suffix = var.tenant_routed_network_dns_suffix +} + +# We need SNAT rules in both networks to provide with Internet connectivity. +resource "vcd_nsxt_nat_rule" "solutions_nat" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + name = "Solutions SNAT rule" + rule_type = "SNAT" + description = "Solutions SNAT rule" + + external_address = var.solutions_snat_external_ip + internal_address = var.solutions_snat_internal_network_cidr + logging = true +} + +resource "vcd_nsxt_nat_rule" "tenant_nat" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + name = "Tenant SNAT rule" + rule_type = "SNAT" + description = "Tenant SNAT rule" + + external_address = var.tenant_snat_external_ip + internal_address = var.tenant_snat_internal_network_cidr + logging = true +} + +# WARNING: Please adjust this rule to your needs. The CSE Server requires Internet access to be configured. +resource "vcd_nsxt_firewall" "solutions_firewall" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + rule { + action = "ALLOW" + name = "Allow all traffic" + direction = "IN_OUT" + ip_protocol = "IPV4_IPV6" + } +} + +# WARNING: Please adjust this rule to your needs. The Bootstrap clusters and final Kubernetes clusters require Internet access to be configured. +resource "vcd_nsxt_firewall" "tenant_firewall" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + rule { + action = "ALLOW" + name = "Allow all traffic" + direction = "IN_OUT" + ip_protocol = "IPV4_IPV6" + } +} diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf new file mode 100644 index 000000000..75b5015c1 --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf @@ -0,0 +1,75 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# In this section we create two Catalogs, one to host all CSE Server OVAs and another one to host TKGm OVAs. +# They are created in the Solutions organization and only the TKGm will be shared as read-only. This will guarantee +# that only CSE admins can manage OVAs. 
+resource "vcd_catalog" "cse_catalog" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + name = "cse_catalog" + + delete_force = "true" + delete_recursive = "true" + + # In this example, everything is created from scratch, so it is needed to wait for the VDC to be available, so the + # Catalog can be created. + depends_on = [ + vcd_org_vdc.solutions_vdc + ] +} + +resource "vcd_catalog" "tkgm_catalog" { + org = vcd_org.solutions_organization.name # References the Solutions Organization + name = "tkgm_catalog" + + delete_force = "true" + delete_recursive = "true" + + # In this example, everything is created from scratch, so it is needed to wait for the VDC to be available, so the + # Catalog can be created. + depends_on = [ + vcd_org_vdc.solutions_vdc + ] +} + +# We share the TKGm Catalog with the Tenant Organization created previously. +resource "vcd_catalog_access_control" "tkgm_catalog_ac" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.tkgm_catalog.id + shared_with_everyone = false + shared_with { + org_id = vcd_org.tenant_organization.id # Shared with the Tenant Organization + access_level = "ReadOnly" + } +} + +# We upload a minimum set of OVAs for CSE to work. Read the official documentation to check +# where to find the OVAs: +# https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/index.html +resource "vcd_catalog_vapp_template" "tkgm_ova" { + for_each = toset(var.tkgm_ova_files) + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.tkgm_catalog.id # References the TKGm Catalog created previously + + name = replace(each.key, ".ova", "") + description = replace(each.key, ".ova", "") + ova_path = format("%s/%s", var.tkgm_ova_folder, each.key) +} + +resource "vcd_catalog_vapp_template" "cse_ova" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog created previously + + name = replace(var.cse_ova_file, ".ova", "") + description = replace(var.cse_ova_file, ".ova", "") + ova_path = format("%s/%s", var.cse_ova_folder, var.cse_ova_file) +} + diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf new file mode 100644 index 000000000..69a232905 --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -0,0 +1,105 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2 installation: +# +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# Log in to VCD with the cse_admin username created above. This will be used to provision +# an API token that must be consumed by the CSE Server. 
+# This should not be changed.
+provider "vcd" {
+  alias                = "cse_admin"
+  url                  = "${var.vcd_url}/api"
+  user                 = var.cse_admin_username
+  password             = var.cse_admin_password
+  auth_type            = "integrated"
+  org                  = var.administrator_org
+  allow_unverified_ssl = var.insecure_login
+  logging              = true
+  logging_file         = "cse_install_cse_admin.log"
+}
+
+# Generates an API token for the CSE Admin user, which will be used to instantiate the CSE Server.
+# This should not be changed.
+resource "vcd_api_token" "cse_admin_token" {
+  provider         = vcd.cse_admin
+  name             = "CSE Admin API Token"
+  file_name        = var.cse_admin_api_token_file
+  allow_token_file = true
+}
+
+data "local_file" "cse_admin_token_file" {
+  filename = vcd_api_token.cse_admin_token.file_name
+}
+
+# This is the CSE Server vApp
+resource "vcd_vapp" "cse_server_vapp" {
+  org  = vcd_org.solutions_organization.name
+  vdc  = vcd_org_vdc.solutions_vdc.name
+  name = "CSE Server vApp"
+
+  lease {
+    runtime_lease_in_sec = 0
+    storage_lease_in_sec = 0
+  }
+}
+
+# The CSE Server vApp network that will consume an existing routed network from
+# the Solutions organization.
+resource "vcd_vapp_org_network" "cse_server_network" {
+  org = vcd_org.solutions_organization.name
+  vdc = vcd_org_vdc.solutions_vdc.name
+
+  vapp_name        = vcd_vapp.cse_server_vapp.name
+  org_network_name = vcd_network_routed_v2.solutions_routed_network.name
+
+  reboot_vapp_on_removal = true
+}
+
+# The CSE Server VM. It requires specific guest properties to be set for it to work
+# properly. You can troubleshoot it by checking the cse.log file.
+resource "vcd_vapp_vm" "cse_server_vm" {
+  org = vcd_org.solutions_organization.name
+  vdc = vcd_org_vdc.solutions_vdc.name
+
+  vapp_name = vcd_vapp.cse_server_vapp.name
+  name      = "CSE Server VM"
+
+  vapp_template_id = vcd_catalog_vapp_template.cse_ova.id
+
+  network {
+    type               = "org"
+    name               = vcd_vapp_org_network.cse_server_network.org_network_name
+    ip_allocation_mode = "POOL"
+  }
+
+  guest_properties = {
+
+    # VCD host
+    "cse.vcdHost" = var.vcd_url
+
+    # CSE Server org
+    "cse.vAppOrg" = vcd_org.solutions_organization.name
+
+    # CSE admin account's Access Token
+    "cse.vcdRefreshToken" = jsondecode(data.local_file.cse_admin_token_file.content)["refresh_token"]
+
+    # CSE admin account's username
+    "cse.vcdUsername" = var.cse_admin_username
+
+    # CSE admin account's org
+    "cse.userOrg" = vcd_org.solutions_organization.name
+  }
+
+  customization {
+    force                      = false
+    enabled                    = true
+    allow_local_admin_password = true
+    auto_generate_password     = true
+  }
+}
diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf
new file mode 100644
index 000000000..ed0354665
--- /dev/null
+++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf
@@ -0,0 +1,27 @@
+# ------------------------------------------------------------------------------------------------------------
+# CSE 4.2 installation:
+#
+# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install
+# before applying this configuration.
+#
+# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs.
+# Other than that, this snippet should be applied as it is.
+# You can check the comments on each resource/data source for more help and context.
+# ------------------------------------------------------------------------------------------------------------ + +# This resource installs the UI Plugin. It can be useful for tenant users that are not familiar with +# Terraform. +resource "vcd_ui_plugin" "k8s_container_clusters_ui_plugin" { + count = var.k8s_container_clusters_ui_plugin_path == "" ? 0 : 1 + plugin_path = var.k8s_container_clusters_ui_plugin_path + enabled = true + tenant_ids = [ + data.vcd_org.system_org.id, + vcd_org.solutions_organization.id, + vcd_org.tenant_organization.id, + ] +} + +data "vcd_org" "system_org" { + name = var.administrator_org +} diff --git a/examples/container-service-extension/v4.2/install/step2/terraform.tfvars.example b/examples/container-service-extension/v4.2/install/step2/terraform.tfvars.example new file mode 100644 index 000000000..204a477bf --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/terraform.tfvars.example @@ -0,0 +1,106 @@ +# Change configuration to your needs and rename to 'terraform.tfvars' +# For more details about the variables specified here, please read the guide first: +# https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install + +# ------------------------------------------------ +# VCD Provider config +# ------------------------------------------------ + +vcd_url = "https://vcd.my-awesome-corp.com" +administrator_user = "administrator" +administrator_password = "change-me" +administrator_org = "System" +insecure_login = "false" + +# ------------------------------------------------ +# Infrastructure +# ------------------------------------------------ + +# These variables are required to create both the Solutions NSX-T VDC and Tenant NSX-T VDC +# The values here need to exist already in your VCD appliance. +provider_vdc_name = "change-me" # Name of an existing PVDC that can be used to create VDCs +nsxt_edge_cluster_name = "change-me" # Name of an existing NSX-T Edge Cluster that can be used to create VDCs +network_pool_name = "change-me" # Name of an existing network pool that can be used to create VDCs + +# These variables are used to build a basic networking setup to run the CSE Server +# and the TKGm clusters +nsxt_manager_name = "change-me" # Name of an existing NSX-T manager, required to create the Provider Gateways + +# These are all required to create the Solutions Organization Provider Gateway. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +solutions_nsxt_tier0_router_name = "change-me" # The name of an existing NSX-T Tier 0 router +solutions_provider_gateway_gateway_ip = "10.20.30.250" # Gateway IP to use in the Solutions Provider Gateway +solutions_provider_gateway_gateway_prefix_length = "19" # Prefix length to use in the Solutions Provider Gateway +solutions_provider_gateway_static_ip_ranges = [ # IP ranges to use in the Solutions Provider Gateway + ["10.20.30.16", "10.20.30.16"], # Single IP + ["10.20.30.20", "10.20.30.25"], # Many IPs +] + +# These are all required to create the Tenant Organization Provider Gateway. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. 
+tenant_nsxt_tier0_router_name = "change-me" # The name of an existing NSX-T Tier 0 router +tenant_provider_gateway_gateway_ip = "10.30.20.150" # Gateway IP to use in the Tenant Provider Gateway +tenant_provider_gateway_gateway_prefix_length = "19" # Prefix length to use in the Tenant Provider Gateway +tenant_provider_gateway_static_ip_ranges = [ # IP ranges to use in the Tenant Provider Gateway + ["10.30.20.14", "10.30.20.14"], # Single IP + ["10.30.20.30", "10.30.20.37"], # Many IPs +] + +# These will configure the Routed network for the Solutions Organization VDC. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +solutions_routed_network_gateway_ip = "192.168.0.1" # Required. Gateway IP for the Routed network inside the Solutions Organization +solutions_routed_network_prefix_length = "24" # Required. Prefix length for the Routed network inside the Solutions Organization +solutions_routed_network_ip_pool_start_address = "192.168.0.2" # Required. First IP for the Routed network pool +solutions_routed_network_ip_pool_end_address = "192.168.0.254" # Required. Last IP for the Routed network pool +solutions_snat_external_ip = "10.20.30.25" # Required. For example, pick the last IP from solutions_provider_gateway_static_ip_ranges +solutions_snat_internal_network_cidr = "192.168.0.0/24" # Required. It should match the Routed network IP addresses +solutions_routed_network_dns = "" # Optional, if you need DNS +solutions_routed_network_dns_suffix = "" # Optional, if you need DNS + +# These will configure the Routed network for the Tenant Organization VDC. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +tenant_routed_network_gateway_ip = "10.0.0.1" # Required. Gateway IP for the Routed network inside the Tenant Organization +tenant_routed_network_prefix_length = "16" # Required. Prefix length for the Routed network inside the Tenant Organization +tenant_routed_network_ip_pool_start_address = "10.0.0.2" # Required. First IP for the Routed network pool +tenant_routed_network_ip_pool_end_address = "10.0.255.254" # Required. Last IP for the Routed network pool +tenant_snat_external_ip = "10.30.20.37" # Required. For example, pick the last IP from tenant_provider_gateway_static_ip_ranges +tenant_snat_internal_network_cidr = "10.0.0.0/16" # Required. It should match the Routed network IP addresses +tenant_routed_network_dns = "" # Optional, if you need DNS +tenant_routed_network_dns_suffix = "" # Optional, if you need DNS + +# These are required to create a new ALB setup in VCD that will be used by TKGm clusters. +# Your VCD should have an existing ALB deployment that will be imported, the values below must correspond to +# the existing controller to be imported into VCD: +alb_controller_username = "admin" # Username to access the ALB Controller +alb_controller_password = "change-me" # Password to access the ALB Controller +alb_controller_url = "https://alb-ctrl.my-awesome-corp.com" # URL of the ALB Controller +alb_importable_cloud_name = "change-me" # Name of the Cloud to import to create a Service Engine Group + +# ------------------------------------------------ +# Catalog and OVAs +# ------------------------------------------------ + +# These variables are required to upload the necessary OVAs to the Solutions Organization shared catalog. +# You can find the download links in the guide referenced at the top of this file. 
+tkgm_ova_folder = "/home/changeme/tkgm-folder" # An existing absolute path to a folder containing TKGm OVAs +tkgm_ova_files = [ # Existing TKGm OVAs + "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova" +] +cse_ova_folder = "/home/changeme/cse-folder" # An existing absolute path to a folder containing CSE Server OVAs +cse_ova_file = "VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" # An existing CSE Server OVA + +# ------------------------------------------------ +# CSE Server initialization +# ------------------------------------------------ + +cse_admin_username = "cse_admin" # This must be the same user created in step 1 +cse_admin_password = "change-me" # This must be the same password of the user created in step 1 +cse_admin_api_token_file = "cse_admin_api_token.json" # This file will contain the API token of the CSE Admin user, store it carefully. + +# ------------------------------------------------ +# Other configuration +# ------------------------------------------------ +# This path points to the .zip file that contains the bundled Kubernetes Container Clusters UI Plugin. +# It is optional: if not set, it won't be installed. +# Remember to remove older CSE UI plugins if present (for example 3.x plugins) before installing this one. +k8s_container_clusters_ui_plugin_path = "/home/change-me/container-ui-plugin-4.1.zip" diff --git a/examples/container-service-extension/v4.2/install/step2/variables.tf b/examples/container-service-extension/v4.2/install/step2/variables.tf new file mode 100644 index 000000000..0836d3e69 --- /dev/null +++ b/examples/container-service-extension/v4.2/install/step2/variables.tf @@ -0,0 +1,255 @@ +# ------------------------------------------------ +# Provider config +# ------------------------------------------------ + +variable "vcd_url" { + description = "The VCD URL (Example: 'https://vcd.my-company.com')" + type = string +} + +variable "insecure_login" { + description = "Allow unverified SSL connections when operating with VCD" + type = bool + default = false +} + +variable "administrator_user" { + description = "The VCD administrator user (Example: 'administrator')" + default = "administrator" + type = string +} + +variable "administrator_password" { + description = "The VCD administrator password" + type = string + sensitive = true +} + +variable "administrator_org" { + description = "The VCD administrator organization (Example: 'System')" + type = string + default = "System" +} + +# ------------------------------------------------ +# Infrastructure +# ------------------------------------------------ + +variable "provider_vdc_name" { + description = "The Provider VDC that will be used to create the required VDCs" + type = string +} + +variable "nsxt_edge_cluster_name" { + description = "The NSX-T Edge Cluster name, that relates to the specified Provider VDC" + type = string +} + +variable "network_pool_name" { + description = "The network pool to be used on VDC creation" + type = string +} + +variable "nsxt_manager_name" { + description = "NSX-T manager name, required to create the Provider Gateways" + type = string +} + +variable "solutions_nsxt_tier0_router_name" { + description = "Name of an existing NSX-T tier-0 router to create the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_gateway_ip" { + description = "Gateway IP for the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_gateway_prefix_length" { + description = "Prefix length 
for the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_static_ip_ranges" { + type = list(list(string)) + description = "List of pairs of public IPs for the Solutions Provider Gateway" +} + +variable "tenant_nsxt_tier0_router_name" { + description = "Name of an existing NSX-T tier-0 router to create the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_gateway_ip" { + description = "Gateway IP for the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_gateway_prefix_length" { + description = "Prefix length for the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_static_ip_ranges" { + type = list(list(string)) + description = "List of pairs of public IPs for the Tenant Provider Gateway" +} + +variable "solutions_routed_network_gateway_ip" { + description = "Gateway IP for the Solutions routed network" + type = string +} + +variable "solutions_routed_network_prefix_length" { + description = "Prefix length for the Solutions routed network" + type = string +} + +variable "solutions_routed_network_ip_pool_start_address" { + description = "Start address for the IP pool of the Solutions routed network" + type = string +} + +variable "solutions_routed_network_ip_pool_end_address" { + description = "End address for the IP pool of the Solutions routed network" + type = string +} + +variable "solutions_snat_external_ip" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the external IP, which should be one of the Provider Gateway available IPs" + type = string +} + +variable "solutions_snat_internal_network_cidr" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the internal subnet CIDR, which should correspond to the routed network IPs" + type = string +} + +variable "solutions_routed_network_dns" { + description = "Custom DNS server IP to use for the Solutions routed network" + type = string + default = "" +} + +variable "solutions_routed_network_dns_suffix" { + description = "Custom DNS suffix to use for the Solutions routed network" + type = string + default = "" +} + +variable "tenant_routed_network_gateway_ip" { + description = "Gateway IP for the Tenant routed network" + type = string +} + +variable "tenant_routed_network_prefix_length" { + description = "Prefix length for the Tenant routed network" + type = string +} + +variable "tenant_routed_network_ip_pool_start_address" { + description = "Start address for the IP pool of the Tenant routed network" + type = string +} + +variable "tenant_routed_network_ip_pool_end_address" { + description = "End address for the IP pool of the Tenant routed network" + type = string +} + +variable "tenant_snat_external_ip" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the external IP, which should be one of the Provider Gateway available IPs" + type = string +} + +variable "tenant_snat_internal_network_cidr" { + description = "Used to create a SNAT rule to allow connectivity. 
This specifies the internal subnet CIDR, which should correspond to the routed network IPs" + type = string +} + +variable "tenant_routed_network_dns" { + description = "Custom DNS server IP to use for the Tenant routed network" + type = string + default = "" +} + +variable "tenant_routed_network_dns_suffix" { + description = "Custom DNS suffix to use for the Tenant routed network" + type = string + default = "" +} + +variable "alb_controller_username" { + description = "The user to create an ALB Controller with" + type = string +} + +variable "alb_controller_password" { + description = "The password for the user that will be used to create the ALB Controller" + type = string +} + +variable "alb_controller_url" { + description = "The URL to create the ALB Controller" + type = string +} + +variable "alb_importable_cloud_name" { + description = "Name of an available importable cloud to be able to create an ALB NSX-T Cloud" + type = string +} + +# ------------------------------------------------ +# Catalog and OVAs +# ------------------------------------------------ + +variable "tkgm_ova_folder" { + description = "Absolute path to the TKGm OVA files, with no file name (Example: '/home/bob/Downloads/tkgm')" + type = string +} + +variable "tkgm_ova_files" { + description = "A set of TKGm OVA file names, with no path (Example: 'ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova')" + type = set(string) +} + +variable "cse_ova_folder" { + description = "Absolute path to the CSE OVA file, with no file name (Example: '/home/bob/Downloads/cse')" + type = string +} + +variable "cse_ova_file" { + description = "CSE OVA file name, with no path (Example: 'VMware_Cloud_Director_Container_Service_Extension-4.0.1.62-21109756.ova')" + type = string +} + +# ------------------------------------------------ +# CSE Server initialization +# ------------------------------------------------ + +variable "cse_admin_username" { + description = "The CSE administrator user that was created in step 1" + type = string +} + +variable "cse_admin_password" { + description = "The password to set for the CSE administrator user that was created in step 1" + type = string + sensitive = true +} + +variable "cse_admin_api_token_file" { + description = "The file where the API Token for the CSE Administrator will be stored" + type = string + default = "cse_admin_api_token.json" +} + +# ------------------------------------------------ +# Other configuration +# ------------------------------------------------ + +variable "k8s_container_clusters_ui_plugin_path" { + type = string + description = "Path to the Kubernetes Container Clusters UI Plugin zip file" + default = "" +} diff --git a/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json b/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json new file mode 100644 index 000000000..f4b9db1f2 --- /dev/null +++ b/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json @@ -0,0 +1,727 @@ +{ + "definitions": { + "k8sNetwork": { + "type": "object", + "description": "The network-related settings for the cluster.", + "properties": { + "pods": { + "type": "object", + "description": "The network settings for Kubernetes pods.", + "properties": { + "cidrBlocks": { + "type": "array", + "description": "Specifies a range of IP addresses to use for Kubernetes pods.", + "items": { + "type": "string" + } + } + } + }, + "services": { + "type": "object", + "description": "The network settings for 
Kubernetes services", + "properties": { + "cidrBlocks": { + "type": "array", + "description": "The range of IP addresses to use for Kubernetes services", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "type": "object", + "required": [ + "kind", + "metadata", + "apiVersion", + "spec" + ], + "properties": { + "kind": { + "enum": [ + "CAPVCDCluster" + ], + "type": "string", + "description": "The kind of the Kubernetes cluster.", + "title": "The kind of the Kubernetes cluster.", + "default": "CAPVCDCluster" + }, + "spec": { + "type": "object", + "properties": { + "capiYaml": { + "type": "string", + "title": "CAPI yaml", + "description": "User specification of the CAPI yaml; It is user's responsibility to embed the correct CAPI yaml generated as per instructions - https://github.com/vmware/cluster-api-provider-cloud-director/blob/main/docs/CLUSTERCTL.md#generate-cluster-manifests-for-workload-cluster" + }, + "yamlSet": { + "type": "array", + "items": { + "type": "string" + }, + "title": "User specified K8s Yaml strings", + "description": "User specified K8s Yaml strings to be applied on the target cluster. The component Projector will process this property periodically." + }, + "projector": { + "type": "object", + "x-vcloud-restricted": "private", + "title": "User specification for Projector component", + "description": "Defines the operations to be executed by the component projector", + "properties": { + "operations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type of the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." + }, + "values": { + "type": "array", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType" + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "minimum": 1, + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "description": "Operation will be retried until it succeeds", + "default": false + } + }, + "required": [ + "verb", + "values", + "valueType", + "sequence" + ] + }, + "title": "Operations to be executed by the component projector", + "description": "User-specified operations to be applied on the target cluster. " + } + } + }, + "vcdKe": { + "type": "object", + "properties": { + "isVCDKECluster": { + "type": "boolean", + "title": "User's intent to have this specification processed by VCDKE", + "description": "Does user wants this specification to be processed by the VCDKE component of CSE stack?" 
+ }, + "markForDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster", + "description": "Mark the cluster for deletion", + "default": false + }, + "autoRepairOnErrors": { + "type": "boolean", + "title": "User's intent to let the VCDKE repair/recreate the cluster", + "description": "User's intent to let the VCDKE repair/recreate the cluster on any errors during cluster creation", + "default": true + }, + "forceDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster forcefully", + "description": "User's intent to delete the cluster forcefully", + "default": false + }, + "defaultStorageClassOptions": { + "type": "object", + "properties": { + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + }, + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + } + }, + "title": "Default Storage class options to be set on the target cluster", + "description": "Default Storage class options to be set on the target cluster" + }, + "secure": { + "type": "object", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "properties": { + "apiToken": { + "type": "string", + "title": "API Token (Refresh Token) of the user", + "description": "API Token (Refresh Token) of the user." + } + }, + "title": "Encrypted data", + "description": "Fields under this section will be encrypted" + } + }, + "title": "User specification for VCDKE component", + "description": "User specification for VCDKE component" + } + }, + "title": "User specification for the cluster", + "description": "User specification for the cluster" + }, + "metadata": { + "type": "object", + "properties": { + "orgName": { + "type": "string", + "description": "The name of the Organization in which cluster needs to be created or managed.", + "title": "The name of the Organization in which cluster needs to be created or managed." + }, + "virtualDataCenterName": { + "type": "string", + "description": "The name of the Organization data center in which the cluster need to be created or managed.", + "title": "The name of the Organization data center in which the cluster need to be created or managed." + }, + "name": { + "type": "string", + "description": "The name of the cluster.", + "title": "The name of the cluster." 
+ }, + "site": { + "type": "string", + "description": "Fully Qualified Domain Name (https://VCD-FQDN.com) of the VCD site in which the cluster is deployed", + "title": "Fully Qualified Domain Name of the VCD site in which the cluster is deployed" + } + }, + "title": "User specification of the metadata of the cluster", + "description": "User specification of the metadata of the cluster" + }, + "status": { + "type": "object", + "x-vcloud-restricted": "protected", + "properties": { + "capvcd": { + "type": "object", + "properties": { + "phase": { + "type": "string" + }, + "kubernetes": { + "type": "string" + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "k8sNetwork": { + "$ref": "#/definitions/k8sNetwork" + }, + "uid": { + "type": "string" + }, + "parentUid": { + "type": "string" + }, + "useAsManagementCluster": { + "type": "boolean" + }, + "clusterApiStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "description": "The phase describing the control plane infrastructure deployment." + }, + "apiEndpoints": { + "type": "array", + "description": "Control Plane load balancer endpoints", + "items": { + "host": { + "type": "string" + }, + "port": { + "type": "integer" + } + } + } + } + }, + "nodePool": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the node pool" + }, + "sizingPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "placementPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "diskSizeMb": { + "type": "integer", + "description": "disk size of the VMs in the node pool in MB" + }, + "nvidiaGpuEnabled": { + "type": "boolean", + "description": "boolean indicating if the node pools have nvidia GPU enabled" + }, + "storageProfile": { + "type": "string", + "description": "storage profile used by the node pool" + }, + "desiredReplicas": { + "type": "integer", + "description": "desired replica count of the nodes in the node pool" + }, + "availableReplicas": { + "type": "integer", + "description": "number of available replicas in the node pool" + } + } + } + }, + "clusterResourceSet": { + "properties": {}, + "type": "object" + }, + "clusterResourceSetBindings": { + "type": "array", + "items": { + "type": "object", + "properties": { + "clusterResourceSetName": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "applied": { + "type": "boolean" + }, + "lastAppliedTime": { + "type": "string" + } + } + } + }, + "capvcdVersion": { + "type": "string" + }, + "vcdProperties": { + "type": "object", + "properties": { + "organizations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "id": { + "type": "string" + } + } + } + }, + "site": { + "type": "string" + }, + "orgVdcs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "id": { + "type": "string" + }, + "ovdcNetworkName": { + "type": "string" + } + } + } + } + } + }, + "upgrade": { + "type": "object", + "description": "determines the state of upgrade. 
If no upgrade is issued, only the existing version is stored.", + "properties": { + "current": { + "type": "object", + "properties": { + "kubernetesVersion": { + "type": "string", + "description": "current kubernetes version of the cluster. If being upgraded, will represent target kubernetes version of the cluster." + }, + "tkgVersion": { + "type": "string", + "description": "current TKG version of the cluster. If being upgraded, will represent the tarkget TKG version of the cluster." + } + } + }, + "previous": { + "type": "object", + "properties": { + "kubernetesVersion": { + "type": "string", + "description": "the kubernetes version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source kubernetes version from which the cluster is being upgraded." + }, + "tkgVersion": { + "type": "string", + "description": "the TKG version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source TKG versoin from which the cluster is being upgraded." + } + } + }, + "ready": { + "type": "boolean", + "description": "boolean indicating the status of the cluster upgrade." + } + } + }, + "private": { + "type": "object", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "description": "Placeholder for the properties invisible and secure to non-admin users.", + "properties": { + "kubeConfig": { + "type": "string", + "description": "Kube config to access the Kubernetes cluster." + } + } + }, + "vcdResourceSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "createdByVersion": { + "type": "string", + "description": "CAPVCD version used to create the cluster" + } + }, + "title": "CAPVCD's view of the current status of the cluster", + "description": "CAPVCD's view of the current status of the cluster" + }, + "vcdKe": { + "type": "object", + "properties": { + "state": { + "type": "string", + "title": "VCDKE's view of the current state of the cluster", + "description": "VCDKE's view of the current state of the cluster - provisioning/provisioned/error" + }, + "vcdKeVersion": { + "type": "string", + "title": "VCDKE/CSE product version", + "description": "The VCDKE version with which the cluster is originally created" + }, + "defaultStorageClass": { + "type": "object", + "properties": { + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + }, + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + } + }, + "title": "Default Storage class options to be set on the target cluster", + "description": "Default Storage class options to be set on the target cluster" + } + }, + "title": "VCDKE's view of the current status of the cluster", + "description": "Current status of the cluster from VCDKE's point of view" + }, + "cpi": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the Cloud Provider Interface", + "description": "Name of the 
CPI" + }, + "version": { + "type": "string", + "title": "Product version of the CPI", + "description": "Product version of the CPI" + } + }, + "title": "CPI for VCD's view of the current status of the cluster", + "description": "CPI for VCD's view of the current status of the cluster" + }, + "projector": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Projector Name", + "description": "The name of the projector component." + }, + "version": { + "type": "string", + "title": "Projector Version", + "description": "The product version of the projector component." + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Error Set", + "description": "An array containing error information related to the operations of the projector component." + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Event Set", + "description": "An array containing event information related to the operations of the projector component." + }, + "lastAppliedSequence": { + "type": "integer", + "minimum": 1, + "default": 1, + "title": "Last Applied Sequence", + "description": "The sequence number of the last applied operation in the projector component." + }, + "lastAppliedTimestamp": { + "type": "string", + "title": "Last Applied Timestamp", + "description": "The timestamp of the last applied operation in the projector component." + }, + "operations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "forceDelete": { + "type": "boolean", + "title": "Flag which indicates whether the operation should be forcefully deleted.", + "description": "Indicates whether the operation should be forcefully deleted." + }, + "output": { + "type": "string", + "title": "Output", + "description": "The execution output of the operation." + } + }, + "title": "Operation Status", + "description": "Status of a specific operation executed in the projector component." + }, + "title": "Operation Status of Projector after Execution", + "description": "An array containing the status of operations executed in the projector component." 
+ }, + "retrySet": { + "type": "array", + "items": { + "type": "object", + "properties": { + "operation": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Kubernetes command of the operation" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "Name of the operation" + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type of the operation", + "description": "Value type of the operation" + }, + "values": { + "type": "array", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType" + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Sequence number of the operation" + }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "description": "Operation will be retried until it succeeds" + } + }, + "title": "Spec of the operation to be retried", + "description": "Spec of the operation to be retried" + }, + "createTimeStamp": { + "type": "string", + "title": "The timestamp at which this operation failed for the first time", + "description": "The timestamp at which this operation failed for the first time" + } + } + }, + "title": "The operations to be retried by the Projector", + "description": "The operations to be retried by the Projector" + } + }, + "title": "Current Status of the Projector Component", + "description": "Current status of the projector component. It reflects the operation execution status of the projector component." + } + }, + "title": "Current status of the cluster", + "description": "Current status of the cluster. The subsections are updated by various components of CSE stack - VCDKE, Projector, CAPVCD, CPI, CSI and Extensions" + }, + "apiVersion": { + "type": "string", + "default": "capvcd.vmware.com/v1.2", + "description": "The version of the payload format" + } + } +} diff --git a/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json b/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json new file mode 100644 index 000000000..1f721919a --- /dev/null +++ b/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json @@ -0,0 +1,323 @@ +{ + "type": "object", + "properties": { + "profiles": { + "type": "array", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "active": { + "type": "boolean" + }, + "vcdKeInstances": { + "type": "array", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "4.1.0" + }, + "vcdKeInstanceId": { + "type": "string" + } + } + } + ] + }, + "serverConfig": { + "type": "object", + "properties": { + "rdePollIntervalInMin": { + "type": "integer", + "description": "Server polls and processes the RDEs for every #rdePollIntervalInMin minutes." + }, + "heartbeatWatcherTimeoutInMin": { + "type": "integer", + "description": "The watcher thread kills itself if it does not receive heartbeat with in #heartbeatWatcherTimeoutInMin from the associated worker thread. 
Eventually worker also dies off as it can no longer post to the already closed heartbeat channel." + }, + "staleHeartbeatIntervalInMin": { + "type": "integer", + "description": "New worker waits for about #staleHeartbeatIntervalinMin before it calls the current heartbeat stale and picks up the RDE. The value must always be greater than #heartbeatWatcherTimeoutInmin" + } + } + }, + "vcdConfig": { + "type": "object", + "properties": { + "sysLogger": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "port": { + "type": "string" + } + }, + "required": [ + "host", + "port" + ] + } + } + }, + "githubConfig": { + "type": "object", + "properties": { + "githubPersonalAccessToken": { + "type": "string" + } + } + }, + "bootstrapClusterConfig": { + "type": "object", + "properties": { + "sizingPolicy": { + "type": "string" + }, + "dockerVersion": { + "type": "string" + }, + "kindVersion": { + "type": "string", + "default": "v0.19.0" + }, + "kindestNodeVersion": { + "type": "string", + "default": "v1.27.1", + "description": "Image tag of kindest/node container, used by KinD to deploy a cluster" + }, + "kubectlVersion": { + "type": "string" + }, + "clusterctl": { + "type": "object", + "properties": { + "version": { + "type": "string", + "default": "v1.4.0" + }, + "clusterctlyaml": { + "type": "string" + } + } + }, + "capiEcosystem": { + "type": "object", + "properties": { + "coreCapiVersion": { + "type": "string", + "default": "v1.4.0" + }, + "controlPlaneProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.4.0" + } + } + }, + "bootstrapProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.4.0" + } + } + }, + "infraProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.1.0" + }, + "capvcdRde": { + "type": "object", + "properties": { + "vendor": { + "type": "string" + }, + "nss": { + "type": "string" + }, + "version": { + "type": "string" + } + } + } + } + }, + "certManagerVersion": { + "type": "string", + "default": "v1.11.1" + } + } + }, + "proxyConfig": { + "type": "object", + "properties": { + "httpProxy": { + "type": "string" + }, + "httpsProxy": { + "type": "string" + }, + "noProxy": { + "type": "string" + } + } + }, + "certificateAuthorities": { + "type": "array", + "description": "Certificates to be used as the certificate authority in the bootstrap (ephemeral) VM", + "items": { + "type": "string" + } + } + } + }, + "K8Config": { + "type": "object", + "properties": { + "csi": { + "type": "array", + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "1.4.0" + } + }, + "required": [ + "name", + "version" + ] + } + ] + }, + "cpi": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "1.4.0" + } + }, + "required": [ + "name", + "version" + ] + }, + "cni": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "required": [ + "name", + "version" + ] + }, + "rdeProjectorVersion": { + "type": "string", + "default": "0.6.0" + }, + "mhc": { + "type": "object", + "description": "Parameters to configure MachineHealthCheck", + "properties": { + "maxUnhealthyNodes": { + "type": "number", + "default": 100, + 
"minimum": 1, + "maximum": 100, + "description": "Dictates whether MHC should remediate the machine if the given percentage of nodes in the cluster are down" + }, + "nodeStartupTimeout": { + "type": "string", + "default": "900s", + "description": "Determines how long a MachineHealthCheck should wait for a Node to join the cluster, before considering a Machine unhealthy." + }, + "nodeNotReadyTimeout": { + "type": "string", + "default": "300s", + "description": "Determines how long MachineHealthCheck should wait for before remediating Machines if the Node Ready condition is False" + }, + "nodeUnknownTimeout": { + "type": "string", + "default": "300s", + "description": "Determines how long MachineHealthCheck should wait for before remediating machines if the Node Ready condition is Unknown" + } + }, + "required": [ + "maxUnhealthyNodes", + "nodeStartupTimeout", + "nodeNotReadyTimeout", + "nodeUnknownTimeout" + ] + }, + "certificateAuthorities": { + "type": "array", + "description": "Certificates to be used as the certificate authority", + "items": { + "type": "string" + } + } + }, + "required": [ + "csi", + "cpi", + "cni" + ] + }, + "containerRegistryUrl": { + "type": "string", + "default": "projects.registry.vmware.com" + } + }, + "required": [ + "name", + "active" + ] + } + ] + } + }, + "required": [ + "profiles" + ] +} diff --git a/go.mod b/go.mod index 712b0ae3c..4296e215d 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/kr/pretty v0.2.1 - github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.4 + github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5 ) require ( @@ -17,6 +17,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( diff --git a/go.sum b/go.sum index 028c4a344..0d1e0e9cb 100644 --- a/go.sum +++ b/go.sum @@ -44,6 +44,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -141,8 +142,8 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.4 h1:04HmxljmBJIpZtctGYoq8WqOTPZCzy+tvl87gtS2aw4= -github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.4/go.mod h1:7zG7TXViQ48JpYZIflmdPu3o30xkQSZ4nO+tZdSq31A= +github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5 h1:HL3T2EGsR8zaTbBbAPIzqQON7x9GQ+USKuz88I1N3cY= +github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5/go.mod 
h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -232,3 +233,5 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vcd/config_test.go b/vcd/config_test.go index 5140b1068..13cb52e4e 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -1,4 +1,4 @@ -//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbServiceMonitor || lbServerPool || lbAppProfile || lbAppRule || lbVirtualServer || access_control || user || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || ALL +//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbServiceMonitor || lbServerPool || lbAppProfile || lbAppRule || lbVirtualServer || access_control || user || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || cse || ALL package vcd @@ -231,6 +231,17 @@ type TestConfig struct { OrgUserPassword string `json:"orgUserPassword"` // Password for the Org User to be created within the organization } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` + Cse struct { + Version string `json:"version,omitempty"` + StorageProfile string `json:"storageProfile,omitempty"` + SolutionsOrg string `json:"solutionsOrg,omitempty"` + TenantOrg string `json:"tenantOrg,omitempty"` + TenantVdc string `json:"tenantVdc,omitempty"` + OvaCatalog string `json:"ovaCatalog,omitempty"` + OvaName string `json:"ovaName,omitempty"` + RoutedNetwork string `json:"routedNetwork,omitempty"` + EdgeGateway string `json:"edgeGateway,omitempty"` + } `json:"cse,omitempty"` } // names for created resources for all the tests diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go new file mode 100644 index 000000000..483eeaa96 --- /dev/null +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -0,0 +1,349 @@ +package vcd + +import ( + "context" + _ "embed" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/vmware/go-vcloud-director/v2/govcd" +) + +func datasourceVcdCseKubernetesCluster() *schema.Resource { + return &schema.Resource{ + ReadContext: datasourceVcdCseKubernetesRead, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_id", "name"}, + Description: "The unique ID of the Kubernetes cluster to read", + }, + "name": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_id", "name"}, + RequiredWith: []string{"cse_version", "org_id"}, + Description: "The name of the 
Kubernetes cluster to read. If there is more than one Kubernetes cluster with the same name, searching by name will fail", + }, + "org_id": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"cse_version", "name"}, + Description: "The ID of organization that owns the Kubernetes cluster, only required if 'name' is set", + }, + "cse_version": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"name", "org_id"}, + Description: "The CSE version used by the cluster, only required if 'name' is set", + }, + "runtime": { + Type: schema.TypeString, + Computed: true, + Description: "The Kubernetes runtime used by the cluster", + }, + "kubernetes_template_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, + "vdc_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the network that the Kubernetes cluster uses", + }, + "owner": { + Type: schema.TypeString, + Computed: true, + Description: "The user that created the cluster", + }, + "ssh_public_key": { + Type: schema.TypeString, + Computed: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane": { + Type: schema.TypeList, + Computed: true, + Description: "Defines the control plane for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of nodes that the control plane has", + }, + "disk_size_gi": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk size, in Gibibytes (Gi), of the control plane nodes", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Sizing policy of the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Placement policy of the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Computed: true, + Description: "Storage profile of the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Computed: true, + Description: "IP of the control plane", + }, + }, + }, + }, + "worker_pool": { + Type: schema.TypeList, + Computed: true, + Description: "Defines a node pool for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of this node pool", + }, + "machine_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of nodes that this node pool has", + }, + "disk_size_gi": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk size, in Gibibytes (Gi), of the control plane nodes", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Sizing policy of the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Placement policy of the control plane nodes", + }, + "vgpu_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "vGPU policy of the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Computed: true, + Description: "Storage profile of the control plane nodes", + }, + }, + }, + }, + "default_storage_class": { + Type: schema.TypeList, + 
Computed: true, + Description: "The default storage class of the cluster, if any", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the storage profile used by the storage class", + }, + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the storage class", + }, + "reclaim_policy": { + Computed: true, + Type: schema.TypeString, + Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", + }, + "filesystem": { + Computed: true, + Type: schema.TypeString, + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + }, + }, + }, + }, + "pods_cidr": { + Type: schema.TypeString, + Computed: true, + Description: "CIDR that the Kubernetes pods use", + }, + "services_cidr": { + Type: schema.TypeString, + Computed: true, + Description: "CIDR that the Kubernetes services use", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Computed: true, + Description: "Virtual IP subnet of the cluster", + }, + "auto_repair_on_errors": { + Type: schema.TypeBool, + Computed: true, + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", + }, + "node_health_check": { + Type: schema.TypeBool, + Computed: true, + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", + }, + "kubernetes_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of Kubernetes installed in this cluster", + }, + "tkg_product_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of TKG installed in this cluster", + }, + "capvcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of CAPVCD used by this cluster", + }, + "cluster_resource_set_bindings": { + Type: schema.TypeSet, + Computed: true, + Description: "The cluster resource set bindings of this cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cpi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Cloud Provider Interface used by this cluster", + }, + "csi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Container Storage Interface used by this cluster", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, + "supported_upgrades": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of vApp Template names that could be used to upgrade the existing cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "events": { + Type: schema.TypeList, // Order matters here, as they're ordered by date + Computed: true, + Description: "A set of events that happened during the Kubernetes cluster lifecycle", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the event", + }, + "resource_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the resource that caused the event", + }, + "type": { + Computed: true, + Type: schema.TypeString, + Description: "Type of the event, either 'event' or 'error'", + }, + "occurred_at": { + Computed: true, + Type: schema.TypeString, + Description: "When the event happened", + }, + "details": { + Computed: true, + Type: schema.TypeString, + Description: "Details of the event", + }, + }, + }, + }, + }, + } +} + +func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + vcdClient := meta.(*VCDClient) + var cluster *govcd.CseKubernetesCluster + var err error + if id, ok := d.GetOk("cluster_id"); ok { + cluster, err = vcdClient.CseGetKubernetesClusterById(id.(string)) + if err != nil { + return diag.FromErr(err) + } + } else if name, ok := d.GetOk("name"); ok { + cseVersion, err := semver.NewVersion(d.Get("cse_version").(string)) + if err != nil { + return diag.Errorf("could not parse cse_version='%s': %s", cseVersion, err) + } + + orgId := d.Get("org_id").(string) + org, err := vcdClient.GetOrgById(orgId) + if err != nil { + return diag.Errorf("could not find an Organization with ID '%s': %s", orgId, err) + } + + clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, name.(string)) + if err != nil { + return diag.FromErr(err) + } + if len(clusters) != 1 { + return diag.Errorf("expected one Kubernetes cluster with name '%s', got %d. 
Try to use 'cluster_id' instead of 'name'", name, len(clusters)) + } + cluster = clusters[0] + } + + // These fields are specific to the data source + dSet(d, "org_id", cluster.OrganizationId) + dSet(d, "cluster_id", cluster.ID) + + warns, err := saveClusterDataToState(d, vcdClient, cluster, "datasource") + if err != nil { + return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) + } + for _, warning := range warns { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: warning.Error(), + }) + } + + if len(diags) > 0 { + return diags + } + return nil +} diff --git a/vcd/datasource_vcd_version.go b/vcd/datasource_vcd_version.go new file mode 100644 index 000000000..c4dec5f14 --- /dev/null +++ b/vcd/datasource_vcd_version.go @@ -0,0 +1,79 @@ +package vcd + +import ( + "context" + "fmt" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func datasourceVcdVersion() *schema.Resource { + return &schema.Resource{ + ReadContext: datasourceVcdVersionRead, + Schema: map[string]*schema.Schema{ + "condition": { + Type: schema.TypeString, + Optional: true, + Description: "A condition to check against the VCD version", + RequiredWith: []string{"fail_if_not_match"}, + }, + "fail_if_not_match": { + Type: schema.TypeBool, + Optional: true, + Description: "This data source fails if the VCD doesn't match the version constraint set in 'condition'", + RequiredWith: []string{"condition"}, + }, + "matches_condition": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether VCD matches the condition or not", + }, + "vcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The VCD version", + }, + "api_version": { + Type: schema.TypeString, + Computed: true, + Description: "The VCD API version", + }, + }, + } +} + +func datasourceVcdVersionRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + vcdVersion, err := vcdClient.VCDClient.Client.GetVcdShortVersion() + if err != nil { + return diag.Errorf("could not get VCD version: %s", err) + } + apiVersion, err := vcdClient.VCDClient.Client.MaxSupportedVersion() + if err != nil { + return diag.Errorf("could not get VCD API version: %s", err) + } + + dSet(d, "vcd_version", vcdVersion) + dSet(d, "api_version", apiVersion) + + if condition, ok := d.GetOk("condition"); ok { + checkVer, err := semver.NewVersion(vcdVersion) + if err != nil { + return diag.Errorf("unable to parse version '%s': %s", vcdVersion, err) + } + constraints, err := semver.NewConstraint(condition.(string)) + if err != nil { + return diag.Errorf("unable to parse given version constraint '%s' : %s", condition, err) + } + matchesCondition := constraints.Check(checkVer) + dSet(d, "matches_condition", matchesCondition) + if !matchesCondition && d.Get("fail_if_not_match").(bool) { + return diag.Errorf("the VCD version '%s' doesn't match the version constraint '%s'", vcdVersion, condition) + } + } + + // The ID is artificial, and we try to identify each data source instance unequivocally through its parameters. 
+ d.SetId(fmt.Sprintf("vcd_version='%s',condition='%s',fail_if_not_match='%t'", vcdVersion, d.Get("condition"), d.Get("fail_if_not_match"))) + return nil +} diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go new file mode 100644 index 000000000..dcc411624 --- /dev/null +++ b/vcd/datasource_vcd_version_test.go @@ -0,0 +1,137 @@ +//go:build ALL || functional + +package vcd + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccVcdVersion(t *testing.T) { + preTestChecks(t) + skipIfNotSysAdmin(t) + + vcdClient := createSystemTemporaryVCDConnection() + currentVersion, err := vcdClient.Client.GetVcdShortVersion() + if err != nil { + t.Fatalf("could not get VCD version: %s", err) + } + + apiVersion, err := vcdClient.VCDClient.Client.MaxSupportedVersion() + if err != nil { + t.Fatalf("could not get VCD API version: %s", err) + } + + var params = StringMap{ + "Condition": ">= 99.99.99", + "FailIfNotMatch": "false", + } + testParamsNotEmpty(t, params) + + step1 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) + + params["FuncName"] = t.Name() + "-step2" + params["FailIfNotMatch"] = "true" + step2 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) + + params["FuncName"] = t.Name() + "-step3" + params["Condition"] = "= " + currentVersion + step3 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) + + params["FuncName"] = t.Name() + "-step4" + versionTokens := strings.Split(currentVersion, ".") + params["Condition"] = fmt.Sprintf("~> %s.%s", versionTokens[0], versionTokens[1]) + step4 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) + + params["FuncName"] = t.Name() + "-step5" + params["Condition"] = "!= 10.3.0" + step5 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step5: %s", step5) + + params["FuncName"] = t.Name() + "-step6" + params["Condition"] = " " // Not used, but illustrates the point of this check + params["FailIfNotMatch"] = " " + step6 := templateFill(testAccVcdVersionWithoutArguments, params) + debugPrintf("#[DEBUG] CONFIGURATION step6: %s", step6) + + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + + resource.ParallelTest(t, resource.TestCase{ + ProviderFactories: testAccProviders, + Steps: []resource.TestStep{ + { + Config: step1, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='>= 99.99.99',fail_if_not_match='false'", currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "false"), + ), + }, + { + Config: step2, + ExpectError: regexp.MustCompile(fmt.Sprintf(`the VCD version '%s' doesn't match the version constraint '>= 99.99.99'`, currentVersion)), + }, + { + Config: step3, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='= %s',fail_if_not_match='true'", currentVersion, currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + 
resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), + ), + }, + { + Config: step4, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='~> %s.%s',fail_if_not_match='true'", currentVersion, versionTokens[0], versionTokens[1])), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), + ), + }, + { + Config: step5, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='!= 10.3.0',fail_if_not_match='true'", currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), + ), + }, + { + Config: step6, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='',fail_if_not_match='false'", currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckNoResourceAttr("data.vcd_version.version", "matches_condition"), + ), + }, + }, + }) + postTestChecks(t) +} + +const testAccVcdVersion = ` +data "vcd_version" "version" { + condition = "{{.Condition}}" + fail_if_not_match = {{.FailIfNotMatch}} +} +` + +const testAccVcdVersionWithoutArguments = ` +data "vcd_version" "version" { +} +` diff --git a/vcd/provider.go b/vcd/provider.go index adaa9e2f5..021595d28 100644 --- a/vcd/provider.go +++ b/vcd/provider.go @@ -153,6 +153,8 @@ var globalDataSourceMap = map[string]*schema.Resource{ "vcd_nsxt_edgegateway_dns": datasourceVcdNsxtEdgeGatewayDns(), // 3.11 "vcd_vgpu_profile": datasourceVcdVgpuProfile(), // 3.11 "vcd_vm_vgpu_policy": datasourceVcdVmVgpuPolicy(), // 3.11 + "vcd_cse_kubernetes_cluster": datasourceVcdCseKubernetesCluster(), // 3.12 + "vcd_version": datasourceVcdVersion(), // 3.12 } var globalResourceMap = map[string]*schema.Resource{ @@ -262,6 +264,7 @@ var globalResourceMap = map[string]*schema.Resource{ "vcd_network_pool": resourceVcdNetworkPool(), // 3.11 "vcd_nsxt_edgegateway_dns": resourceVcdNsxtEdgeGatewayDns(), // 3.11 "vcd_vm_vgpu_policy": resourceVcdVmVgpuPolicy(), // 3.11 + "vcd_cse_kubernetes_cluster": resourceVcdCseKubernetesCluster(), // 3.12 } // Provider returns a terraform.ResourceProvider. 
diff --git a/vcd/provider_test.go b/vcd/provider_test.go index 0d925f4d0..38c240f71 100644 --- a/vcd/provider_test.go +++ b/vcd/provider_test.go @@ -1,4 +1,4 @@ -//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbAppProfile || lbAppRule || lbServiceMonitor || lbServerPool || lbVirtualServer || user || access_control || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || ALL +//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbAppProfile || lbAppRule || lbServiceMonitor || lbServerPool || lbVirtualServer || user || access_control || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || cse || ALL package vcd diff --git a/vcd/resource_vcd_catalog_access_control.go b/vcd/resource_vcd_catalog_access_control.go index 28d4e73e5..422b6c95a 100644 --- a/vcd/resource_vcd_catalog_access_control.go +++ b/vcd/resource_vcd_catalog_access_control.go @@ -352,5 +352,5 @@ func runWithRetry(operationDescription, errorMessage string, timeout time.Durati elapsed = time.Since(start) attempts++ } - return nil, fmt.Errorf(errorMessage+" :%s", err) + return nil, fmt.Errorf(errorMessage+": %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go new file mode 100644 index 000000000..2c0059b30 --- /dev/null +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -0,0 +1,803 @@ +package vcd + +import ( + "context" + _ "embed" + "fmt" + "github.com/hashicorp/go-cty/cty" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/vmware/go-vcloud-director/v2/govcd" + "sort" + "time" +) + +func resourceVcdCseKubernetesCluster() *schema.Resource { + // This regular expression matches strings with at most 31 characters, composed only by lowercase alphanumeric characters or '-', + // that must start with an alphabetic character, and end with an alphanumeric. + // This is used for any "name" property in CSE, like cluster name, worker pool name or storage class name. + const kubernetesNameRegex = `^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$` + + return &schema.Resource{ + CreateContext: resourceVcdCseKubernetesClusterCreate, + ReadContext: resourceVcdCseKubernetesRead, + UpdateContext: resourceVcdCseKubernetesUpdate, + DeleteContext: resourceVcdCseKubernetesDelete, + Importer: &schema.ResourceImporter{ + StateContext: resourceVcdCseKubernetesImport, + }, + Schema: map[string]*schema.Schema{ + "cse_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0", "4.2.1"}, false), + Description: "The CSE version to use", + DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { + // This custom diff function allows to correctly compare versions. 
+ oldVersion, err := semver.NewVersion(oldValue) + if err != nil { + return false + } + newVersion, err := semver.NewVersion(newValue) + if err != nil { + return false + } + return oldVersion.Equal(newVersion) + }, + DiffSuppressOnRefresh: true, + }, + "runtime": { + Type: schema.TypeString, + Optional: true, + Default: "tkg", + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE + Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the Kubernetes cluster", + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "kubernetes_template_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, + "org": { + Type: schema.TypeString, + Optional: true, // Gets the Provider org if not set + ForceNew: true, + Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + + "level. Useful when connected as sysadmin working across different organizations", + }, + "vdc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the network that the Kubernetes cluster will use", + }, + "owner": { + Type: schema.TypeString, + Optional: true, // Gets the Provider user if not set + ForceNew: true, + Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", + }, + "api_token_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, // It's only used on creation, so we do not care about updates + Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Defines the control plane for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 3, // As suggested in UI + Description: "The number of nodes that the control plane has. 
Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil + }, + }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes (Gi) must be at least 20"), + Description: "Disk size, in Gibibytes (Gi), for the control plane nodes. Must be at least 20", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, // IP can be auto-assigned if left-empty + ForceNew: true, + Description: "IP for the control plane. It will be automatically assigned during cluster creation if left empty", + ValidateFunc: checkEmptyOrSingleIP(), + }, + }, + }, + }, + "worker_pool": { + // This is a list because TypeSet tries to replace the whole block when we just change a sub-attribute like "machine_count", + // that would cause the worker pool to be deleted and then re-created, which is not allowed in CSE. + // On the other hand, with TypeList the updates on sub-attributes work as expected but in exchange + // we need to be careful on reads to guarantee that order is respected. + Type: schema.TypeList, + Required: true, + Description: "Defines a worker pool for the cluster", + Elem: &schema.Resource{ + // Ideally, all of these sub-attributes should have ForceNew: true except for "machine_count", as + // they can't be changed. However, this doesn't work well, so we check this at runtime. + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of this worker pool. Must be unique", + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, // As suggested in UI + Description: "The number of nodes that this worker pool has. 
Must be higher than or equal to 0", + ValidateDiagFunc: minimumValue(0, "number of nodes must be higher than or equal to 0"), + }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + Description: "Disk size, in Gibibytes (Gi), for the worker pool nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes (Gi) must be at least 20"), + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + Description: "VM Sizing policy for the worker pool nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + Description: "VM Placement policy for the worker pool nodes", + }, + "vgpu_policy_id": { + Type: schema.TypeString, + Optional: true, + Description: "vGPU policy for the worker pool nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + Description: "Storage profile for the worker pool nodes", + }, + }, + }, + }, + "default_storage_class": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Defines the default storage class for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile_id": { + Required: true, + ForceNew: true, + Type: schema.TypeString, + Description: "ID of the storage profile to use for the storage class", + }, + "name": { + Required: true, + ForceNew: true, + Type: schema.TypeString, + Description: "Name to give to this storage class", + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "reclaim_policy": { + Required: true, + ForceNew: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), + Description: "Reclaim policy. 
Possible values are: `delete` deletes the volume when the `PersistentVolumeClaim` is deleted; `retain` does not delete, and the volume can be manually reclaimed", + }, + "filesystem": { + Required: true, + ForceNew: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + }, + }, + }, + }, + "pods_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.96.0.0/11", // As suggested in UI + Description: "CIDR that the Kubernetes pods will use", + }, + "services_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.64.0.0/13", // As suggested in UI + Description: "CIDR that the Kubernetes services will use", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Optional: true, + Description: "Virtual IP subnet for the cluster", + }, + "auto_repair_on_errors": { + Type: schema.TypeBool, + Optional: true, + Computed: true, // CSE Server turns this off after the cluster is successfully provisioned + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", + }, + "node_health_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", + }, + "operations_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The time, in minutes, to wait for the cluster operations to be successfully completed. For example, during cluster creation, it should be in `provisioned` " + + "state before the timeout is reached, otherwise the operation will return an error. For cluster deletion, this timeout " + + "specifies the time to wait until the cluster is completely deleted. Setting this argument to `0` means to wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), + }, + "kubernetes_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of Kubernetes installed in this cluster", + }, + "tkg_product_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of TKG installed in this cluster", + }, + "capvcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of CAPVCD used by this cluster", + }, + "cluster_resource_set_bindings": { + Type: schema.TypeSet, + Computed: true, + Description: "The cluster resource set bindings of this cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cpi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Cloud Provider Interface used by this cluster", + }, + "csi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Container Storage Interface used by this cluster", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, + "supported_upgrades": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of vApp Template names that can be used to upgrade the cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "events": { + Type: schema.TypeList, // Order matters here, as they're ordered by date + Computed: true, + Description: "A list of events that happened during the Kubernetes cluster lifecycle", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the event", + }, + "resource_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the resource that caused the event", + }, + "type": { + Computed: true, + Type: schema.TypeString, + Description: "Type of the event, either 'event' or 'error'", + }, + "occurred_at": { + Computed: true, + Type: schema.TypeString, + Description: "When the event happened", + }, + "details": { + Computed: true, + Type: schema.TypeString, + Description: "Details of the event", + }, + }, + }, + }, + }, + } +} + +func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + cseVersion, err := semver.NewSemver(d.Get("cse_version").(string)) + if err != nil { + return diag.Errorf("the introduced 'cse_version=%s' is not valid: %s", d.Get("cse_version"), err) + } + + vcdClient := meta.(*VCDClient) + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) + } + + apiTokenFile := d.Get("api_token_file").(string) + if apiTokenFile == "" { + return diag.Errorf("the API token file 'is required during Kubernetes cluster creation") + } + apiToken, err := govcd.GetTokenFromFile(apiTokenFile) + if err != nil { + return diag.Errorf("could not read the API token from the file '%s': %s", apiTokenFile, err) + } + + owner := d.Get("owner").(string) + if owner == "" { + session, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return diag.Errorf("could not get an Owner for the Kubernetes cluster. 'owner' is not set and cannot get one from the Provider configuration: %s", err) + } + owner = session.User.Name + if owner == "" { + return diag.Errorf("could not get an Owner for the Kubernetes cluster. 
'owner' is not set and cannot get one from the Provider configuration") + } + } + + creationData := govcd.CseClusterSettings{ + CseVersion: *cseVersion, + Name: d.Get("name").(string), + OrganizationId: org.Org.ID, + VdcId: d.Get("vdc_id").(string), + NetworkId: d.Get("network_id").(string), + KubernetesTemplateOvaId: d.Get("kubernetes_template_id").(string), + ControlPlane: govcd.CseControlPlaneSettings{ + MachineCount: d.Get("control_plane.0.machine_count").(int), + DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), + SizingPolicyId: d.Get("control_plane.0.sizing_policy_id").(string), + PlacementPolicyId: d.Get("control_plane.0.placement_policy_id").(string), + StorageProfileId: d.Get("control_plane.0.storage_profile_id").(string), + Ip: d.Get("control_plane.0.ip").(string), + }, + Owner: owner, + ApiToken: apiToken.RefreshToken, + NodeHealthCheck: d.Get("node_health_check").(bool), + PodCidr: d.Get("pods_cidr").(string), + ServiceCidr: d.Get("services_cidr").(string), + SshPublicKey: d.Get("ssh_public_key").(string), + VirtualIpSubnet: d.Get("virtual_ip_subnet").(string), + AutoRepairOnErrors: d.Get("auto_repair_on_errors").(bool), + } + + workerPoolsAttr := d.Get("worker_pool").([]interface{}) + workerPools := make([]govcd.CseWorkerPoolSettings, len(workerPoolsAttr)) + for i, w := range workerPoolsAttr { + workerPool := w.(map[string]interface{}) + workerPools[i] = govcd.CseWorkerPoolSettings{ + Name: workerPool["name"].(string), + MachineCount: workerPool["machine_count"].(int), + DiskSizeGi: workerPool["disk_size_gi"].(int), + SizingPolicyId: workerPool["sizing_policy_id"].(string), + PlacementPolicyId: workerPool["placement_policy_id"].(string), + VGpuPolicyId: workerPool["vgpu_policy_id"].(string), + StorageProfileId: workerPool["storage_profile_id"].(string), + } + } + creationData.WorkerPools = workerPools + + if _, ok := d.GetOk("default_storage_class"); ok { + creationData.DefaultStorageClass = &govcd.CseDefaultStorageClassSettings{ + StorageProfileId: d.Get("default_storage_class.0.storage_profile_id").(string), + Name: d.Get("default_storage_class.0.name").(string), + ReclaimPolicy: d.Get("default_storage_class.0.reclaim_policy").(string), + Filesystem: d.Get("default_storage_class.0.filesystem").(string), + } + } + + cluster, err := org.CseCreateKubernetesCluster(creationData, time.Duration(d.Get("operations_timeout_minutes").(int))*time.Minute) + if err != nil && cluster == nil { + return diag.Errorf("Kubernetes cluster creation failed: %s", err) + } + + // If we get here, it means we got either a successful created cluster, a timeout or a cluster in "error" state. + // Either way, from this point we should go to the Update logic as the cluster is definitely present in VCD, so we store the ID. + // Also, we need to set the ID to be able to distinguish this cluster from all the others that may have the same name and RDE Type. + // We could use some other ways of filtering, but ID is the only accurate one. + // If the cluster can't be created due to errors, users should delete it and retry, like in UI. + d.SetId(cluster.ID) + + if cluster.State != "provisioned" { + return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: %s", err) + } + + return resourceVcdCseKubernetesRead(ctx, d, meta) +} + +func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + vcdClient := meta.(*VCDClient) + // The ID must be already set for the read to be successful. 
We can't rely on the name as there can be + // many clusters with the same name in the same org. + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + warns, err := saveClusterDataToState(d, vcdClient, cluster, "resource") + if err != nil { + return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) + } + for _, warning := range warns { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: warning.Error(), + }) + } + + if len(diags) > 0 { + return diags + } + return nil +} + +// resourceVcdCseKubernetesUpdate updates the Kubernetes clusters. Note that re-creating the CAPI YAML and sending it +// back will break everything, so we must patch the YAML piece by piece. +func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + // Some arguments don't require changes in the backend + if !d.HasChangesExcept("operations_timeout_minutes") { + return nil + } + + vcdClient := meta.(*VCDClient) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) + if err != nil { + return diag.Errorf("could not get Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + payload := govcd.CseClusterUpdateInput{} + if d.HasChange("worker_pool") { + oldPools, newPools := d.GetChange("worker_pool") + existingPools := map[string]bool{} + + // Fetch the already existing worker pools that have been modified + changePoolsPayload := map[string]govcd.CseWorkerPoolUpdateInput{} + for _, o := range oldPools.([]interface{}) { + oldPool := o.(map[string]interface{}) + for _, n := range newPools.([]interface{}) { + newPool := n.(map[string]interface{}) + if oldPool["name"].(string) == newPool["name"].(string) { + if oldPool["disk_size_gi"] != newPool["disk_size_gi"] { + return diag.Errorf("'disk_size_gi' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["sizing_policy_id"] != newPool["sizing_policy_id"] { + return diag.Errorf("'sizing_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["placement_policy_id"] != newPool["placement_policy_id"] { + return diag.Errorf("'placement_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["vgpu_policy_id"] != newPool["vgpu_policy_id"] { + return diag.Errorf("'vgpu_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["storage_profile_id"] != newPool["storage_profile_id"] { + return diag.Errorf("'storage_profile_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + changePoolsPayload[newPool["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: newPool["machine_count"].(int)} + existingPools[newPool["name"].(string)] = true // Register this pool as not new + } + } + } + payload.WorkerPools = &changePoolsPayload + + // Check that no Worker Pools are deleted + for _, o := range oldPools.([]interface{}) { + oldPool := o.(map[string]interface{}) + if _, ok := existingPools[oldPool["name"].(string)]; !ok { + return diag.Errorf("the Worker Pool '%s' can't be deleted, but you can scale it to 0", oldPool["name"].(string)) + } + } + + // Fetch the worker pools that are brand new + var addPoolsPayload []govcd.CseWorkerPoolSettings + for _, n := range newPools.([]interface{}) { + newPool := n.(map[string]interface{}) + if _, ok := existingPools[newPool["name"].(string)]; !ok { + addPoolsPayload = 
append(addPoolsPayload, govcd.CseWorkerPoolSettings{ + Name: newPool["name"].(string), + MachineCount: newPool["machine_count"].(int), + DiskSizeGi: newPool["disk_size_gi"].(int), + SizingPolicyId: newPool["sizing_policy_id"].(string), + PlacementPolicyId: newPool["placement_policy_id"].(string), + VGpuPolicyId: newPool["vgpu_policy_id"].(string), + StorageProfileId: newPool["storage_profile_id"].(string), + }) + } + } + payload.NewWorkerPools = &addPoolsPayload + } + if d.HasChange("control_plane") { + controlPlane := govcd.CseControlPlaneUpdateInput{} + for _, controlPlaneAttr := range d.Get("control_plane").([]interface{}) { + c := controlPlaneAttr.(map[string]interface{}) + controlPlane.MachineCount = c["machine_count"].(int) + } + payload.ControlPlane = &controlPlane + } + if d.HasChange("kubernetes_template_id") { + payload.KubernetesTemplateOvaId = addrOf(d.Get("kubernetes_template_id").(string)) + } + + if d.HasChange("node_health_check") { + payload.NodeHealthCheck = addrOf(d.Get("node_health_check").(bool)) + } + + if d.HasChanges("auto_repair_on_errors") { + payload.AutoRepairOnErrors = addrOf(d.Get("auto_repair_on_errors").(bool)) + } + + err = cluster.Update(payload, true) + if err != nil { + return diag.Errorf("Kubernetes cluster update failed: %s", err) + } + + return resourceVcdCseKubernetesRead(ctx, d, meta) +} + +// resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send +// the flags "markForDelete" and "forceDelete" back to true, so the CSE Server is able to delete all cluster elements +// and perform a cleanup. Hence, this function sends an update of just these two properties and waits for the cluster RDE +// to be gone. +func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) + if err != nil { + if govcd.ContainsNotFound(err) { + return nil // The cluster is gone, nothing to do + } + return diag.FromErr(err) + } + err = cluster.Delete(time.Duration(d.Get("operations_timeout_minutes").(int)) * time.Minute) + if err != nil { + return diag.FromErr(err) + } + return nil +} + +func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + vcdClient := meta.(*VCDClient) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) + if err != nil { + return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + warns, err := saveClusterDataToState(d, vcdClient, cluster, "import") + if err != nil { + return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) + } + for _, warn := range warns { + // We can't do much here as Import does not support Diagnostics + logForScreen(cluster.ID, fmt.Sprintf("got a warning during import: %s", warn)) + } + + return []*schema.ResourceData{d}, nil +} + +// saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. +// Returns a slice of warnings first and an error second. 
+func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseKubernetesCluster, origin string) ([]error, error) { + var warnings []error + + dSet(d, "name", cluster.Name) + dSet(d, "cse_version", cluster.CseVersion.Original()) + dSet(d, "runtime", "tkg") // Only one supported + dSet(d, "vdc_id", cluster.VdcId) + dSet(d, "network_id", cluster.NetworkId) + dSet(d, "cpi_version", cluster.CpiVersion.Original()) + dSet(d, "csi_version", cluster.CsiVersion.Original()) + dSet(d, "capvcd_version", cluster.CapvcdVersion.Original()) + dSet(d, "kubernetes_version", cluster.KubernetesVersion.Original()) + dSet(d, "tkg_product_version", cluster.TkgVersion.Original()) + dSet(d, "pods_cidr", cluster.PodCidr) + dSet(d, "services_cidr", cluster.ServiceCidr) + dSet(d, "kubernetes_template_id", cluster.KubernetesTemplateOvaId) + dSet(d, "ssh_public_key", cluster.SshPublicKey) + dSet(d, "virtual_ip_subnet", cluster.VirtualIpSubnet) + dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) + dSet(d, "node_health_check", cluster.NodeHealthCheck) + + // The data source does not have the attribute "org", so we cannot set it + if origin != "datasource" { + // If the Org was set, it needs to be refreshed (it should not change, though) + // We also set it always during imports. + if _, ok := d.GetOk("org"); ok || origin == "import" { + if cluster.OrganizationId != "" { + org, err := vcdClient.GetOrgById(cluster.OrganizationId) + if err != nil { + return nil, fmt.Errorf("could not set 'org' argument: %s", err) + } + dSet(d, "org", org.Org.Name) + } + } + } + + // If the Owner was set, it needs to be refreshed (it should not change, though). + // If the origin is a data source or import, we always need to set this one. + if _, ok := d.GetOk("owner"); ok || origin == "datasource" || origin == "import" { + dSet(d, "owner", cluster.Owner) + } + + err := d.Set("cluster_resource_set_bindings", cluster.ClusterResourceSetBindings) + if err != nil { + return nil, err + } + + workerPoolBlocks := make([]map[string]interface{}, len(cluster.WorkerPools)) + for i, workerPool := range cluster.WorkerPools { + workerPoolBlocks[i] = map[string]interface{}{ + "machine_count": workerPool.MachineCount, + "name": workerPool.Name, + "vgpu_policy_id": workerPool.VGpuPolicyId, + "sizing_policy_id": workerPool.SizingPolicyId, + "placement_policy_id": workerPool.PlacementPolicyId, + "storage_profile_id": workerPool.StorageProfileId, + "disk_size_gi": workerPool.DiskSizeGi, + } + } + // The "worker_pool" argument is a TypeList, not a TypeSet (check the Schema comments for context), + // so we need to guarantee order. We order them by name, which is unique. 
+ sort.SliceStable(workerPoolBlocks, func(i, j int) bool { + return workerPoolBlocks[i]["name"].(string) < workerPoolBlocks[j]["name"].(string) + }) + + err = d.Set("worker_pool", workerPoolBlocks) + if err != nil { + return nil, err + } + + err = d.Set("control_plane", []map[string]interface{}{ + { + "machine_count": cluster.ControlPlane.MachineCount, + "ip": cluster.ControlPlane.Ip, + "sizing_policy_id": cluster.ControlPlane.SizingPolicyId, + "placement_policy_id": cluster.ControlPlane.PlacementPolicyId, + "storage_profile_id": cluster.ControlPlane.StorageProfileId, + "disk_size_gi": cluster.ControlPlane.DiskSizeGi, + }, + }) + if err != nil { + return nil, err + } + + if cluster.DefaultStorageClass != nil { + err = d.Set("default_storage_class", []map[string]interface{}{{ + "storage_profile_id": cluster.DefaultStorageClass.StorageProfileId, + "name": cluster.DefaultStorageClass.Name, + "reclaim_policy": cluster.DefaultStorageClass.ReclaimPolicy, + "filesystem": cluster.DefaultStorageClass.Filesystem, + }}) + if err != nil { + return nil, err + } + } + + dSet(d, "state", cluster.State) + + supportedUpgrades, err := cluster.GetSupportedUpgrades(true) + if err != nil { + return nil, fmt.Errorf("could not fetch the supported upgrades for the Kubernetes cluster with ID '%s': %s", cluster.ID, err) + } + supportedUpgradesNames := make([]string, len(supportedUpgrades)) + for i, upgrade := range supportedUpgrades { + supportedUpgradesNames[i] = upgrade.Name + } + err = d.Set("supported_upgrades", supportedUpgradesNames) + if err != nil { + return nil, err + } + + events := make([]interface{}, len(cluster.Events)) + for i, event := range cluster.Events { + events[i] = map[string]interface{}{ + "resource_id": event.ResourceId, + "name": event.Name, + "occurred_at": event.OccurredAt.String(), + "details": event.Details, + "type": event.Type, + } + } + err = d.Set("events", events) + if err != nil { + return nil, err + } + + if cluster.State == "provisioned" { + kubeconfig, err := cluster.GetKubeconfig(false) + if err != nil { + return nil, fmt.Errorf("error getting Kubeconfig for the Kubernetes cluster with ID '%s': %s", cluster.ID, err) + } + dSet(d, "kubeconfig", kubeconfig) + } else { + warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, meaning that "+ + "the Kubeconfig cannot be retrieved and "+ + "some attributes could be unavailable", cluster.ID, cluster.State)) + } + + d.SetId(cluster.ID) + return warnings, nil +} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go new file mode 100644 index 000000000..7070393f6 --- /dev/null +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -0,0 +1,694 @@ +//go:build cse || ALL + +package vcd + +import ( + "fmt" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/vmware/go-vcloud-director/v2/govcd" + "os" + "reflect" + "regexp" + "strings" + "testing" +) + +func requireCseConfig(t *testing.T, testConfig TestConfig) { + skippedPrefix := fmt.Sprintf("skipped %s because:", t.Name()) + if cse := os.Getenv("TEST_VCD_CSE"); cse == "" { + t.Skipf("%s the environment variable TEST_VCD_CSE is not set", skippedPrefix) + } + cseConfigValues := reflect.ValueOf(testConfig.Cse) + cseConfigType := cseConfigValues.Type() + for i := 0; i < cseConfigValues.NumField(); i++ { + if cseConfigValues.Field(i).String() == "" { + t.Skipf("%s the 
config value '%s' inside 'cse' object of vcd_test_config.json is not set", skippedPrefix, cseConfigType.Field(i).Name) + } + } +} + +func TestAccVcdCseKubernetesCluster(t *testing.T) { + preTestChecks(t) + requireCseConfig(t, testConfig) + + cseVersion, err := semver.NewVersion(testConfig.Cse.Version) + if err != nil { + t.Fatal(err) + } + + v411, err := semver.NewVersion("4.1.1") + if err != nil { + t.Fatal(err) + } + + tokenFilename := getCurrentDir() + t.Name() + ".json" + defer func() { + // Clean the API Token file + if fileExists(tokenFilename) { + err := os.Remove(tokenFilename) + if err != nil { + fmt.Printf("could not delete API token file '%s', please delete it manually", tokenFilename) + } + } + }() + + var params = StringMap{ + "CseVersion": testConfig.Cse.Version, + "Name": strings.ToLower(t.Name()), + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.TenantVdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenName": t.Name(), + "TokenFile": tokenFilename, + "ControlPlaneCount": 1, + "NodePoolCount": 1, + "ExtraWorkerPool": " ", + "PodsCidr": "100.96.0.0/11", + "ServicesCidr": "100.64.0.0/13", + "AutoRepairOnErrors": true, + "NodeHealthCheck": true, + "Timeout": 150, + } + testParamsNotEmpty(t, params) + + step1 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) + + params["FuncName"] = t.Name() + "Step2" + params["AutoRepairOnErrors"] = "false" // Deactivate it to avoid non-empty plans. Also, it is recommended after cluster creation + params["ControlPlaneCount"] = 3 + step2 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) + + params["FuncName"] = t.Name() + "Step3" + params["ControlPlaneCount"] = 1 + params["NodePoolCount"] = 2 + step3 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) + + params["FuncName"] = t.Name() + "Step4" + params["ControlPlaneCount"] = 1 + params["NodePoolCount"] = 1 + params["NodeHealthCheck"] = false + step4 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) + + extraWorkerPool := " worker_pool {\n" + + " name = \"worker-pool-2\"\n" + + " machine_count = 1\n" + + " disk_size_gi = 20\n" + + " sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id\n" + + " storage_profile_id = data.vcd_storage_profile.sp.id\n" + + " }" + + params["FuncName"] = t.Name() + "Step5" + params["NodeHealthCheck"] = true + params["ExtraWorkerPool"] = extraWorkerPool + step5 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step5: %s", step5) + + params["FuncName"] = t.Name() + "Step6" + step6 := templateFill(testAccVcdCseKubernetesCluster+testAccVcdCseKubernetesClusterDS, params) + debugPrintf("#[DEBUG] CONFIGURATION step6: %s", step5) + + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + cacheId := testCachedFieldValue{} + clusterName := "vcd_cse_kubernetes_cluster.my_cluster" + dataWithName := "data.vcd_cse_kubernetes_cluster.with_name_ds" + dataWithId := "data.vcd_cse_kubernetes_cluster.with_id_ds" + resource.Test(t, resource.TestCase{ + ProviderFactories: testAccProviders, + CheckDestroy: func(state *terraform.State) 
error { + if cacheId.fieldValue == "" { + return fmt.Errorf("cached ID '%s' is empty", cacheId.fieldValue) + } + conn := testAccProvider.Meta().(*VCDClient) + _, err := conn.GetRdeById(cacheId.fieldValue) + if err == nil { + return fmt.Errorf("cluster with ID '%s' still exists", cacheId.fieldValue) + } + return nil + }, + Steps: []resource.TestStep{ + // Basic scenario of cluster creation + { + Config: step1, + ExpectNonEmptyPlan: func() bool { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1, + // so it will return a non-empty plan + if cseVersion.GreaterThanOrEqual(v411) { + return true + } else { + return false + } + }(), + Check: resource.ComposeAggregateTestCheckFunc( + cacheId.cacheTestResourceFieldValue(clusterName, "id"), + resource.TestMatchResourceAttr(clusterName, "id", regexp.MustCompile(`^urn:vcloud:entity:vmware:capvcdCluster:.+$`)), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + func() resource.TestCheckFunc { + // 
Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Scale the control plane to 3 replicas + { + Config: step2, + Check: resource.ComposeAggregateTestCheckFunc( + // Control plane should change + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "3"), + + // Other things should remain the same + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", 
"data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Scale down the control plane to 1 replica, scale up worker pool to 2 replicas + { + Config: step3, + Check: resource.ComposeAggregateTestCheckFunc( + // Changed settings + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "2"), + + // Other things should remain the same + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + 
resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Scale down the worker pool to 1 replica, disable health check + { + Config: step4, + Check: resource.ComposeAggregateTestCheckFunc( + // Changed settings + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "false"), + + // Other things should remain the same + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", 
"1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Enable health check, add a worker pool + { + Config: step5, + Check: resource.ComposeAggregateTestCheckFunc( + // The new worker pool should be present + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.name", "worker-pool-2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + + // Other things should remain the same + 
cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + 
resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Test data sources. Can't use resourceFieldsEqual function as we need to ignore the "events" TypeList which has an unknown size + { + Config: step6, + Check: resource.ComposeAggregateTestCheckFunc( + // Data source with name + resource.TestCheckResourceAttrPair(dataWithName, "id", clusterName, "id"), + resource.TestCheckResourceAttrPair(dataWithName, "cse_version", clusterName, "cse_version"), + resource.TestCheckResourceAttrPair(dataWithName, "runtime", clusterName, "runtime"), + resource.TestCheckResourceAttrPair(dataWithName, "name", clusterName, "name"), + resource.TestCheckResourceAttrPair(dataWithName, "kubernetes_template_id", clusterName, "kubernetes_template_id"), + resource.TestMatchResourceAttr(dataWithName, "org_id", regexp.MustCompile(`^urn:vcloud:org:.+$`)), + resource.TestCheckResourceAttrPair(dataWithName, "vdc_id", clusterName, "vdc_id"), + resource.TestCheckResourceAttrPair(dataWithName, "network_id", clusterName, "network_id"), + resource.TestCheckResourceAttrSet(dataWithName, "owner"), // This time the owner can be obtained + resource.TestCheckResourceAttrPair(dataWithName, "ssh_public_key", clusterName, "ssh_public_key"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.disk_size_gi", clusterName, "control_plane.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.sizing_policy_id", clusterName, "control_plane.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.storage_profile_id", clusterName, "control_plane.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.ip", clusterName, "control_plane.0.ip"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.#", clusterName, "worker_pool.#"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.name", clusterName, "worker_pool.0.name"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.machine_count", clusterName, "worker_pool.0.machine_count"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.disk_size_gi", clusterName, "worker_pool.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.sizing_policy_id", clusterName, "worker_pool.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.storage_profile_id", clusterName, "worker_pool.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.storage_profile_id", clusterName, "default_storage_class.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.name", clusterName, "default_storage_class.0.name"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.reclaim_policy", clusterName, "default_storage_class.0.reclaim_policy"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.filesystem", clusterName, "default_storage_class.0.filesystem"), + resource.TestCheckResourceAttrPair(dataWithName, "pods_cidr", clusterName, "pods_cidr"), + resource.TestCheckResourceAttrPair(dataWithName, "services_cidr", clusterName, "services_cidr"), + resource.TestCheckResourceAttrPair(dataWithName, "virtual_ip_subnet", clusterName, "virtual_ip_subnet"), + resource.TestCheckResourceAttrPair(dataWithName, "auto_repair_on_errors", clusterName, "auto_repair_on_errors"), + resource.TestCheckResourceAttrPair(dataWithName, 
"node_health_check", clusterName, "node_health_check"), + resource.TestCheckResourceAttrPair(dataWithName, "kubernetes_version", clusterName, "kubernetes_version"), + resource.TestCheckResourceAttrPair(dataWithName, "tkg_product_version", clusterName, "tkg_product_version"), + resource.TestCheckResourceAttrPair(dataWithName, "capvcd_version", clusterName, "capvcd_version"), + resource.TestCheckResourceAttrPair(dataWithName, "cluster_resource_set_bindings.#", clusterName, "cluster_resource_set_bindings.#"), + resource.TestCheckResourceAttrPair(dataWithName, "cpi_version", clusterName, "cpi_version"), + resource.TestCheckResourceAttrPair(dataWithName, "csi_version", clusterName, "csi_version"), + resource.TestCheckResourceAttrPair(dataWithName, "state", clusterName, "state"), + resource.TestCheckResourceAttrPair(dataWithName, "kubeconfig", clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(dataWithName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + + // Data source with ID + resource.TestCheckResourceAttrPair(dataWithId, "id", dataWithName, "id"), + resource.TestCheckResourceAttrPair(dataWithId, "cse_version", dataWithName, "cse_version"), + resource.TestCheckResourceAttrPair(dataWithId, "runtime", dataWithName, "runtime"), + resource.TestCheckResourceAttrPair(dataWithId, "name", dataWithName, "name"), + resource.TestCheckResourceAttrPair(dataWithId, "kubernetes_template_id", dataWithName, "kubernetes_template_id"), + resource.TestCheckResourceAttrPair(dataWithId, "org_id", dataWithName, "org_id"), + resource.TestCheckResourceAttrPair(dataWithId, "vdc_id", dataWithName, "vdc_id"), + resource.TestCheckResourceAttrPair(dataWithId, "network_id", dataWithName, "network_id"), + resource.TestCheckResourceAttrPair(dataWithId, "owner", dataWithName, "owner"), + resource.TestCheckResourceAttrPair(dataWithId, "ssh_public_key", dataWithName, "ssh_public_key"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.disk_size_gi", dataWithName, "control_plane.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.sizing_policy_id", dataWithName, "control_plane.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.storage_profile_id", dataWithName, "control_plane.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.ip", dataWithName, "control_plane.0.ip"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.#", dataWithName, "worker_pool.#"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.name", dataWithName, "worker_pool.0.name"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.machine_count", dataWithName, "worker_pool.0.machine_count"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.disk_size_gi", dataWithName, "worker_pool.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.sizing_policy_id", dataWithName, "worker_pool.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.storage_profile_id", dataWithName, "worker_pool.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.storage_profile_id", dataWithName, "default_storage_class.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.name", dataWithName, "default_storage_class.0.name"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.reclaim_policy", dataWithName, 
"default_storage_class.0.reclaim_policy"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.filesystem", dataWithName, "default_storage_class.0.filesystem"), + resource.TestCheckResourceAttrPair(dataWithId, "pods_cidr", dataWithName, "pods_cidr"), + resource.TestCheckResourceAttrPair(dataWithId, "services_cidr", dataWithName, "services_cidr"), + resource.TestCheckResourceAttrPair(dataWithId, "virtual_ip_subnet", dataWithName, "virtual_ip_subnet"), + resource.TestCheckResourceAttrPair(dataWithId, "auto_repair_on_errors", dataWithName, "auto_repair_on_errors"), + resource.TestCheckResourceAttrPair(dataWithId, "node_health_check", dataWithName, "node_health_check"), + resource.TestCheckResourceAttrPair(dataWithId, "kubernetes_version", dataWithName, "kubernetes_version"), + resource.TestCheckResourceAttrPair(dataWithId, "tkg_product_version", dataWithName, "tkg_product_version"), + resource.TestCheckResourceAttrPair(dataWithId, "capvcd_version", dataWithName, "capvcd_version"), + resource.TestCheckResourceAttrPair(dataWithId, "cluster_resource_set_bindings.#", dataWithName, "cluster_resource_set_bindings.#"), + resource.TestCheckResourceAttrPair(dataWithId, "cpi_version", dataWithName, "cpi_version"), + resource.TestCheckResourceAttrPair(dataWithId, "csi_version", dataWithName, "csi_version"), + resource.TestCheckResourceAttrPair(dataWithId, "state", dataWithName, "state"), + resource.TestCheckResourceAttrPair(dataWithId, "kubeconfig", dataWithName, "kubeconfig"), + resource.TestMatchResourceAttr(dataWithId, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + { + ResourceName: clusterName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: func(state *terraform.State) (string, error) { + return cacheId.fieldValue, nil + }, + // Ignore 'api_token_file' and 'operations_timeout_minutes' as these are not computed from VCD, so they are missing + // after any successful import. + // Ignore also 'owner' and 'org' as these may not be set in the resource configuration, but they are always + // set on imports. + // 'events' is ignored as the list may differ between runs. 
+ ImportStateVerifyIgnore: []string{"api_token_file", "operations_timeout_minutes", "owner", "org", "events"}, + }, + }, + }) + postTestChecks(t) +} + +func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { + preTestChecks(t) + requireCseConfig(t, testConfig) + + vcdClient := createSystemTemporaryVCDConnection() + + cseVersion, err := semver.NewVersion(testConfig.Cse.Version) + if err != nil { + t.Fatal(err) + } + + tokenFilename := getCurrentDir() + t.Name() + ".json" + defer func() { + // Clean the API Token file + if fileExists(tokenFilename) { + err := os.Remove(tokenFilename) + if err != nil { + fmt.Printf("could not delete API token file '%s', please delete it manually", tokenFilename) + } + } + }() + + clusterName := "cse-k8s-cluster-failure" // We can't use the test name as it is too long + + var params = StringMap{ + "CseVersion": testConfig.Cse.Version, + "Name": clusterName, + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.TenantVdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenName": t.Name(), + "TokenFile": tokenFilename, + "ControlPlaneCount": 1, + "NodePoolCount": 1, + "ExtraWorkerPool": " ", + "PodsCidr": "1.2.3.4/24", // This will make the cluster to fail + "ServicesCidr": "5.6.7.8/24", // This will make the cluster to fail + "AutoRepairOnErrors": false, // This must be false + "NodeHealthCheck": false, + "Timeout": 150, + } + testParamsNotEmpty(t, params) + + step1 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) + + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + resource.Test(t, resource.TestCase{ + ProviderFactories: testAccProviders, + CheckDestroy: func(state *terraform.State) error { + org, err := vcdClient.GetOrgByName(testConfig.Cse.TenantOrg) + if err != nil { + return fmt.Errorf("could not check cluster deletion: %s", err) + } + clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, clusterName) + if err != nil && !govcd.ContainsNotFound(err) { + return fmt.Errorf("could not check cluster deletion: %s", err) + } + if len(clusters) == 0 || govcd.ContainsNotFound(err) { + return nil + } + return fmt.Errorf("there are still %d clusters with name '%s': %s", len(clusters), clusterName, err) + }, + Steps: []resource.TestStep{ + { + Config: step1, + ExpectError: regexp.MustCompile(`Kubernetes cluster creation finished, but it is not in 'provisioned' state`), + }, + }, + }) + postTestChecks(t) +} + +const testAccVcdCseKubernetesCluster = ` +# skip-binary-test - This one requires a very special setup + +data "vcd_catalog" "tkg_catalog" { + org = "{{.SolutionsOrg}}" + name = "{{.OvaCatalog}}" +} + +data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "{{.OvaName}}" +} + +data "vcd_org_vdc" "vdc" { + org = "{{.TenantOrg}}" + name = "{{.Vdc}}" +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "{{.EdgeGateway}}" +} + +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "{{.Network}}" +} + +data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +data 
"vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" +} + +resource "vcd_api_token" "token" { + name = "{{.TokenName}}" + file_name = "{{.TokenFile}}" + allow_token_file = true +} + +resource "vcd_cse_kubernetes_cluster" "my_cluster" { + cse_version = "{{.CseVersion}}" + runtime = "tkg" + name = "{{.Name}}" + kubernetes_template_id = {{.KubernetesOva}} + org = data.vcd_org_vdc.vdc.org + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name + + control_plane { + machine_count = {{.ControlPlaneCount}} + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + worker_pool { + name = "worker-pool-1" + machine_count = {{.NodePoolCount}} + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + {{.ExtraWorkerPool}} + + default_storage_class { + name = "sc-1" + storage_profile_id = data.vcd_storage_profile.sp.id + reclaim_policy = "delete" + filesystem = "ext4" + } + + pods_cidr = "{{.PodsCidr}}" + services_cidr = "{{.ServicesCidr}}" + + auto_repair_on_errors = {{.AutoRepairOnErrors}} + node_health_check = {{.NodeHealthCheck}} + + operations_timeout_minutes = {{.Timeout}} +} +` + +const testAccVcdCseKubernetesClusterDS = ` +# skip-binary-test - This one requires a very special setup + +data "vcd_org" "tenant_org" { + name = "tenant_org" +} + +data "vcd_cse_kubernetes_cluster" "with_id_ds" { + cluster_id = vcd_cse_kubernetes_cluster.my_cluster.id +} + +data "vcd_cse_kubernetes_cluster" "with_name_ds" { + org_id = data.vcd_org.tenant_org.id + cse_version = vcd_cse_kubernetes_cluster.my_cluster.cse_version + name = vcd_cse_kubernetes_cluster.my_cluster.name +} +` diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index 40e0e77cc..b85b0fa96 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -225,5 +225,17 @@ "VCD_SKIP_TEMPLATE_WRITING": "", "VCD_ADD_PROVIDER": "", "REMOVE_ORG_VDC_FROM_TEMPLATE": "" + }, + "cse" :{ + "//" : "Only needed to test Container Service Extension specific resources", + "version": "4.2.0", + "solutionsOrg" : "solutions_org", + "storageProfile": "*", + "tenantOrg" : "tenant_org", + "tenantVdc": "tenant_vdc", + "routedNetwork": "tenant_net_routed", + "edgeGateway": "tenant_edgegateway", + "ovaCatalog": "tkgm_catalog", + "ovaName": "" } } diff --git a/vcd/testcheck_funcs_test.go b/vcd/testcheck_funcs_test.go index d232c92f8..e5c82fc0a 100644 --- a/vcd/testcheck_funcs_test.go +++ b/vcd/testcheck_funcs_test.go @@ -1,4 +1,4 @@ -//go:build api || vapp || vm || user || nsxt || extnetwork || network || gateway || catalog || standaloneVm || alb || vdcGroup || ldap || vdc || access_control || rde || uiPlugin || org || disk || providerVdc || ALL || functional +//go:build api || vapp || vm || user || nsxt || extnetwork || network || gateway || catalog || standaloneVm || alb || vdcGroup || ldap || vdc || access_control || rde || uiPlugin || org || disk || providerVdc || cse || ALL || functional package vcd diff --git a/vcd/validate_funcs.go b/vcd/validate_funcs.go index 58393f2a9..1ac00dc9c 100644 --- a/vcd/validate_funcs.go +++ b/vcd/validate_funcs.go @@ -171,6 +171,38 @@ func IsIntAndAtLeast(min int) schema.SchemaValidateFunc { } } +// minimumValue returns a SchemaValidateDiagFunc that tests if the provided value is at least min 
(inclusive) +func minimumValue(min int, errorMessage string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v'", v) + } + if value < min { + return diag.Errorf("%s: %d < %d", errorMessage, value, min) + } + return nil + } +} + +// matchRegex returns a SchemaValidateDiagFunc that tests whether the provided value matches the regular expression +func matchRegex(regex, errorMessage string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(string) + if !ok { + return diag.Errorf("could not parse string value '%v'", v) + } + r, err := regexp.Compile(regex) + if err != nil { + return diag.Errorf("could not compile regular expression '%s': %s", regex, err) + } + if !r.MatchString(value) { + return diag.Errorf("%s", errorMessage) + } + return nil + } +} + // IsFloatAndBetween returns a SchemaValidateFunc which tests if the provided value convertable to // float64 and is between min and max (inclusive). func IsFloatAndBetween(min, max float64) schema.SchemaValidateFunc { diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown new file mode 100644 index 000000000..bd4d76e40 --- /dev/null +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -0,0 +1,66 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_cse_kubernetes_cluster" +sidebar_current: "docs-vcd-data-source-cse-kubernetes-cluster" +description: |- + Provides a data source to read Kubernetes clusters from VMware Cloud Director with Container Service Extension installed and running. +--- + +# vcd\_cse\_kubernetes\_cluster + +Provides a data source to read Kubernetes clusters in VMware Cloud Director with Container Service Extension (CSE) installed and running. + +Supported in provider *v3.12+* + +Supports the following **Container Service Extension** versions: + +* 4.1.0 +* 4.1.1 / 4.1.1a +* 4.2.0 +* 4.2.1 + +-> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) + +## Example Usage with ID + +The cluster ID identifies unequivocally the cluster within VCD, and can be obtained with the CSE Kubernetes Clusters UI Plugin, by selecting +the desired cluster and obtaining the ID from the displayed information. + +```hcl +data "vcd_cse_kubernetes_cluster" "my_cluster" { + cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" +} +``` + +## Example Usage with Name + +Sometimes using the cluster ID is not convenient, so this data source allows using the cluster name. +As VCD allows to have multiple clusters with the same name, this option must be used with caution as it will fail +if there is more than one Kubernetes cluster with the same name in the same Organization: + +```hcl +locals { + my_clusters = toset(["my-cluster-1", "my-cluster-2", "my-cluster-3"]) +} + +data "vcd_cse_kubernetes_cluster" "my_clusters" { + for_each = local.my_clusters + org_id = data.vcd_org.org.id + cse_version = "4.2.1" + name = each.key +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Optional) Unequivocally identifies a cluster in VCD. Either `cluster_id` or `name` must be set. +* `name` - (Optional) Allows to find a Kubernetes cluster by name inside the given Organization with ID `org_id`. 
Either `cluster_id` or `name` must be set. This argument requires `cse_version` and `org_id` to be set. +* `org_id` - (Optional) The ID of the Organization to which the Kubernetes cluster belongs. Only used if `cluster_id` is not set. Must be present if `name` is used. +* `cse_version` - (Optional) Specifies the CSE Version of the cluster to find when `name` is used instead of `cluster_id`. + +## Attribute Reference + +All attributes defined in [vcd_cse_kubernetes_cluster](/providers/vmware/vcd/latest/docs/resources/cse_kubernetes_cluster) resource are supported. +Also, the resource arguments are also available as read-only attributes. diff --git a/website/docs/d/version.html.markdown b/website/docs/d/version.html.markdown new file mode 100644 index 000000000..46c0fc504 --- /dev/null +++ b/website/docs/d/version.html.markdown @@ -0,0 +1,65 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_version" +sidebar_current: "docs-vcd-data-source-version" +description: |- + Provides a VCD version data source. +--- + +# vcd\_version + +Provides a VMware Cloud Director version data source to fetch the VCD version, the maximum supported API version and +perform some optional checks with version constraints. + +Supported in provider *v3.12+*. Requires System Administrator privileges. + +## Example Usage + +```hcl +# This data source will assert that the VCD version is exactly 10.5.1, otherwise it will fail +data "vcd_version" "eq_1051" { + condition = "= 10.5.1" + fail_if_not_match = true +} + +# This data source will assert that the VCD version is greater than or equal to 10.4.2, but it won't fail if it is not +data "vcd_version" "gte_1042" { + condition = ">= 10.4.2" + fail_if_not_match = false +} + +output "is_gte_1042" { + value = data.vcd_version.gte_1042.matches_condition # Will show false if we're using a VCD version < 10.4.2 +} + +# This data source will assert that the VCD version is less than 10.5.0 +data "vcd_version" "lt_1050" { + condition = "< 10.5.0" + fail_if_not_match = true +} + +# This data source will assert that the VCD version is 10.5.X +data "vcd_version" "is_105" { + condition = "~> 10.5" + fail_if_not_match = true +} + +# This data source will assert that the VCD version is not 10.5.1 +data "vcd_version" "not_1051" { + condition = "!= 10.5.1" + fail_if_not_match = true +} +``` + +## Argument Reference + +The following arguments are supported: + +* `condition` - (Optional) A version constraint to check against the VCD version +* `fail_if_not_match` - (Optional) Required if `condition` is set. Throws an error if the version constraint set in `condition` is not met + +## Attribute Reference + +* `matches_condition` - It is true if the VCD version matches the constraint set in `condition` +* `vcd_version` - The VCD version +* `api_version` - The maximum supported API version diff --git a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown index 3bb1de3c7..5e1392a9d 100644 --- a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown @@ -8,6 +8,11 @@ description: |- # Container Service Extension v4.1 Kubernetes clusters management +~> **This guide is DEPRECATED since v3.12+**. 
You should use the resource +[`vcd_cse_kubernetes_cluster`](/providers/vmware/vcd/latest/docs/resources/cse_kubernetes_cluster) +to provision and manage Kubernetes clusters in a VCD appliance where Container Service Extension is installed +and running. + ## About This guide explains how to create, update and delete **Tanzu Kubernetes Grid multicloud (TKGm)** clusters in a VCD appliance with Container Service Extension v4.1 diff --git a/website/docs/guides/container_service_extension_4_x_install.html.markdown b/website/docs/guides/container_service_extension_4_x_install.html.markdown index 8c7343cb5..d87d4c58c 100644 --- a/website/docs/guides/container_service_extension_4_x_install.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_install.html.markdown @@ -1,19 +1,19 @@ --- layout: "vcd" -page_title: "VMware Cloud Director: Container Service Extension v4.1 installation" +page_title: "VMware Cloud Director: Container Service Extension 4.2 installation" sidebar_current: "docs-vcd-guides-cse-4-x-install" description: |- - Provides guidance on configuring VCD to be able to install and use Container Service Extension v4.1 + Provides guidance on configuring VCD to be able to install and use Container Service Extension 4.2 --- -# Container Service Extension v4.1 installation +# Container Service Extension 4.2 installation ## About -This guide describes the required steps to configure VCD to install the Container Service Extension (CSE) v4.1, that +This guide describes the required steps to configure VCD to install the Container Service Extension (CSE) 4.2, that will allow tenant users to deploy **Tanzu Kubernetes Grid Multi-cloud (TKGm)** clusters on VCD using Terraform or the UI. -To know more about CSE v4.1, you can visit [the documentation][cse_docs]. +To know more about CSE 4.2, you can visit [the documentation][cse_docs]. ## Pre-requisites @@ -21,15 +21,15 @@ To know more about CSE v4.1, you can visit [the documentation][cse_docs]. In order to complete the steps described in this guide, please be aware: -* CSE v4.1 is supported from VCD v10.4.2 or above, as specified in the [Product Interoperability Matrix][product_matrix]. +* CSE 4.2 is supported from VCD v10.4.2 or above, as specified in the [Product Interoperability Matrix][product_matrix]. Please check that the target VCD appliance matches the criteria. -* Terraform provider needs to be v3.11.0 or above. +* Terraform provider needs to be v3.12.0 or above. * Both CSE Server and the Bootstrap clusters require outbound Internet connectivity. -* CSE v4.1 makes use of [ALB](/providers/vmware/vcd/latest/docs/guides/nsxt_alb) capabilities. +* CSE 4.2 makes use of [ALB](/providers/vmware/vcd/latest/docs/guides/nsxt_alb) capabilities. ## Installation process --> To install CSE v4.1, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install). +-> To install CSE 4.2, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install). You can check it, customise it to your needs and apply. However, reading this guide first is recommended to understand what it does and how to use it. The installation process is split in two independent steps that must be run one after the other: @@ -57,7 +57,7 @@ modified and be applied as they are. 
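+-> The pre-requisites above state that CSE 4.2 needs VCD 10.4.2 or above. As an optional, illustrative sketch (the data source label `cse_min_vcd` is arbitrary), the new `vcd_version` data source added in v3.12.0 can turn that requirement into an early failure at plan time:
+
+```hcl
+# Illustrative only: fail early if the target VCD does not meet the CSE 4.2 requirement
+data "vcd_version" "cse_min_vcd" {
+  condition         = ">= 10.4.2"
+  fail_if_not_match = true
+}
+```
+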
#### RDE Interfaces, Types and Behaviors -CSE v4.1 requires a set of Runtime Defined Entity items, such as [Interfaces][rde_interface], [Types][rde_type] and [Behaviors][rde_interface_behavior]. +CSE 4.2 requires a set of Runtime Defined Entity items, such as [Interfaces][rde_interface], [Types][rde_type] and [Behaviors][rde_interface_behavior]. In the [step 1 configuration][step1] you can find the following: * The required `VCDKEConfig` [RDE Interface][rde_interface] and [RDE Type][rde_type]. These two resources specify the schema of the **CSE Server @@ -76,14 +76,15 @@ To customise it, the [step 1 configuration][step1] asks for the following variab * `vcdkeconfig_template_filepath` references a local file that defines the `VCDKEConfig` [RDE][rde] contents. It should be a JSON file with template variables that Terraform can interpret, like - [the RDE template file for CSE v4.1](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/entities/vcdkeconfig.json.template) + [the RDE template file for CSE 4.2](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template) used in the step 1 configuration, that can be rendered correctly with the Terraform built-in function `templatefile`. - (Note: In `terraform.tfvars.example` the path for the CSE v4.1 RDE contents is already provided). -* `capvcd_version`: The version for CAPVCD. The default value is **"1.1.0"** for CSE v4.1. + (Note: In `terraform.tfvars.example` the path for the CSE 4.2 RDE contents is already provided). +* `capvcd_version`: The version for CAPVCD. The default value is **"1.1.0"** for CSE 4.2. (Note: Do not confuse with the version of the `capvcdCluster` [RDE Type][rde_type], - which **must be "1.2.0"** for CSE v4.1 and cannot be changed through a variable). -* `cpi_version`: The version for CPI (Cloud Provider Interface). The default value is **"1.4.0"** for CSE v4.1. -* `csi_version`: The version for CSI (Cloud Storage Interface). The default value is **"1.4.0"** for CSE v4.1. + which **must be "1.3.0"** for CSE 4.2 and cannot be changed through a variable). +* `cpi_version`: The version for CPI (Cloud Provider Interface). The default value is **"1.5.0"** for CSE 4.2. +* `csi_version`: The version for CSI (Cloud Storage Interface). The default value is **"1.5.0"** for CSE 4.2. +* `rde_projector_version`: The version for the RDE Projector. The default value is **"0.7.0"** for CSE 4.2. * `github_personal_access_token`: Create this one [here](https://github.com/settings/tokens), this will avoid installation errors caused by GitHub rate limiting, as the TKGm cluster creation process requires downloading some Kubernetes components from GitHub. @@ -106,7 +107,7 @@ To customise it, the [step 1 configuration][step1] asks for the following variab #### Rights, Roles and VM Sizing Policies -CSE v4.1 requires a set of new [Rights Bundles][rights_bundle], [Roles][role] and [VM Sizing Policies][sizing] that are also created +CSE 4.2 requires a set of new [Rights Bundles][rights_bundle], [Roles][role] and [VM Sizing Policies][sizing] that are also created in this step of the [step 1 configuration][step1]. Nothing should be customised here, except for the "CSE Administrator" account to be created, where you can provide a username of your choice (`cse_admin_username`) and its password (`cse_admin_password`). 
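+As a small illustration (the values below are placeholders, not defaults shipped with the examples), the CSE Administrator credentials mentioned above are provided in the step 1 `terraform.tfvars` like any other input variable:
+
+```hcl
+# Placeholder values: choose your own CSE Administrator username and a strong password
+cse_admin_username = "cse_admin"
+cse_admin_password = "change-me"
+```
+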
@@ -120,7 +121,7 @@ Once all variables are reviewed and set, you can start the installation with `te ~> Make sure that the previous step is successfully completed. -This step will create all the remaining elements to install CSE v4.1 in VCD. You can read the subsequent sections +This step will create all the remaining elements to install CSE 4.2 in VCD. You can read the subsequent sections to have a better understanding of the building blocks that are described in the [step 2 Terraform configuration][step2]. In this [configuration][step2] you can also find a file named `terraform.tfvars.example` that needs to be updated with correct values and renamed to `terraform.tfvars` @@ -171,7 +172,7 @@ Then it will upload the required OVAs to them. The OVAs can be specified in `ter * `tkgm_ova_folder`: This will reference the path to the TKGm OVA, as an absolute or relative path. It should **not** end with a trailing `/`. * `tkgm_ova_files`: This will reference the file names of the TKGm OVAs, like `[ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova, ubuntu-2004-kube-v1.24.11+vmware.1-tkg.1-2ccb2a001f8bd8f15f1bfbc811071830.ova]`. * `cse_ova_folder`: This will reference the path to the CSE OVA, as an absolute or relative path. It should **not** end with a trailing `/`. -* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova`. +* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.2.1.ova`. -> To download the required OVAs, please refer to the [CSE documentation][cse_docs]. You can also check the [Product Interoperability Matrix][product_matrix] to confirm the appropriate version of TKGm. @@ -182,7 +183,7 @@ In case you're using a pre-uploaded OVA, leverage the [vcd_catalog_vapp_template #### Networking -The [step 2 configuration][step2] prepares a basic networking layout that will make CSE v4.1 work. However, it is +The [step 2 configuration][step2] prepares a basic networking layout that will make CSE 4.2 work. However, it is recommended that you review the code and adapt the different parts to your needs, specially for the resources like `vcd_nsxt_firewall`. The configuration will create the following: @@ -283,7 +284,7 @@ or if your tenant users are not familiar with Terraform, they will be still able with the UI. If you decide to install it, `k8s_container_clusters_ui_plugin_path` should point to the -[Kubernetes Container Clusters UI plug-in v4.1][cse_docs] ZIP file that you can download in the [CSE documentation][cse_docs]. +[Kubernetes Container Clusters UI plug-in 4.2][cse_docs] ZIP file that you can download in the [CSE documentation][cse_docs]. ### Final considerations @@ -337,65 +338,24 @@ The most common issues are: * Cluster creation is failing: * Please visit the [CSE documentation][cse_docs] to learn how to monitor the logs and troubleshoot possible problems. -## Upgrade from CSE v4.0 to v4.1 +## Upgrade from CSE 4.1 to 4.2 -In this section you can find the required steps to update from CSE v4.0 to v4.1. +In this section you can find the required steps to update from CSE 4.1 to 4.2. -~> This section assumes that the old CSE v4.0 installation was done with Terraform by following the v4.0 guide steps. +~> This section assumes that the old CSE 4.1 installation was done with Terraform by following the 4.1 guide steps. Also, you need to meet [the pre-requisites criteria](#pre-requisites). 
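+-> As an optional, illustrative sketch (the data source label and output name are made up for this example), the `vcd_version` data source can also report whether the target VCD meets the pre-requisites without aborting the plan, which may help validate the environment before starting the upgrade:
+
+```hcl
+# Illustrative only: report (but do not enforce) whether the VCD meets the 10.4.2 requirement
+data "vcd_version" "upgrade_check" {
+  condition         = ">= 10.4.2"
+  fail_if_not_match = false
+}
+
+output "meets_cse_42_requirements" {
+  value = data.vcd_version.upgrade_check.matches_condition
+}
+```
+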
### Create the new RDE elements -A new [RDE Interface][rde_interface] needs to be created, which is required by the new v4.1 version: - -```hcl -resource "vcd_rde_interface" "cse_interface" { - vendor = "cse" - nss = "capvcd" - version = "1.0.0" - name = "cseInterface" -} -``` - -CSE v4.1 also requires the usage of [RDE Interface Behaviors][rde_interface_behavior] and -[RDE Behavior Access Controls][rde_type_behavior_acl] that can be created with the following snippets (these can -also be found in the [step 1 configuration][step1]): - -```hcl -resource "vcd_rde_interface_behavior" "capvcd_behavior" { - rde_interface_id = vcd_rde_interface.cse_interface.id - name = "getFullEntity" - execution = { - "type" : "noop" - "id" : "getFullEntity" - } -} - -resource "vcd_rde_type_behavior_acl" "capvcd_behavior_acl" { - rde_type_id = vcd_rde_type.capvcdcluster_type_v120.id # This definition is below - behavior_id = vcd_rde_interface_behavior.capvcd_behavior.id - access_level_ids = ["urn:vcloud:accessLevel:FullControl"] -} -``` - -Create a new version of the [RDE Types][rde_type] that were used in v4.0. This will allow them to co-exist with the old ones, +Create a new version of the [RDE Types][rde_type] that were used in 4.1. This will allow them to co-exist with the old ones, so we can perform a smooth upgrade. ```hcl -resource "vcd_rde_type" "vcdkeconfig_type_v110" { - # Same attributes as v4.1, except for: - version = "1.1.0" # New version - # New schema: - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.1/schemas/vcdkeconfig-type-schema-v1.1.0.json" -} - -resource "vcd_rde_type" "capvcdcluster_type_v120" { - # Same attributes as v4.1, except for: - version = "1.2.0" # New version +resource "vcd_rde_type" "capvcdcluster_type_v130" { + # Same attributes as 4.1, except for: + version = "1.3.0" # New version # New schema: - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.1/schemas/capvcd-type-schema-v1.2.0.json" - # Notice that the new interface cse:capvcd:1.0.0 is used - interface_ids = [data.vcd_rde_interface.kubernetes_interface.id, vcd_rde_interface.cse_interface.id] + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json" # Behaviors need to be created before any RDE Type depends_on = [vcd_rde_interface_behavior.capvcd_behavior] } @@ -404,22 +364,15 @@ resource "vcd_rde_type" "capvcdcluster_type_v120" { ### Upgrade the VCDKEConfig RDE (CSE Server configuration) With the new [RDE Types][rde_type] in place, you need to perform an upgrade of the existing `VCDKEConfig` [RDE][rde], which -stores the CSE Server configuration. By using the v3.11.0 of the VCD Terraform Provider, you can do this update without forcing +stores the CSE Server configuration. 
By using the v3.12.0 of the VCD Terraform Provider, you can do this update without forcing a replacement: ```hcl resource "vcd_rde" "vcdkeconfig_instance" { # Same values as before, except: - rde_type_id = vcd_rde_type.vcdkeconfig_type_v110.id # Update to the new RDE Type input_entity = templatefile(var.vcdkeconfig_template_filepath, { # Same values as before, except: - node_startup_timeout = var.node_startup_timeout - node_not_ready_timeout = var.node_not_ready_timeout - node_unknown_timeout = var.node_unknown_timeout - max_unhealthy_node_percentage = var.max_unhealthy_node_percentage - container_registry_url = var.container_registry_url - k8s_cluster_certificates = join(",", var.k8s_cluster_certificates) - bootstrap_vm_certificates = join(",", var.bootstrap_vm_certificates) + rde_projector_version = "0.7.0" }) } ``` @@ -427,24 +380,19 @@ resource "vcd_rde" "vcdkeconfig_instance" { You can find the meaning of these values in the section ["RDE (CSE Server configuration / VCDKEConfig)"](#rde-cse-server-configuration--vcdkeconfig). Please notice that you need to upgrade the CAPVCD, CPI and CSI versions. The new values are stated in the same section. -### Update Rights and Roles - -There are differences between the rights needed in v4.0 and v4.1. You can check the resources `vcd_rights_bundle.k8s_clusters_rights_bundle` and -`vcd_global_role.k8s_cluster_author` in the [step 1 configuration][step1] to see the new required set of rights. - -### Upload the new CSE v4.1 OVA +### Upload the new CSE 4.2 OVA -You need to upload the new CSE v4.1 OVA to the `cse_catalog` that already hosts the CSE v4.0 one. +You need to upload the new CSE 4.2 OVA to the `cse_catalog` that already hosts the CSE 4.1 one. To download the required OVAs, please refer to the [CSE documentation][cse_docs]. ```hcl -resource "vcd_catalog_vapp_template" "cse_ova_v4_1" { - org = vcd_org.solutions_organization.name # References the Solutions Organization that already exists from v4.0 - catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog that already exists from v4.0 +resource "vcd_catalog_vapp_template" "cse_ova_4_2" { + org = vcd_org.solutions_organization.name # References the Solutions Organization that already exists from 4.1 + catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog that already exists from 4.1 - name = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - description = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - ova_path = "VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" + name = "VMware_Cloud_Director_Container_Service_Extension-4.2.0" + description = "VMware_Cloud_Director_Container_Service_Extension-4.2.0" + ova_path = "VMware_Cloud_Director_Container_Service_Extension-4.2.0.ova" } ``` @@ -455,11 +403,11 @@ To update the CSE Server, just change the referenced OVA: ```hcl resource "vcd_vapp_vm" "cse_server_vm" { # All values remain the same, except: - vapp_template_id = vcd_catalog_vapp_template.cse_ova_v4_1.id # Reference the v4.1 OVA + vapp_template_id = vcd_catalog_vapp_template.cse_ova_4_2.id # Reference the 4.2 OVA } ``` -This will re-deploy the VM with the new CSE v4.1 Server. +This will re-deploy the VM with the new CSE 4.2 Server. ## Update CSE Server Configuration @@ -501,14 +449,14 @@ In the [step 2 configuration][step2], you can find the `cse_ova` [vApp Template] Then you can create a new `vcd_catalog_vapp_template` and modify `cse_server_vm` to reference it: ```hcl -# Uploads a new CSE Server OVA. 
In the example below, we upload version 4.1.0 +# Uploads a new CSE Server OVA. In the example below, we upload version 4.2.1 resource "vcd_catalog_vapp_template" "new_cse_ova" { org = vcd_org.solutions_organization.name # References the Solutions Organization catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog - name = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - description = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - ova_path = "/home/bob/cse/VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" + name = "VMware_Cloud_Director_Container_Service_Extension-4.2.1" + description = "VMware_Cloud_Director_Container_Service_Extension-4.2.1" + ova_path = "/home/bob/cse/VMware_Cloud_Director_Container_Service_Extension-4.2.1.ova" } # ... @@ -556,8 +504,8 @@ Once all clusters are removed in the background by CSE Server, you may destroy t [role]: /providers/vmware/vcd/latest/docs/resources/role [routed_network]: /providers/vmware/vcd/latest/docs/resources/network_routed_v2 [sizing]: /providers/vmware/vcd/latest/docs/resources/vm_sizing_policy -[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install/step1 -[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install/step2 +[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install/step1 +[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install/step2 [tkgm_docs]: https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/index.html [user]: /providers/vmware/vcd/latest/docs/resources/org_user [ui_plugin]: /providers/vmware/vcd/latest/docs/resources/ui_plugin diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown new file mode 100644 index 000000000..66934d7d0 --- /dev/null +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -0,0 +1,381 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_cse_kubernetes_cluster" +sidebar_current: "docs-vcd-resource-cse-kubernetes-cluster" +description: |- + Provides a resource to manage Kubernetes clusters in VMware Cloud Director with Container Service Extension installed and running. +--- + +# vcd\_cse\_kubernetes\_cluster + +Provides a resource to manage Kubernetes clusters in VMware Cloud Director with Container Service Extension (CSE) installed and running. + +Supported in provider *v3.12+* + +Supports the following **Container Service Extension** versions: + +* 4.1.0 +* 4.1.1 / 4.1.1a +* 4.2.0 +* 4.2.1 + +-> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) + +## Example Usage + +```hcl +data "vcd_catalog" "tkg_catalog" { + org = "solutions_org" # The catalog is shared with 'tenant_org', so it is visible for tenants + name = "tkgm_catalog" +} + +# Fetch a valid Kubernetes template OVA. If it's not valid, cluster creation will fail. 
+data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" +} + +data "vcd_org_vdc" "vdc" { + org = "tenant_org" + name = "tenant_vdc" +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "tenant_edgegateway" +} + +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "tenant_net_routed" +} + +# Fetch a valid Sizing policy created during CSE installation. +# Refer to the CSE installation guide for more information. +data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +data "vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" +} + +# The token file is required, and it should be safely stored +resource "vcd_api_token" "token" { + name = "myClusterToken" + file_name = "/home/Bob/safely_stored_token.json" + allow_token_file = true +} + +resource "vcd_cse_kubernetes_cluster" "my_cluster" { + cse_version = "4.2.1" + runtime = "tkg" + name = "test2" + kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id + org = data.vcd_org_vdc.vdc.org + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name + + control_plane { + machine_count = 1 + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + worker_pool { + name = "node-pool-1" + machine_count = 1 + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + default_storage_class { + name = "sc-1" + storage_profile_id = data.vcd_storage_profile.sp.id + reclaim_policy = "delete" + filesystem = "ext4" + } + + auto_repair_on_errors = true + node_health_check = true + + operations_timeout_minutes = 0 +} + +output "kubeconfig" { + value = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1` (also for *4.1.1a*), `4.2.0` and `4.2.1` +* `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) +* `name` - (Required) The name of the Kubernetes cluster. It must contain only lowercase alphanumeric characters or "-", + start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters +* `kubernetes_template_id` - (Required) The ID of the vApp Template that corresponds to a Kubernetes template OVA +* `org` - (Optional) The name of organization that will host the Kubernetes cluster, optional if defined in the provider configuration +* `vdc_id` - (Required) The ID of the VDC that hosts the Kubernetes cluster +* `network_id` - (Required) The ID of the network that the Kubernetes cluster will use +* `owner` - (Optional) The user that creates the cluster and owns the API token specified in `api_token`. + It must have the `Kubernetes Cluster Author` role that was created during CSE installation. 
+  If not specified, the user from the provider configuration is used
+* `api_token_file` - (Required) Must be a file generated by the [`vcd_api_token` resource](/providers/vmware/vcd/latest/docs/resources/api_token),
+  or a file that follows the same formatting, that stores the API token used to create and manage the cluster,
+  owned by the user specified in `owner`. Handle this file with care, as it contains sensitive information
+* `ssh_public_key` - (Optional) The SSH public key used to log in to the cluster nodes
+* `control_plane` - (Required) See [**Control Plane**](#control-plane)
+* `worker_pool` - (Required) See [**Worker Pools**](#worker-pools)
+* `default_storage_class` - (Optional) See [**Default Storage Class**](#default-storage-class)
+* `pods_cidr` - (Optional) A CIDR block for the pods to use. Defaults to `100.96.0.0/11`
+* `services_cidr` - (Optional) A CIDR block for the services to use. Defaults to `100.64.0.0/13`
+* `virtual_ip_subnet` - (Optional) A virtual IP subnet for the cluster
+* `auto_repair_on_errors` - (Optional) If errors occur before the Kubernetes cluster becomes available, and this argument is `true`,
+  the CSE Server will automatically attempt to repair the cluster. Defaults to `false`.
+  Since CSE 4.1.1, this flag is automatically set to `false` once the cluster is available/provisioned.
+* `node_health_check` - (Optional) After the Kubernetes cluster becomes available, nodes that become unhealthy will be
+  remediated according to unhealthy node conditions and remediation rules. Defaults to `false`
+* `operations_timeout_minutes` - (Optional) The time, in minutes, to wait for cluster operations to complete successfully.
+  For example, during cluster creation the cluster should reach the `provisioned` state before the timeout is reached; otherwise, the
+  operation returns an error. For cluster deletion, this timeout specifies the time to wait until the cluster is completely deleted.
+  Setting this argument to `0` means waiting indefinitely (not recommended, as it could hang Terraform if the cluster cannot be created
+  due to a configuration error while `auto_repair_on_errors=true`). Defaults to `60`
+
+### Control Plane
+
+The `control_plane` block is **required** and unique per resource, meaning that there must be **exactly one** of these
+in every resource.
+
+This block asks for the following arguments:
+
+* `machine_count` - (Optional) The number of nodes that the control plane has. Must be an odd number and higher than `0`. Defaults to `3`
+* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the control plane VMs. Must be at least `20`. Defaults to `20`
+* `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the policies made available during CSE installation
+* `placement_policy_id` - (Optional) VM Placement policy for the control plane VMs
+* `storage_profile_id` - (Optional) Storage profile for the control plane VMs
+* `ip` - (Optional) IP for the control plane. It will be automatically assigned during cluster creation if left empty
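+
+As an illustrative sketch only (not part of the provider API), the "odd number higher than 0" constraint can also be
+enforced early with an input variable validation; the variable name below is hypothetical:
+
+```hcl
+# Illustrative only: a variable whose validation mirrors the control plane constraints
+# (an odd number of nodes, greater than 0)
+variable "control_plane_machine_count" {
+  type        = number
+  default     = 3
+  description = "Number of control plane nodes; must be an odd number higher than 0"
+  validation {
+    condition     = var.control_plane_machine_count > 0 && var.control_plane_machine_count % 2 == 1
+    error_message = "The control plane machine count must be an odd number higher than 0."
+  }
+}
+```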
+
+### Worker Pools
+
+The `worker_pool` block is **required**, and every cluster must have **at least one** of them.
+
+Each block asks for the following arguments:
+
+* `name` - (Required) The name of the worker pool. It must be unique per cluster, and must contain only lowercase alphanumeric characters or "-",
+  start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters
+* `machine_count` - (Optional) The number of VMs that the worker pool has. Must be higher than `0`. Defaults to `1`
+* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the worker pool VMs. Must be at least `20`. Defaults to `20`
+* `sizing_policy_id` - (Optional) VM Sizing policy for the worker pool VMs. Must be one of the policies made available during CSE installation
+* `placement_policy_id` - (Optional) VM Placement policy for the worker pool VMs. If this one is set, `vgpu_policy_id` must be empty
+* `vgpu_policy_id` - (Optional) vGPU policy for the worker pool VMs. If this one is set, `placement_policy_id` must be empty
+* `storage_profile_id` - (Optional) Storage profile for the worker pool VMs
+
+### Default Storage Class
+
+The `default_storage_class` block is **optional**, and every cluster can have **at most one** of them.
+
+If defined, the block asks for the following arguments:
+
+* `name` - (Required) The name of the default storage class. It must contain only lowercase alphanumeric characters or "-",
+  start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters
+* `storage_profile_id` - (Required) Storage profile for the default storage class
+* `reclaim_policy` - (Required) A value of `delete` deletes the volume when the PersistentVolumeClaim is deleted. `retain` does not,
+  and the volume can be manually reclaimed
+* `filesystem` - (Required) Filesystem of the storage class, can be either `ext4` or `xfs`
+
+## Attribute Reference
+
+The following read-only attributes are available after a successful cluster creation:
+
+* `kubernetes_version` - The version of Kubernetes installed in this cluster
+* `tkg_product_version` - The version of TKG installed in this cluster
+* `capvcd_version` - The version of CAPVCD used by this cluster
+* `cluster_resource_set_bindings` - The cluster resource set bindings of this cluster
+* `cpi_version` - The version of the Cloud Provider Interface used by this cluster
+* `csi_version` - The version of the Container Storage Interface used by this cluster
+* `state` - The Kubernetes cluster status, which can be `provisioning` when it is being created, `provisioned` when it was successfully
+  created and is ready to use, or `error` when an error occurred. `provisioning` can only be obtained when a timeout happens during
+  cluster creation. `error` can only be obtained either with a timeout or when `auto_repair_on_errors=false`.
+* `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned`
+* `supported_upgrades` - A set of vApp Template names that can be fetched with a
+  [`vcd_catalog_vapp_template` data source](/providers/vmware/vcd/latest/docs/data-sources/catalog_vapp_template) to upgrade the cluster
+* `events` - A set of events that happened during the Kubernetes cluster lifecycle. They are ordered from most recent to least recent. Each event has:
+  * `name` - Name of the event
+  * `resource_id` - ID of the resource that caused the event
+  * `type` - Type of the event, either `event` or `error`
+  * `details` - Details of the event
+  * `occurred_at` - When the event happened
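+
+For instance (illustrative only, reusing the `my_cluster` resource name from the Example Usage above), outputs like the
+following can expose the OVAs the cluster can be upgraded to and a readable summary of its latest events:
+
+```hcl
+# Names of the Kubernetes template OVAs this cluster can be upgraded to
+output "available_upgrades" {
+  value = vcd_cse_kubernetes_cluster.my_cluster.supported_upgrades
+}
+
+# One line per lifecycle event, most recent first
+output "cluster_events" {
+  value = [
+    for e in vcd_cse_kubernetes_cluster.my_cluster.events :
+    "${e.occurred_at} [${e.type}] ${e.name}: ${e.details}"
+  ]
+}
+```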
+
+## Updating
+
+Only the following arguments can be updated:
+
+* `kubernetes_template_id`: The cluster must allow upgrading to the new TKG version. You can check the `supported_upgrades`
+  attribute to see the available OVAs.
+* `machine_count` of the `control_plane`: Supports scaling up and down. No other argument of this block can be updated.
+* `machine_count` of any `worker_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes.
+  The cluster must always have at least 1 running node, or else it will enter an unrecoverable error state.
+* `auto_repair_on_errors`: Can only be updated in CSE 4.1.0, and it is recommended to set it to `false` when the cluster is created.
+  In versions higher than 4.1.0, this is automatically done by the CSE Server, so this flag cannot be updated.
+* `node_health_check`: Can be turned on/off.
+* `operations_timeout_minutes`: Does not require modifying the existing cluster
+
+You can also add more `worker_pool` blocks to create new Worker Pools in the cluster. **You can't delete Worker Pools**, but they can
+be scaled down to zero.
+
+Updating any other argument will delete the existing cluster and create a new one when the Terraform plan is applied.
+
+Upgrading the CSE version with `cse_version` is not supported, as this operation requires human intervention,
+as stated [in the official documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html).
+
+## Accessing the Kubernetes cluster
+
+To retrieve the Kubeconfig of a created cluster, you may set it as an output:
+
+```hcl
+output "kubeconfig" {
+  value = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig
+}
+```
+
+Then, saving it to a file is trivial:
+
+```shell
+terraform output -raw kubeconfig > $HOME/kubeconfig
+```
+
+The Kubeconfig can now be used with `kubectl` to operate the Kubernetes cluster.
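+
+As an alternative sketch (it assumes the `hashicorp/local` provider is added to the configuration), the Kubeconfig can
+also be written to disk directly from Terraform; note that its contents will then also be stored in the Terraform state:
+
+```hcl
+# Writes the Kubeconfig next to the configuration, with restrictive permissions
+resource "local_sensitive_file" "kubeconfig" {
+  content         = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig
+  filename        = "${path.module}/kubeconfig"
+  file_permission = "0600"
+}
+```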
+
+## Importing
+
+An existing Kubernetes cluster can be [imported][docs-import] into this resource by supplying its **Cluster ID**.
+The ID can be easily obtained in the VCD UI, in the CSE Kubernetes Container Clusters plugin.
+
+An example is shown below. During import, none of the mentioned arguments are required, but they will be in subsequent Terraform commands
+such as `terraform plan`. Each comment in the code gives some context on how to obtain them, so the cluster is completely manageable:
+
+```hcl
+# This is just a snippet of code that will host the imported cluster, which already exists in VCD.
+# It must NOT be created with Terraform beforehand; it is just a shell that will receive the imported information.
+# None of the arguments are required during the import phase, but they will be required to operate the cluster afterwards.
+resource "vcd_cse_kubernetes_cluster" "imported_cluster" {
+  name                   = "test2"                                    # The name of the existing cluster
+  cse_version            = "4.2.1"                                    # The CSE version installed in your VCD
+  kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id  # See below data sources
+  vdc_id                 = data.vcd_org_vdc.vdc.id                    # See below data sources
+  network_id             = data.vcd_network_routed_v2.routed.id       # See below data sources
+  node_health_check      = true                                       # Whether the existing cluster has Machine Health Check enabled; this can be checked in the UI
+
+  control_plane {
+    machine_count      = 5                                       # Optional, but if it does not match the current value, subsequent plans will try to scale the existing cluster to the default value
+    sizing_policy_id   = data.vcd_vm_sizing_policy.tkg_small.id  # See below data sources
+    storage_profile_id = data.vcd_storage_profile.sp.id          # See below data sources
+  }
+
+  worker_pool {
+    name               = "node-pool-1"                           # The name of the existing worker pool of the cluster. Retrievable from the UI
+    machine_count      = 40                                      # Optional, but if it does not match the current value, subsequent plans will try to scale the existing cluster to the default value
+    sizing_policy_id   = data.vcd_vm_sizing_policy.tkg_small.id  # See below data sources
+    storage_profile_id = data.vcd_storage_profile.sp.id          # See below data sources
+  }
+
+  # While optional, the Default Storage Class cannot be changed after an import, so we need
+  # to set the values of the existing cluster to avoid re-creation.
+  # The information can be retrieved from the UI
+  default_storage_class {
+    filesystem         = "ext4"
+    name               = "sc-1"
+    reclaim_policy     = "delete"
+    storage_profile_id = data.vcd_storage_profile.sp.id          # See below data sources
+  }
+}
+
+# The data sources below are needed to retrieve the required IDs. They are not needed
+# during the import phase, but they will be required to operate the cluster afterwards
+
+# The VDC and Organization where the existing cluster is located
+data "vcd_org_vdc" "vdc" {
+  org  = "tenant_org"
+  name = "tenant_vdc"
+}
+
+# The OVA that the existing cluster is using. You can identify it by inspecting
+# the existing cluster's TKG/Kubernetes version.
+data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" +} + +# The network that the existing cluster is using +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "tenant_net_routed" +} + +# The VM Sizing Policy of the existing cluster nodes +data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +# The Storage Profile that the existing cluster uses +data "vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" +} + +data "vcd_catalog" "tkg_catalog" { + org = "solutions_org" # The Organization that shares the TKGm OVAs with the tenants + name = "tkgm_catalog" # The Catalog name +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "tenant_edgegateway" +} +``` + +```sh +terraform import vcd_cse_kubernetes_cluster.imported_cluster urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 +``` + +-> The ID is required as it is the only way to unequivocally identify a Kubernetes cluster inside VCD. To obtain the ID +you can check the Kubernetes Container Clusters UI plugin, where all the available clusters are listed. + +After that, you can expand the configuration file and either update or delete the Kubernetes cluster. Running `terraform plan` +at this stage will show the difference between the minimal configuration file and the Kubernetes cluster stored properties. + +### Importing with Import blocks (Terraform v1.5+) + +~> Terraform warns that this procedure is considered **experimental**. Read more [here](/providers/vmware/vcd/latest/docs/guides/importing_resources) + +Given a Cluster ID, like `urn:vcloud:entity:vmware:capvcdCluster:f2d88194-3745-47ef-a6e1-5ee0bbce38f6`, you can write +the following HCL block in your Terraform configuration: + +```hcl +import { + to = vcd_cse_kubernetes_cluster.imported_cluster + id = "urn:vcloud:entity:vmware:capvcdCluster:f2d88194-3745-47ef-a6e1-5ee0bbce38f6" +} +``` + +Instead of using the suggested snippet in the section above, executing the command +`terraform plan -generate-config-out=generated_resources.tf` will generate a similar code, automatically. + +Once the code is validated, running `terraform apply` will perform the import operation and save the Kubernetes cluster +into the Terraform state. The Kubernetes cluster can now be operated with Terraform. + +[docs-import]:https://www.terraform.io/docs/import/ diff --git a/website/vcd.erb b/website/vcd.erb index 9d64957ed..f92ec18e7 100644 --- a/website/vcd.erb +++ b/website/vcd.erb @@ -407,7 +407,13 @@ vcd_vgpu_profile > - vcd_vm_vgpu_policy + vcd_vm_vgpu_policy + + > + vcd_cse_kubernetes_cluster + + > + vcd_version @@ -723,6 +729,9 @@ > vcd_vm_vgpu_policy + > + vcd_cse_kubernetes_cluster +