From 53d6bd7595d66d9e3e042d5f2e5a0f1861bcd207 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 4 Jan 2024 11:32:05 +0100 Subject: [PATCH 001/156] Init Signed-off-by: abarreiro --- vcd/provider.go | 1 + vcd/resource_vcd_cse_kubernetes_cluster.go | 147 +++++++++++++++++++++ 2 files changed, 148 insertions(+) create mode 100644 vcd/resource_vcd_cse_kubernetes_cluster.go diff --git a/vcd/provider.go b/vcd/provider.go index adaa9e2f5..24fb80c9f 100644 --- a/vcd/provider.go +++ b/vcd/provider.go @@ -262,6 +262,7 @@ var globalResourceMap = map[string]*schema.Resource{ "vcd_network_pool": resourceVcdNetworkPool(), // 3.11 "vcd_nsxt_edgegateway_dns": resourceVcdNsxtEdgeGatewayDns(), // 3.11 "vcd_vm_vgpu_policy": resourceVcdVmVgpuPolicy(), // 3.11 + "vcd_cse_kubernetes_cluster": resourceVcdCseKubernetesCluster(), // 3.12 } // Provider returns a terraform.ResourceProvider. diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go new file mode 100644 index 000000000..d198ce026 --- /dev/null +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -0,0 +1,147 @@ +package vcd + +import ( + "context" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/vmware/go-vcloud-director/v2/govcd" + "github.com/vmware/go-vcloud-director/v2/types/v56" +) + +func resourceVcdCseKubernetesCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceVcdCseKubernetesClusterCreate, + ReadContext: resourceVcdCseKubernetesRead, + UpdateContext: resourceVcdCseKubernetesUpdate, + DeleteContext: resourceVcdCseKubernetesDelete, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the Kubernetes cluster", + }, + "capvcd_rde_type_id": { + Type: schema.TypeString, + Required: true, + Description: "The CAPVCD RDE Type ID", + }, + "org": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of organization that will own this Runtime Defined Entity, optional if defined at provider " + + "level. Useful when connected as sysadmin working across different organizations", + }, + "vdc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the network that the Kubernetes cluster will use", + }, + "api_token": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The API token used to create and manage the cluster. The owner must have the 'Kubernetes Cluster Author' role", + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane_machine_count": { + Type: schema.TypeInt, + Required: true, + Description: "The number of nodes that the control plane has. 
Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil + }, + }, + "worker_machine_count": { + Type: schema.TypeInt, + Required: true, + Description: "The number of worker nodes, where the workloads are run", + ValidateFunc: IsIntAndAtLeast(1), + }, + }, + } +} + +func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + name := d.Get("name").(string) + + capvcdRdeTypeId := d.Get("capvcd_rde_type_id").(string) + rdeType, err := vcdClient.GetRdeTypeById(capvcdRdeTypeId) + if err != nil { + return diag.Errorf("could not create Kubernetes cluster with name '%s', could not retrieve CAPVCD RDE Type with ID '%s': %s", name, capvcdRdeTypeId, err) + } + + tenantContext := govcd.TenantContext{} + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not create Kubernetes cluster with name '%s', error retrieving Org: %s", name, err) + } + tenantContext.OrgId = org.Org.ID + tenantContext.OrgName = org.Org.Name + + err = validateCseKubernetesCluster(d) + if err != nil { + return diag.Errorf("could not create Kubernetes cluster with name '%s', error validating the payload: %s", name, err) + } + + _, err = rdeType.CreateRde(types.DefinedEntity{ + EntityType: rdeType.DefinedEntityType.ID, + Name: name, + Entity: nil, + }, &tenantContext) + if err != nil { + return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", name, err) + } + + return resourceVcdCseKubernetesRead(ctx, d, meta) +} + +func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + status, ok := rde.DefinedEntity.Entity["status"].(map[string]interface{}) + if !ok { + return diag.Errorf("could not read the status of the Kubernetes cluster with ID '%s'", d.Id()) + } + dSet(d, "asd", status[""]) + + return nil +} + +func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +func validateCseKubernetesCluster(d *schema.ResourceData) error { + return nil +} From 2411dd0ccf922a8741066b52444556792e03e7be Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 4 Jan 2024 17:13:20 +0100 Subject: [PATCH 002/156] # Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 172 ++++++++++++++++++++- 1 file changed, 166 insertions(+), 6 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index d198ce026..fb65e63ca 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" ) @@ -16,12 +17,25 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { UpdateContext: resourceVcdCseKubernetesUpdate, DeleteContext: resourceVcdCseKubernetesDelete, Schema: map[string]*schema.Schema{ + "runtime": { + Type: schema.TypeString, + Default: "tkg", + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE + Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", + }, "name": { Type: schema.TypeString, Required: true, ForceNew: true, Description: "The name of the Kubernetes cluster", }, + "ova_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, "capvcd_rde_type_id": { Type: schema.TypeString, Required: true, @@ -73,11 +87,145 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { return nil }, }, - "worker_machine_count": { - Type: schema.TypeInt, - Required: true, - Description: "The number of worker nodes, where the workloads are run", - ValidateFunc: IsIntAndAtLeast(1), + "control_plane_disk_size": { + Type: schema.TypeInt, + Required: true, + Description: "Disk size for the control plane nodes", + }, + "control_plane_sizing_policy_id": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "control_plane_placement_policy_id": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "control_plane_storage_profile": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "worker_pool": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Required: true, + Type: schema.TypeString, + + Description: "Network type to use: 'vapp', 'org' or 'none'. Use 'vapp' for vApp network, 'org' to attach Org VDC network. 'none' for empty NIC.", + }, + "machine_count": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "disk_size": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "sizing_policy_id": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "placement_policy_id": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "vgpu_policy_id": { + Optional: true, + Type: schema.TypeString, + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "storage_profile": { + Optional: true, + Type: schema.TypeString, + Description: "IP address allocation mode. 
One of POOL, DHCP, MANUAL, NONE", + }, + }, + }, + }, + "storage_class": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile": { + Required: true, + Type: schema.TypeString, + Description: "Network type to use: 'vapp', 'org' or 'none'. Use 'vapp' for vApp network, 'org' to attach Org VDC network. 'none' for empty NIC.", + }, + "name": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "reclaim_policy": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + "filesystem": { + Optional: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), + Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + }, + }, + }, + }, + "pods_cidr": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "services_cidr": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "control_plane_ip": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "autorepair_on_errors": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "node_healthcheck": { + Type: schema.TypeString, + Required: true, + Description: "", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "", + }, + "raw_cluster_rde": { + Type: schema.TypeString, + Computed: true, + Description: "", }, }, } @@ -125,11 +273,23 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met if err != nil { return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } + jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) + if err != nil { + return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) + } + dSet(d, "raw_rde", jsonEntity) + status, ok := rde.DefinedEntity.Entity["status"].(map[string]interface{}) if !ok { return diag.Errorf("could not read the status of the Kubernetes cluster with ID '%s'", d.Id()) } - dSet(d, "asd", status[""]) + + vcdKe, ok := status["vcdKe"].(map[string]interface{}) + if !ok { + return diag.Errorf("could not read the status.vcdKe of the Kubernetes cluster with ID '%s'", d.Id()) + } + + dSet(d, "state", vcdKe["state"]) return nil } From 510869746370a6e398598b23ed173f2d9d30941b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 8 Jan 2024 14:23:54 +0100 Subject: [PATCH 003/156] Refactor Signed-off-by: abarreiro --- vcd/cse/4.2/capvcd.tmpl | 24 +++ vcd/cse/4.2/default_storage_class.tmpl | 6 + vcd/resource_vcd_cse_kubernetes_cluster.go | 162 ++++++++++++--------- 3 files changed, 124 insertions(+), 68 deletions(-) create mode 100644 vcd/cse/4.2/capvcd.tmpl create mode 100644 vcd/cse/4.2/default_storage_class.tmpl diff --git a/vcd/cse/4.2/capvcd.tmpl b/vcd/cse/4.2/capvcd.tmpl new file mode 100644 index 000000000..afa51d523 --- /dev/null +++ b/vcd/cse/4.2/capvcd.tmpl @@ -0,0 +1,24 @@ +{ + "apiVersion": "capvcd.vmware.com/v1.1", + "kind": "CAPVCDCluster", + "name": "{{.Name}}", + "metadata": { + "name": "{{.Name}}", + "orgName": "{{.Org}}", + "site": 
"{{.VcdUrl}}", + "virtualDataCenterName": "{{.Vdc}}" + }, + "spec": { + "vcdKe": { + "isVCDKECluster": true, + "markForDelete": {{.Delete}}, + "forceDelete": {{.ForceDelete}}, + "autoRepairOnErrors": {{.AutoRepairOnErrors}}, + "defaultStorageClassOptions": {{.DefaultStorageClassOptions}} + "secure": { + "apiToken": "${.ApiToken}" + } + }, + "capiYaml": {{.CapiYaml}} + } +} diff --git a/vcd/cse/4.2/default_storage_class.tmpl b/vcd/cse/4.2/default_storage_class.tmpl new file mode 100644 index 000000000..7d386509c --- /dev/null +++ b/vcd/cse/4.2/default_storage_class.tmpl @@ -0,0 +1,6 @@ +{ + "filesystem": "${default_storage_class_filesystem}", + "k8sStorageClassName": "${default_storage_class_name}", + "vcdStorageProfileName": "${default_storage_class_storage_profile}", + "useDeleteReclaimPolicy": ${default_storage_class_delete_reclaim_policy} +} \ No newline at end of file diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index fb65e63ca..24aa97bb3 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -2,6 +2,7 @@ package vcd import ( "context" + _ "embed" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -10,6 +11,12 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" ) +////go:embed cse/4.2/capvcd.tmpl +//var capvcdTemplate string + +////go:embed cse/4.2/default_storage_class.tmpl +//var defaultStorageClass string + func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ CreateContext: resourceVcdCseKubernetesClusterCreate, @@ -45,7 +52,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Description: "The name of organization that will own this Runtime Defined Entity, optional if defined at provider " + + Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + "level. Useful when connected as sysadmin working across different organizations", }, "vdc_id": { @@ -72,85 +79,109 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Description: "The SSH public key used to login into the cluster nodes", }, - "control_plane_machine_count": { - Type: schema.TypeInt, - Required: true, - Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", - ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { - value, ok := v.(int) - if !ok { - return diag.Errorf("could not parse int value '%v' for control plane nodes", v) - } - if value < 1 || value%2 == 0 { - return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) - } - return nil + "control_plane": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Required: true, + Description: "The number of nodes that the control plane has. 
Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil + }, + }, + "disk_size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Disk size for the control plane nodes", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "storage_profile": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "IP for the control plane", + }, + }, }, }, - "control_plane_disk_size": { - Type: schema.TypeInt, - Required: true, - Description: "Disk size for the control plane nodes", - }, - "control_plane_sizing_policy_id": { - Type: schema.TypeString, - Required: true, - Description: "", - }, - "control_plane_placement_policy_id": { - Type: schema.TypeString, - Required: true, - Description: "", - }, - "control_plane_storage_profile": { - Type: schema.TypeString, - Required: true, - Description: "", - }, - "worker_pool": { + "node_pool": { Type: schema.TypeSet, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "name": { - Required: true, - Type: schema.TypeString, - - Description: "Network type to use: 'vapp', 'org' or 'none'. Use 'vapp' for vApp network, 'org' to attach Org VDC network. 'none' for empty NIC.", - }, "machine_count": { - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Type: schema.TypeInt, + Required: true, + Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil + }, }, "disk_size": { - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: "Disk size for the control plane nodes", }, "sizing_policy_id": { - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), - Description: "IP address allocation mode. 
One of POOL, DHCP, MANUAL, NONE", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", }, "placement_policy_id": { - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", }, "vgpu_policy_id": { - Optional: true, Type: schema.TypeString, - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Optional: true, + ForceNew: true, + Description: "vGPU policy for the control plane nodes", }, "storage_profile": { - Optional: true, Type: schema.TypeString, - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", }, }, }, @@ -197,22 +228,17 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, Description: "", }, - "control_plane_ip": { - Type: schema.TypeString, - Required: true, - Description: "", - }, "virtual_ip_subnet": { Type: schema.TypeString, Required: true, Description: "", }, - "autorepair_on_errors": { + "auto_repair_on_errors": { Type: schema.TypeString, Required: true, Description: "", }, - "node_healthcheck": { + "node_health_check": { Type: schema.TypeString, Required: true, Description: "", From e2a7ab74a2d45c9072a23d5a1fcbc0acf9d964cb Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 8 Jan 2024 16:20:28 +0100 Subject: [PATCH 004/156] Add delete Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 52 +++++++++++++++++++--- 1 file changed, 45 insertions(+), 7 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 24aa97bb3..4d627cc68 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" + "time" ) ////go:embed cse/4.2/capvcd.tmpl @@ -101,10 +102,11 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "disk_size": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - Description: "Disk size for the control plane nodes", + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: IsIntAndAtLeast(20), + Description: "Disk size for the control plane nodes", }, "sizing_policy_id": { Type: schema.TypeString, @@ -307,16 +309,16 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met status, ok := rde.DefinedEntity.Entity["status"].(map[string]interface{}) if !ok { - return diag.Errorf("could not read the status of the Kubernetes cluster with ID '%s'", d.Id()) + return diag.Errorf("could not read the 'status' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } vcdKe, ok := status["vcdKe"].(map[string]interface{}) if !ok { - return diag.Errorf("could not read the status.vcdKe of the Kubernetes cluster with ID '%s'", d.Id()) + return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } dSet(d, "state", vcdKe["state"]) - + d.SetId(rde.DefinedEntity.ID) return nil } @@ -324,7 +326,43 @@ func 
resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return nil } +// resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send +// the flags "markForDelete" and "forceDelete" back to true, so the CSE Server is able to delete all cluster elements +// and perform a cleanup. Hence, this function sends these properties and waits for deletion. func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) + if !ok { + return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") + } + + spec["markForDelete"] = true + spec["forceDelete"] = true + rde.DefinedEntity.Entity["spec"] = spec + + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return diag.Errorf("could not delete the cluster '%s': %s", rde.DefinedEntity.ID, err) + } + + // TODO: Add a timeout + deletionComplete := false + for !deletionComplete { + _, err = vcdClient.GetRdeById(d.Id()) + if err != nil { + if govcd.IsNotFound(err) { + deletionComplete = true + } + return diag.Errorf("could not check whether the cluster '%s' is deleted: %s", d.Id(), err) + } + time.Sleep(30 * time.Second) + } return nil } From b4db508c37bddefdd1f7444efee572321a5a796a Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 9 Jan 2024 13:28:24 +0100 Subject: [PATCH 005/156] # Signed-off-by: abarreiro --- go.mod | 2 + go.sum | 4 +- vcd/cse/4.2/default_storage_class.tmpl | 8 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 110 +++++++++++++++++---- 4 files changed, 99 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index bcb6ce895..dc5be918a 100644 --- a/go.mod +++ b/go.mod @@ -65,3 +65,5 @@ require ( google.golang.org/grpc v1.57.1 // indirect google.golang.org/protobuf v1.31.0 // indirect ) + +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240109121343-95a1ac946ee7 diff --git a/go.sum b/go.sum index eeb21be01..9d3a8c4b6 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs= github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240109121343-95a1ac946ee7 h1:XMoXiETV7rYBwgRNo9ue9J+S0AytotVgnSQw6Z0loDs= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240109121343-95a1ac946ee7/go.mod h1:QPxGFgrUcSyzy9IlpwDE4UNT3tsOy2047tJOPEJ4nlw= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -124,8 +126,6 @@ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9 github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 
v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= -github.com/vmware/go-vcloud-director/v2 v2.22.0 h1:i1yFCoQZl/mTKViWLpT8mC9tlOAbupip703K0q1gQT0= -github.com/vmware/go-vcloud-director/v2 v2.22.0/go.mod h1:QPxGFgrUcSyzy9IlpwDE4UNT3tsOy2047tJOPEJ4nlw= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.0 h1:/Xrd39K7DXbHzlisFP9c4pHao4yyf+/Ug9LEz+Y/yhc= diff --git a/vcd/cse/4.2/default_storage_class.tmpl b/vcd/cse/4.2/default_storage_class.tmpl index 7d386509c..946d4b772 100644 --- a/vcd/cse/4.2/default_storage_class.tmpl +++ b/vcd/cse/4.2/default_storage_class.tmpl @@ -1,6 +1,6 @@ { - "filesystem": "${default_storage_class_filesystem}", - "k8sStorageClassName": "${default_storage_class_name}", - "vcdStorageProfileName": "${default_storage_class_storage_profile}", - "useDeleteReclaimPolicy": ${default_storage_class_delete_reclaim_policy} + "filesystem": "{{.FileSystem}}", + "k8sStorageClassName": "{{.Name}}", + "vcdStorageProfileName": "{{.StorageProfile}}", + "useDeleteReclaimPolicy": {{.ReclaimPolicy}} } \ No newline at end of file diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 4d627cc68..1370088b6 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -1,22 +1,26 @@ package vcd import ( + "bytes" "context" _ "embed" + "encoding/json" + "fmt" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" + "text/template" "time" ) -////go:embed cse/4.2/capvcd.tmpl -//var capvcdTemplate string +//go:embed cse/4.2/capvcd.tmpl +var capvcdTemplate string -////go:embed cse/4.2/default_storage_class.tmpl -//var defaultStorageClass string +//go:embed cse/4.2/default_storage_class.tmpl +var defaultStorageClass string func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ @@ -191,31 +195,30 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "storage_class": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "storage_profile": { + "storage_profile_id": { Required: true, Type: schema.TypeString, - Description: "Network type to use: 'vapp', 'org' or 'none'. Use 'vapp' for vApp network, 'org' to attach Org VDC network. 'none' for empty NIC.", + Description: "ID of the storage profile to use for the storage class", }, "name": { - Optional: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"POOL", "DHCP", "MANUAL", "NONE"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Required: true, + Type: schema.TypeString, + Description: "Name to give to this storage class", }, "reclaim_policy": { - Optional: true, + Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 
'retain' does not, and the volume can be manually reclaimed", }, "filesystem": { - Optional: true, + Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), - Description: "IP address allocation mode. One of POOL, DHCP, MANUAL, NONE", + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", }, }, }, @@ -282,10 +285,15 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create Kubernetes cluster with name '%s', error validating the payload: %s", name, err) } + entityMap, err := getCseKubernetesClusterEntityMap(d, vcdClient, "create") + if err != nil { + return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", name, err) + } + _, err = rdeType.CreateRde(types.DefinedEntity{ EntityType: rdeType.DefinedEntityType.ID, Name: name, - Entity: nil, + Entity: entityMap, }, &tenantContext) if err != nil { return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", name, err) @@ -294,6 +302,70 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return resourceVcdCseKubernetesRead(ctx, d, meta) } +func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClient, operation string) (StringMap, error) { + name := d.Get("name").(string) + + _, isStorageClassSet := d.GetOk("storage_class") + storageClass := "{}" + if isStorageClassSet { + storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) + storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", storageProfileId, err) + } + storageClassEmpty := template.Must(template.New(name + "_StorageClass").Parse(defaultStorageClass)) + storageClassName := d.Get("storage_class.0.name").(string) + reclaimPolicy := d.Get("storage_class.0.reclaim_policy").(string) + filesystem := d.Get("storage_class.0.filesystem").(string) + + buf := &bytes.Buffer{} + if err := storageClassEmpty.Execute(buf, map[string]string{ + "FileSystem": filesystem, + "Name": storageClassName, + "StorageProfile": storageProfile.ID, + "ReclaimPolicy": reclaimPolicy, + }); err != nil { + return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) + } + storageClass = buf.String() + } + deleteFlag := "false" + forceDelete := "false" + if operation == "delete" { + deleteFlag = "true" + forceDelete = "true" + } + + capvcdEmpty := template.Must(template.New(name).Parse(capvcdTemplate)) + buf := &bytes.Buffer{} + if err := capvcdEmpty.Execute(buf, map[string]string{ + "Name": name, + "Org": "", + "VcdUrl": vcdClient.Client.VCDHREF.String(), + "Vdc": "", + "Delete": deleteFlag, + "ForceDelete": forceDelete, + "AutoRepairOnErrors": "", + "DefaultStorageClassOptions": storageClass, + "ApiToken": "", + "CapiYaml": getCapiYamlPlaintext(d, vcdClient), + }); err != nil { + return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) + } + + result := map[string]interface{}{} + err := json.Unmarshal(buf.Bytes(), &result) + if err != nil { + return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) + } + + return result, nil +} + +func getCapiYamlPlaintext(d *schema.ResourceData, client *VCDClient) string { + return "" +} + func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) @@ -307,12 +379,12 @@ func 
resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met } dSet(d, "raw_rde", jsonEntity) - status, ok := rde.DefinedEntity.Entity["status"].(map[string]interface{}) + status, ok := rde.DefinedEntity.Entity["status"].(StringMap) if !ok { return diag.Errorf("could not read the 'status' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } - vcdKe, ok := status["vcdKe"].(map[string]interface{}) + vcdKe, ok := status["vcdKe"].(StringMap) if !ok { return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } @@ -337,7 +409,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) + spec, ok := rde.DefinedEntity.Entity["spec"].(StringMap) if !ok { return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") } From 8b3413dbf896c096d32f709d9f238d3d34649d6d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 10 Jan 2024 10:50:21 +0100 Subject: [PATCH 006/156] # Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 35 ++++++++++++++-------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 1370088b6..ab00e8986 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -226,7 +226,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "pods_cidr": { Type: schema.TypeString, Required: true, - Description: "", + Description: "CIDR that the pods will use", }, "services_cidr": { Type: schema.TypeString, @@ -329,32 +329,41 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClie } storageClass = buf.String() } - deleteFlag := "false" - forceDelete := "false" - if operation == "delete" { - deleteFlag = "true" - forceDelete = "true" + + orgName, err := vcdClient.GetOrgNameFromResource(d) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Organization name to build the cluster JSON payload: %s", err) + } + org, err := vcdClient.GetOrg(orgName) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Organization '%s' to build the cluster JSON payload: %s", orgName, err) + } + + vdcId := d.Get("vdc_id").(string) + vdc, err := org.GetVDCById(vdcId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the VDC '%s' to build the cluster JSON payload: %s", vdcId, err) } capvcdEmpty := template.Must(template.New(name).Parse(capvcdTemplate)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, map[string]string{ "Name": name, - "Org": "", + "Org": orgName, "VcdUrl": vcdClient.Client.VCDHREF.String(), - "Vdc": "", - "Delete": deleteFlag, - "ForceDelete": forceDelete, - "AutoRepairOnErrors": "", + "Vdc": vdc.Vdc.Name, + "Delete": "false", + "ForceDelete": "false", + "AutoRepairOnErrors": d.Get("auto_repair_on_errors").(string), "DefaultStorageClassOptions": storageClass, - "ApiToken": "", + "ApiToken": d.Get("api_token").(string), "CapiYaml": getCapiYamlPlaintext(d, vcdClient), }); err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } result := map[string]interface{}{} - err := json.Unmarshal(buf.Bytes(), &result) + err = json.Unmarshal(buf.Bytes(), &result) if err != nil { return nil, fmt.Errorf("could not generate a 
correct CAPVCD JSON: %s", err) } From a0ab3eb3787f4ed8ffcc8f502ac1f8a4048cf19e Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 10 Jan 2024 11:41:01 +0100 Subject: [PATCH 007/156] Refactor Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 196 ++++++++++++--------- 1 file changed, 113 insertions(+), 83 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index ab00e8986..790e5f941 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" + "net/url" "text/template" "time" ) @@ -92,7 +93,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "machine_count": { Type: schema.TypeInt, - Required: true, + Optional: true, + Default: 3, // As suggested in UI Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { value, ok := v.(int) @@ -107,7 +109,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "disk_size": { Type: schema.TypeInt, - Required: true, + Optional: true, + Default: 20, // As suggested in UI ForceNew: true, ValidateFunc: IsIntAndAtLeast(20), Description: "Disk size for the control plane nodes", @@ -145,23 +148,16 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "machine_count": { - Type: schema.TypeInt, - Required: true, - Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", - ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { - value, ok := v.(int) - if !ok { - return diag.Errorf("could not parse int value '%v' for control plane nodes", v) - } - if value < 1 || value%2 == 0 { - return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) - } - return nil - }, + Type: schema.TypeInt, + Optional: true, + Default: 1, // As suggested in UI + Description: "The number of nodes that this node pool has. 
Must be higher than 0", + ValidateFunc: IsIntAndAtLeast(1), }, "disk_size": { Type: schema.TypeInt, - Required: true, + Optional: true, + Default: 20, // As suggested in UI ForceNew: true, Description: "Disk size for the control plane nodes", }, @@ -225,38 +221,40 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "pods_cidr": { Type: schema.TypeString, - Required: true, - Description: "CIDR that the pods will use", + Optional: true, + Default: "100.96.0.0/11", // As suggested in UI + Description: "CIDR that the Kubernetes pods will use", }, "services_cidr": { Type: schema.TypeString, - Required: true, - Description: "", + Optional: true, + Default: "100.64.0.0/13", // As suggested in UI + Description: "CIDR that the Kubernetes services will use", }, "virtual_ip_subnet": { Type: schema.TypeString, Required: true, - Description: "", + Description: "Virtual IP subnet for the cluster", }, "auto_repair_on_errors": { Type: schema.TypeString, Required: true, - Description: "", + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", }, "node_health_check": { Type: schema.TypeString, Required: true, - Description: "", + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", }, "state": { Type: schema.TypeString, Computed: true, - Description: "", + Description: "The state of the cluster, can be 'provisioning', 'provisioned' or 'error'. Useful to check whether the Kubernetes cluster is in a stable status", }, - "raw_cluster_rde": { + "raw_cluster_rde_json": { Type: schema.TypeString, Computed: true, - Description: "", + Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", }, }, } @@ -264,55 +262,37 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - name := d.Get("name").(string) - - capvcdRdeTypeId := d.Get("capvcd_rde_type_id").(string) - rdeType, err := vcdClient.GetRdeTypeById(capvcdRdeTypeId) - if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s', could not retrieve CAPVCD RDE Type with ID '%s': %s", name, capvcdRdeTypeId, err) - } - - tenantContext := govcd.TenantContext{} - org, err := vcdClient.GetOrgFromResource(d) - if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s', error retrieving Org: %s", name, err) - } - tenantContext.OrgId = org.Org.ID - tenantContext.OrgName = org.Org.Name - err = validateCseKubernetesCluster(d) + clusterDetails, err := createClusterInfoDto(d, vcdClient) if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s', error validating the payload: %s", name, err) + return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } - entityMap, err := getCseKubernetesClusterEntityMap(d, vcdClient, "create") + entityMap, err := getCseKubernetesClusterEntityMap(d, clusterDetails) if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", name, err) + return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } - _, err = rdeType.CreateRde(types.DefinedEntity{ - EntityType: 
rdeType.DefinedEntityType.ID, - Name: name, + _, err = clusterDetails.RdeType.CreateRde(types.DefinedEntity{ + EntityType: clusterDetails.RdeType.DefinedEntityType.ID, + Name: clusterDetails.Name, Entity: entityMap, - }, &tenantContext) + }, &govcd.TenantContext{ + OrgId: clusterDetails.Org.AdminOrg.ID, + OrgName: clusterDetails.Org.AdminOrg.Name, + }) if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", name, err) + return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } return resourceVcdCseKubernetesRead(ctx, d, meta) } -func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClient, operation string) (StringMap, error) { +func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *clusterInfoDto) (StringMap, error) { name := d.Get("name").(string) - _, isStorageClassSet := d.GetOk("storage_class") storageClass := "{}" - if isStorageClassSet { - storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) - storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", storageProfileId, err) - } + if clusterDetails.StorageProfile != nil { storageClassEmpty := template.Must(template.New(name + "_StorageClass").Parse(defaultStorageClass)) storageClassName := d.Get("storage_class.0.name").(string) reclaimPolicy := d.Get("storage_class.0.reclaim_policy").(string) @@ -322,7 +302,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClie if err := storageClassEmpty.Execute(buf, map[string]string{ "FileSystem": filesystem, "Name": storageClassName, - "StorageProfile": storageProfile.ID, + "StorageProfile": clusterDetails.StorageProfile.Name, "ReclaimPolicy": reclaimPolicy, }); err != nil { return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) @@ -330,40 +310,25 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClie storageClass = buf.String() } - orgName, err := vcdClient.GetOrgNameFromResource(d) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Organization name to build the cluster JSON payload: %s", err) - } - org, err := vcdClient.GetOrg(orgName) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Organization '%s' to build the cluster JSON payload: %s", orgName, err) - } - - vdcId := d.Get("vdc_id").(string) - vdc, err := org.GetVDCById(vdcId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the VDC '%s' to build the cluster JSON payload: %s", vdcId, err) - } - capvcdEmpty := template.Must(template.New(name).Parse(capvcdTemplate)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, map[string]string{ "Name": name, - "Org": orgName, - "VcdUrl": vcdClient.Client.VCDHREF.String(), - "Vdc": vdc.Vdc.Name, + "Org": clusterDetails.Org.AdminOrg.Name, + "VcdUrl": clusterDetails.VcdUrl.String(), + "Vdc": clusterDetails.Vdc.Vdc.Name, "Delete": "false", "ForceDelete": "false", "AutoRepairOnErrors": d.Get("auto_repair_on_errors").(string), "DefaultStorageClassOptions": storageClass, "ApiToken": d.Get("api_token").(string), - "CapiYaml": getCapiYamlPlaintext(d, vcdClient), + "CapiYaml": getCapiYamlPlaintext(d, clusterDetails), }); err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } result := map[string]interface{}{} - err = json.Unmarshal(buf.Bytes(), &result) + err := 
json.Unmarshal(buf.Bytes(), &result) if err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } @@ -371,7 +336,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, vcdClient *VCDClie return result, nil } -func getCapiYamlPlaintext(d *schema.ResourceData, client *VCDClient) string { +func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto) string { return "" } @@ -386,7 +351,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met if err != nil { return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) } - dSet(d, "raw_rde", jsonEntity) + dSet(d, "raw_cluster_rde_json", jsonEntity) status, ok := rde.DefinedEntity.Entity["status"].(StringMap) if !ok { @@ -447,6 +412,71 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m return nil } -func validateCseKubernetesCluster(d *schema.ResourceData) error { - return nil +// clusterInfoDto is a helper struct that contains all the required elements to successfully create and manage +// a Kubernetes cluster using CSE. +type clusterInfoDto struct { + Name string + VcdUrl url.URL + Org *govcd.AdminOrg + Vdc *govcd.Vdc + VAppTemplate *govcd.VAppTemplate + Network *govcd.OrgVDCNetwork + RdeType *govcd.DefinedEntityType + StorageProfile *types.VdcStorageProfile +} + +// createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information +// from th input Terraform resource data. +func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient) (*clusterInfoDto, error) { + result := &clusterInfoDto{} + + name := d.Get("name").(string) + result.Name = name + + org, err := vcdClient.GetAdminOrgFromResource(d) + if err != nil { + return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) + } + result.Org = org + + vdcId := d.Get("vdc_id").(string) + vdc, err := org.GetVDCById(vdcId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) + } + result.Vdc = vdc + + vAppTemplateId := d.Get("ova_id").(string) + vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) + } + result.VAppTemplate = vAppTemplate + + networkId := d.Get("network_id").(string) + network, err := vdc.GetOrgVdcNetworkById(networkId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Org VDC Network with ID '%s': %s", networkId, err) + } + result.Network = network + + rdeTypeId := d.Get("capvcd_rde_type_id").(string) + rdeType, err := vcdClient.GetRdeTypeById(rdeTypeId) + if err != nil { + return nil, fmt.Errorf("could not retrieve RDE Type with ID '%s': %s", rdeTypeId, err) + } + result.RdeType = rdeType + + var storageProfile *types.VdcStorageProfile + if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { + storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) + storageProfile, err = vcdClient.GetStorageProfileById(storageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", storageProfileId, err) + } + } + result.StorageProfile = storageProfile + + result.VcdUrl = vcdClient.VCDClient.Client.VCDHREF + return result, nil } From 26f78d41c44490092da27432d0f27925e6b16954 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 10 Jan 2024 16:04:16 +0100 
Subject: [PATCH 008/156] Add test, doesnt run/pass Signed-off-by: abarreiro --- vcd/config_test.go | 15 +- vcd/cse/4.2/capi_yaml.tmpl | 217 ++++++++++++++++++ vcd/resource_vcd_cse_kubernetes_cluster.go | 178 +++++++++++--- ...esource_vcd_cse_kubernetes_cluster_test.go | 144 ++++++++++++ vcd/sample_vcd_test_config.json | 12 + 5 files changed, 532 insertions(+), 34 deletions(-) create mode 100644 vcd/cse/4.2/capi_yaml.tmpl create mode 100644 vcd/resource_vcd_cse_kubernetes_cluster_test.go diff --git a/vcd/config_test.go b/vcd/config_test.go index 5140b1068..1425805a7 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -231,6 +231,17 @@ type TestConfig struct { OrgUserPassword string `json:"orgUserPassword"` // Password for the Org User to be created within the organization } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` + Cse struct { + Org string `json:"org,omitempty"` + Vdc string `json:"vdc,omitempty"` + OvaCatalog string `json:"ovaCatalog,omitempty"` + OvaName string `json:"ovaName,omitempty"` + CapVcdVersion string `json:"capVcdVersion,omitempty"` + RoutedNetwork string `json:"routedNetwork,omitempty"` + EdgeGateway string `json:"edgeGateway,omitempty"` + Owner string `json:"owner,omitempty"` + ApiTokenFile string `json:"apiTokenFile,omitempty"` + } `json:"cse,omitempty"` } // names for created resources for all the tests @@ -960,7 +971,7 @@ func importStateIdTopHierarchy(objectName string) resource.ImportStateIdFunc { } } -// Used by all entities that depend on Org (such as Catalog, OrgUser) +// Used by all entities that depend on Org (such as CatalogName, OrgUser) // If the orgName is empty, it uses the default Org from testConfig func importStateIdOrgObject(orgName string, objectName string) resource.ImportStateIdFunc { return func(*terraform.State) (string, error) { @@ -1037,7 +1048,7 @@ func importStateIdNsxtManagerObject(objectName string) resource.ImportStateIdFun } } -// Used by all entities that depend on Org + Catalog (such as catalog item, media item) +// Used by all entities that depend on Org + CatalogName (such as catalog item, media item) func importStateIdOrgCatalogObject(objectName string) resource.ImportStateIdFunc { return func(*terraform.State) (string, error) { if testConfig.VCD.Org == "" || testConfig.VCD.Catalog.Name == "" || objectName == "" { diff --git a/vcd/cse/4.2/capi_yaml.tmpl b/vcd/cse/4.2/capi_yaml.tmpl new file mode 100644 index 000000000..1189ca3c9 --- /dev/null +++ b/vcd/cse/4.2/capi_yaml.tmpl @@ -0,0 +1,217 @@ +# This file was downloaded from https://raw.githubusercontent.com/vmware/cluster-api-provider-cloud-director/main/templates/cluster-template-v1.25.7.yaml +# Only the commented lines were added manually. + +# The MachineHealthCheck was added manually. You can add this section if you want automatic health checks in your +# Kubernetes clusters. 
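+#
+# The {{.Xxx}} tokens below are Go text/template placeholders rather than literal YAML values. They are expected to
+# be filled in by the provider (see getCapiYamlPlaintext in resource_vcd_cse_kubernetes_cluster.go) before the
+# rendered YAML is embedded as the "capiYaml" field of the CAPVCD RDE payload built from cse/4.2/capvcd.tmpl.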
+apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: {{.ClusterName}} + namespace: {{.TargetNamespace}} + labels: + clusterctl.cluster.x-k8s.io: "" + clusterctl.cluster.x-k8s.io/move: "" +spec: + clusterName: {{.ClusterName}} + maxUnhealthy: {{.MaxUnhealthyNodePercentage}}% + nodeStartupTimeout: {{.NodeStartupTimeout}}s + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: {{.ClusterName}} + unhealthyConditions: + - type: Ready + status: Unknown + timeout: {{.NodeUnknownTimeout}}s + - type: Ready + status: "False" + timeout: {{.NodeNotReadyTimeout}}s +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: {{.ClusterName}} + namespace: {{.TargetNamespace}} + labels: # The labels section was added manually, this is REQUIRED for the cluster to work + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: {{.TkrVersion}} + tkg.tanzu.vmware.com/cluster-name: {{.ClusterName}} + annotations: # The annotations section was added manually, this is REQUIRED for the cluster to work + TKGVERSION: {{.TkgVersion}} +spec: + clusterNetwork: + pods: + cidrBlocks: + - {{.PodCidr}} + serviceDomain: cluster.local + services: + cidrBlocks: + - {{.ServiceCidr}} + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: {{.ClusterName}}-control-plane + namespace: {{.TargetNamespace}} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDCluster + name: {{.ClusterName}} + namespace: {{.TargetNamespace}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: capi-user-credentials + namespace: {{.TargetNamespace}} +type: Opaque +data: + username: "{{.UsernameB64}}" + password: "" + refreshToken: "{{.ApiTokenB64}}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDCluster +metadata: + name: {{.ClusterName}} + namespace: {{.TargetNamespace}} +spec: + site: {{.VcdSite}} + org: {{.Org}} + ovdc: {{.OrgVdc}} + ovdcNetwork: {{.OrgVdcNetwork}} + useAsManagementCluster: false + userContext: + secretRef: + name: capi-user-credentials + namespace: {{.TargetNamespace}} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDMachineTemplate +metadata: + name: {{.ClusterName}}-control-plane + namespace: {{.TargetNamespace}} +spec: + template: + spec: + catalog: {{.Catalog}} + template: {{.VAppTemplateName}} + sizingPolicy: {{.ControlPlaneSizingPolicy}} + placementPolicy: {{.ControlPlanePlacementPolicy}} + storageProfile: "{{.ControlPlaneStorageProfile}}" + diskSize: {{.ControlPlaneDiskSize}} + enableNvidiaGPU: false +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: {{.ClusterName}}-control-plane + namespace: {{.TargetNamespace}} +spec: + kubeadmConfigSpec: + preKubeadmCommands: # preKubeadmCommands was added manually + - mv /etc/ssl/certs/custom_certificate_*.crt + /usr/local/share/ca-certificates && update-ca-certificates + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + controllerManager: # controllerManager was added manually + extraArgs: + enable-hostpath-provisioner: "true" + dns: + imageRepository: {{.ContainerRegistryUrl}}/tkg + imageTag: v1.9.3_vmware.8 + etcd: + local: + imageRepository: {{.ContainerRegistryUrl}}/tkg + imageTag: v3.5.6_vmware.9 + imageRepository: {{.ContainerRegistryUrl}}/tkg + users: + - name: root + sshAuthorizedKeys: + - "{{.SshPublicKey}}" + initConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + 
kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external + joinConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDMachineTemplate + name: {{.ClusterName}}-control-plane + namespace: {{.TargetNamespace}} + replicas: {{.ControlPlaneMachineCount}} + version: v1.25.7+vmware.2 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDMachineTemplate +metadata: + name: {{.ClusterName}}-md-0 + namespace: {{.TargetNamespace}} +spec: + template: + spec: + catalog: {{.Catalog}} + template: {{.VAppTemplateName}} + sizingPolicy: {{.NodePool0SizingPolicy}} + placementPolicy: {{.NodePool0PlacementPolicy}} + storageProfile: "{{.NodePool0StorageProfile}}" + diskSize: {{.NodePool0DiskSize}} + enableNvidiaGPU: false +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: {{.ClusterName}}-md-0 + namespace: {{.TargetNamespace}} +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: + - "{{.SshPublicKey}}" + useExperimentalRetryJoin: true # Added manually + preKubeadmCommands: # Added manually + - mv /etc/ssl/certs/custom_certificate_*.crt + /usr/local/share/ca-certificates && update-ca-certificates + joinConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: {{.ClusterName}}-md-0 + namespace: {{.TargetNamespace}} +spec: + clusterName: {{.ClusterName}} + replicas: {{.NodePool0MachineCount}} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: {{.ClusterName}}-md-0 + namespace: {{.TargetNamespace}} + clusterName: {{.ClusterName}} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDMachineTemplate + name: {{.ClusterName}}-md-0 + namespace: {{.TargetNamespace}} + version: v1.25.7+vmware.2 \ No newline at end of file diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 790e5f941..53d353a05 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -4,6 +4,7 @@ import ( "bytes" "context" _ "embed" + "encoding/base64" "encoding/json" "fmt" "github.com/hashicorp/go-cty/cty" @@ -23,6 +24,9 @@ var capvcdTemplate string //go:embed cse/4.2/default_storage_class.tmpl var defaultStorageClass string +//go:embed cse/4.2/capi_yaml.tmpl +var capiYaml string + func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ CreateContext: resourceVcdCseKubernetesClusterCreate, @@ -73,11 +77,17 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Description: "The ID of the network that the Kubernetes cluster will use", }, - "api_token": { + "owner": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The user that creates the cluster and owns the API token specified in 'api_token'. 
It must have the 'Kubernetes Cluster Author' role", + }, + "api_token_file": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The API token used to create and manage the cluster. The owner must have the 'Kubernetes Cluster Author' role", + Description: "A file that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'", }, "ssh_public_key": { Type: schema.TypeString, @@ -145,6 +155,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "node_pool": { Type: schema.TypeSet, Required: true, + MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "machine_count": { @@ -179,7 +190,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Description: "vGPU policy for the control plane nodes", }, - "storage_profile": { + "storage_profile_id": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -263,7 +274,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - clusterDetails, err := createClusterInfoDto(d, vcdClient) + clusterDetails, err := createClusterInfoDto(d, vcdClient, "1.1.0") if err != nil { return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } @@ -289,11 +300,13 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *clusterInfoDto) (StringMap, error) { - name := d.Get("name").(string) - + capiYamlRendered, err := getCapiYamlPlaintext(d, clusterDetails) + if err != nil { + return nil, err + } storageClass := "{}" - if clusterDetails.StorageProfile != nil { - storageClassEmpty := template.Must(template.New(name + "_StorageClass").Parse(defaultStorageClass)) + if clusterDetails.StorageProfileName != "" { + storageClassEmpty := template.Must(template.New(clusterDetails.Name + "_StorageClass").Parse(defaultStorageClass)) storageClassName := d.Get("storage_class.0.name").(string) reclaimPolicy := d.Get("storage_class.0.reclaim_policy").(string) filesystem := d.Get("storage_class.0.filesystem").(string) @@ -302,7 +315,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl if err := storageClassEmpty.Execute(buf, map[string]string{ "FileSystem": filesystem, "Name": storageClassName, - "StorageProfile": clusterDetails.StorageProfile.Name, + "StorageProfile": clusterDetails.StorageProfileName, "ReclaimPolicy": reclaimPolicy, }); err != nil { return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) @@ -310,25 +323,25 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl storageClass = buf.String() } - capvcdEmpty := template.Must(template.New(name).Parse(capvcdTemplate)) + capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(capvcdTemplate)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, map[string]string{ - "Name": name, + "Name": clusterDetails.Name, "Org": clusterDetails.Org.AdminOrg.Name, "VcdUrl": clusterDetails.VcdUrl.String(), - "Vdc": clusterDetails.Vdc.Vdc.Name, + "Vdc": clusterDetails.VdcName, "Delete": "false", "ForceDelete": "false", "AutoRepairOnErrors": d.Get("auto_repair_on_errors").(string), "DefaultStorageClassOptions": storageClass, "ApiToken": d.Get("api_token").(string), - "CapiYaml": getCapiYamlPlaintext(d, 
clusterDetails), + "CapiYaml": capiYamlRendered, }); err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } result := map[string]interface{}{} - err := json.Unmarshal(buf.Bytes(), &result) + err = json.Unmarshal(buf.Bytes(), &result) if err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } @@ -336,8 +349,50 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl return result, nil } -func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto) string { - return "" +func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(capiYaml)) + + buf := &bytes.Buffer{} + args := map[string]string{ + "ClusterName": clusterDetails.Name, + "TargetNamespace": clusterDetails.Name + "-ns", + "MaxUnhealthyNodePercentage": clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage, + "NodeStartupTimeout": clusterDetails.VCDKEConfig.NodeStartupTimeout, + "NodeNotReadyTimeout": clusterDetails.VCDKEConfig.NodeNotReadyTimeout, + "TkrVersion": clusterDetails.TkrVersion, + "TkgVersion": clusterDetails.TkgVersion, + "PodCidr": d.Get("pods_cidr").(string), + "ServiceCidr": d.Get("service_cidr").(string), + "UsernameB64": base64.StdEncoding.EncodeToString([]byte(d.Get("owner").(string))), + "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(d.Get("api_token").(string))), + "VcdSite": clusterDetails.VcdUrl.String(), + "Org": clusterDetails.Org.AdminOrg.Name, + "OrgVdc": clusterDetails.VdcName, + "OrgVdcNetwork": clusterDetails.NetworkName, + "CatalogName": clusterDetails.CatalogName, + "VAppTemplateName": clusterDetails.OvaName, + "ControlPlaneSizingPolicy": d.Get("control_plane.0.sizing_policy").(string), + "ControlPlanePlacementPolicy": d.Get("control_plane.0.placement_policy").(string), + "ControlPlaneStorageProfile": d.Get("control_plane.0.storage_profile").(string), + "ControlPlaneDiskSize": d.Get("control_plane.0.sizing_policy").(string), + "ControlPlaneMachineCount": d.Get("control_plane.0.machine_count").(string), + "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, + "SshPublicKey": d.Get("ssh_public_key").(string), + } + for i, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + args[fmt.Sprintf("NodePool%dDiskSize", i)] = nodePool["disk_size"].(string) + args[fmt.Sprintf("NodePool%dMachineCount", i)] = nodePool["machine_count"].(string) + args[fmt.Sprintf("NodePool%dStorageProfile", i)] = nodePool["storage_profile"].(string) + args[fmt.Sprintf("NodePool%dPlacementPolicy", i)] = nodePool["placement_policy"].(string) + args[fmt.Sprintf("NodePool%dSizingPolicy", i)] = nodePool["sizing_policy"].(string) + args[fmt.Sprintf("NodePool%dMachineCount", i)] = nodePool["machine_count"].(string) + } + + if err := capiYamlEmpty.Execute(buf, args); err != nil { + return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) + } + return buf.String(), nil } func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -415,19 +470,29 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m // clusterInfoDto is a helper struct that contains all the required elements to successfully create and manage // a Kubernetes cluster using CSE. 
type clusterInfoDto struct { - Name string - VcdUrl url.URL - Org *govcd.AdminOrg - Vdc *govcd.Vdc - VAppTemplate *govcd.VAppTemplate - Network *govcd.OrgVDCNetwork - RdeType *govcd.DefinedEntityType - StorageProfile *types.VdcStorageProfile + Name string + VcdUrl url.URL + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + StorageProfileName string + VCDKEConfig struct { + MaxUnhealthyNodesPercentage string + NodeStartupTimeout string + NodeNotReadyTimeout string + NodeUnknownTimeout string + ContainerRegistryUrl string + } + TkgVersion string + TkrVersion string } // createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information // from th input Terraform resource data. -func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient) (*clusterInfoDto, error) { +func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion string) (*clusterInfoDto, error) { result := &clusterInfoDto{} name := d.Get("name").(string) @@ -444,21 +509,29 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient) (*cluste if err != nil { return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) } - result.Vdc = vdc + result.VdcName = vdc.Vdc.Name vAppTemplateId := d.Get("ova_id").(string) vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) if err != nil { return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) } - result.VAppTemplate = vAppTemplate + result.OvaName = vAppTemplate.VAppTemplate.Name + result.TkgVersion = "" + result.TkrVersion = "" + + catalogName, err := vAppTemplate.GetCatalogName() + if err != nil { + return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) + } + result.CatalogName = catalogName networkId := d.Get("network_id").(string) network, err := vdc.GetOrgVdcNetworkById(networkId, true) if err != nil { - return nil, fmt.Errorf("could not retrieve the Org VDC Network with ID '%s': %s", networkId, err) + return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) } - result.Network = network + result.NetworkName = network.OrgVDCNetwork.Name rdeTypeId := d.Get("capvcd_rde_type_id").(string) rdeType, err := vcdClient.GetRdeTypeById(rdeTypeId) @@ -467,15 +540,56 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient) (*cluste } result.RdeType = rdeType - var storageProfile *types.VdcStorageProfile + storageProfileName := "" if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) - storageProfile, err = vcdClient.GetStorageProfileById(storageProfileId) + storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) if err != nil { return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", storageProfileId, err) } + storageProfileName = storageProfile.Name + } + result.StorageProfileName = storageProfileName + + rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigVersion, "VCDKEConfig") + if err != nil { + return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE: %s", err) + } + if len(rdes) != 1 { + return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) + } + vcdKeConfig := rdes[0].DefinedEntity.Entity + if _, ok := vcdKeConfig["profiles"]; !ok { + return nil, 
fmt.Errorf("expected array 'profiles' in VCDKEConfig, but it is nil") + } + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{}); !ok { + return nil, fmt.Errorf("expected array 'profiles' in VCDKEConfig, but it is not an array") + } + if len(vcdKeConfig["profiles"].([]map[string]interface{})) != 1 { + return nil, fmt.Errorf("expected exactly one 'profiles' item in VCDKEConfig, but it has %d", len(vcdKeConfig["profiles"].([]map[string]interface{}))) + } + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"]; !ok { + return nil, fmt.Errorf("expected item 'profiles[0].K8Config' in VCDKEConfig, but it is nil") + } + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{}); !ok { + return nil, fmt.Errorf("expected an object 'profiles[0].K8Config' in VCDKEConfig, but it is not an object") + } + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"]; !ok { + return nil, fmt.Errorf("expected item 'profiles[0].K8Config.mhc' in VCDKEConfig, but it is nil") + } + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"].(map[string]interface{}); !ok { + return nil, fmt.Errorf("expected an object 'profiles[0].K8Config.mhc' in VCDKEConfig, but it is not an object") + } + mhc := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"].(map[string]interface{}) + result.VCDKEConfig.MaxUnhealthyNodesPercentage = mhc["maxUnhealthyNodes"].(string) + result.VCDKEConfig.NodeStartupTimeout = mhc["nodeStartupTimeout"].(string) + result.VCDKEConfig.NodeNotReadyTimeout = mhc["nodeNotReadyTimeout"].(string) + result.VCDKEConfig.NodeUnknownTimeout = mhc["nodeUnknownTimeout"].(string) + + if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["containerRegistryUrl"]; !ok { + return nil, fmt.Errorf("expected item 'profiles[0].containerRegistryUrl' in VCDKEConfig, but it is nil") } - result.StorageProfile = storageProfile + result.VCDKEConfig.ContainerRegistryUrl = vcdKeConfig["profiles"].([]map[string]interface{})[0]["containerRegistryUrl"].(string) result.VcdUrl = vcdClient.VCDClient.Client.VCDHREF return result, nil diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go new file mode 100644 index 000000000..695a5e2c8 --- /dev/null +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -0,0 +1,144 @@ +//go:build cse + +package vcd + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccVcdCseKubernetesCluster(t *testing.T) { + preTestChecks(t) + + var params = StringMap{ + "Name": t.Name(), + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "Org": testConfig.Cse.Org, + "Vdc": testConfig.Cse.Vdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "CapVcdVersion": testConfig.Cse.CapVcdVersion, + "Owner": testConfig.Cse.Owner, + "ApiToken": testConfig.Cse.ApiTokenFile, + } + testParamsNotEmpty(t, params) + + configText := templateFill(testAccVcdCseKubernetesCluster, params) + + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + resource.Test(t, resource.TestCase{ + ProviderFactories: testAccProviders, + Steps: []resource.TestStep{ + { + Config: configText, + Check: resource.ComposeTestCheckFunc(), + }, + }, + }) + postTestChecks(t) +} + +const testAccVcdCseKubernetesCluster = ` +# 
skip-binary-test - This one requires a very special setup + +data "vcd_catalog" "tkg_catalog" { + org = "{{.Org}}" + name = "{{.OvaCatalog}}" +} + +data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "{{.OvaName}}" +} + +data "vcd_rde_type" "capvcdcluster_type" { + vendor = "vmware" + nss = "capvcdCluster" + version = "{{.CapVcdVersion}}" +} + +data "vcd_org_vdc" "vdc" { + org = data.vcd_catalog.tkg_catalog.org + name = "{{.Vdc}}" +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + name = "{{.EdgeGateway}}" +} + +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "{{.Network}}" +} + +data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +data "vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" +} + +resource "vcd_cse_kubernetes_cluster" "my_cluster" { + name = "{{.Name}}" + ova_id = data.vcd_catalog_vapp_template.tkg_ova.id + capvcd_rde_type_id = data.vcd_rde_type.capvcdcluster_type.id + org = "{{.Org}}" + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + owner = "{{.Owner}}" + api_token_file = "{{.ApiTokenFile}}" + + control_plane { + machine_count = 1 + disk_size = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + } + + node_pool { + machine_count = 1 + disk_size = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + node_pool { + name = "node-pool-1" + machine_count = 1 + disk_size = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + node_pool { + name = "node-pool-2" + machine_count = 1 + disk_size = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + storage_class { + storage_profile_id = data.vcd_storage_profile.sp.id + name = "sc-1" + reclaim_policy = "delete" + filesystem = "ext4" + } + + pods_cidr = "100.10.0.0/11" + services_cidr = "100.90.0.0/11" + + auto_repair_on_errors = true + node_health_check = true +} +` diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index 40e0e77cc..737a33a2a 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -225,5 +225,17 @@ "VCD_SKIP_TEMPLATE_WRITING": "", "VCD_ADD_PROVIDER": "", "REMOVE_ORG_VDC_FROM_TEMPLATE": "" + }, + "cse" :{ + "//" : "Only needed to test Container Service Extension specific resources", + "org" : "tenant_org", + "vdc": "tenant_vdc", + "routedNetwork": "tenant_net_routed", + "edgeGateway": "tenant_edgegateway", + "ovaCatalog": "tkgm_catalog", + "ovaName": "", + "capVcdVersion": "1.2.0", + "owner": "cluster_author", + "apiTokenFile": "", } } From b098b63b8b6358d8562413b4cc67ffe7775e90f3 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 11 Jan 2024 12:45:51 +0100 Subject: [PATCH 009/156] Refactor node pools Signed-off-by: abarreiro --- vcd/config_test.go | 4 +- vcd/cse/4.2/{ => capi-yaml}/capi_yaml.tmpl | 16 - vcd/cse/4.2/capi-yaml/node_pool.tmpl | 15 + vcd/cse/4.2/{capvcd.tmpl => cluster.tmpl} | 0 vcd/resource_vcd_cse_kubernetes_cluster.go | 388 +++++++++++++++------ 5 files changed, 295 insertions(+), 128 deletions(-) rename vcd/cse/4.2/{ => capi-yaml}/capi_yaml.tmpl (93%) create mode 100644 
vcd/cse/4.2/capi-yaml/node_pool.tmpl rename vcd/cse/4.2/{capvcd.tmpl => cluster.tmpl} (100%) diff --git a/vcd/config_test.go b/vcd/config_test.go index 1425805a7..04dde4aab 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -971,7 +971,7 @@ func importStateIdTopHierarchy(objectName string) resource.ImportStateIdFunc { } } -// Used by all entities that depend on Org (such as CatalogName, OrgUser) +// Used by all entities that depend on Org (such as Catalog, OrgUser) // If the orgName is empty, it uses the default Org from testConfig func importStateIdOrgObject(orgName string, objectName string) resource.ImportStateIdFunc { return func(*terraform.State) (string, error) { @@ -1048,7 +1048,7 @@ func importStateIdNsxtManagerObject(objectName string) resource.ImportStateIdFun } } -// Used by all entities that depend on Org + CatalogName (such as catalog item, media item) +// Used by all entities that depend on Org + Catalog (such as catalog item, media item) func importStateIdOrgCatalogObject(objectName string) resource.ImportStateIdFunc { return func(*terraform.State) (string, error) { if testConfig.VCD.Org == "" || testConfig.VCD.Catalog.Name == "" || objectName == "" { diff --git a/vcd/cse/4.2/capi_yaml.tmpl b/vcd/cse/4.2/capi-yaml/capi_yaml.tmpl similarity index 93% rename from vcd/cse/4.2/capi_yaml.tmpl rename to vcd/cse/4.2/capi-yaml/capi_yaml.tmpl index 1189ca3c9..ef5205680 100644 --- a/vcd/cse/4.2/capi_yaml.tmpl +++ b/vcd/cse/4.2/capi-yaml/capi_yaml.tmpl @@ -151,22 +151,6 @@ spec: replicas: {{.ControlPlaneMachineCount}} version: v1.25.7+vmware.2 --- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDMachineTemplate -metadata: - name: {{.ClusterName}}-md-0 - namespace: {{.TargetNamespace}} -spec: - template: - spec: - catalog: {{.Catalog}} - template: {{.VAppTemplateName}} - sizingPolicy: {{.NodePool0SizingPolicy}} - placementPolicy: {{.NodePool0PlacementPolicy}} - storageProfile: "{{.NodePool0StorageProfile}}" - diskSize: {{.NodePool0DiskSize}} - enableNvidiaGPU: false ---- apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate metadata: diff --git a/vcd/cse/4.2/capi-yaml/node_pool.tmpl b/vcd/cse/4.2/capi-yaml/node_pool.tmpl new file mode 100644 index 000000000..674fee531 --- /dev/null +++ b/vcd/cse/4.2/capi-yaml/node_pool.tmpl @@ -0,0 +1,15 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDMachineTemplate +metadata: + name: "{{.NodePoolName}}" + namespace: "{{.TargetNamespace}}" +spec: + template: + spec: + catalog: "{{.Catalog}}" + template: "{{.VAppTemplate}}" + sizingPolicy: "{{.NodePoolSizingPolicy}}" + placementPolicy: "{{.NodePoolPlacementPolicy}}" + storageProfile: "{{.NodePoolStorageProfile}}" + diskSize: "{{.NodePoolDiskSize}}" + enableNvidiaGPU: "{{.NodePoolEnableGpu}}" \ No newline at end of file diff --git a/vcd/cse/4.2/capvcd.tmpl b/vcd/cse/4.2/cluster.tmpl similarity index 100% rename from vcd/cse/4.2/capvcd.tmpl rename to vcd/cse/4.2/cluster.tmpl diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 53d353a05..5290d3465 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -14,18 +14,23 @@ import ( "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" "net/url" + "strconv" + "strings" "text/template" "time" ) -//go:embed cse/4.2/capvcd.tmpl -var capvcdTemplate string +//go:embed cse/4.2/cluster.tmpl +var cseClusterTemplate string //go:embed 
cse/4.2/default_storage_class.tmpl -var defaultStorageClass string +var cseDefaultStorageClassTemplate string -//go:embed cse/4.2/capi_yaml.tmpl -var capiYaml string +//go:embed cse/4.2/capi-yaml/capi_yaml.tmpl +var cseCapiYamlTemplate string + +//go:embed cse/4.2/capi-yaml/node_pool.tmpl +var cseNodePoolTemplate string func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ @@ -158,6 +163,11 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of this node pool", + }, "machine_count": { Type: schema.TypeInt, Optional: true, @@ -299,14 +309,89 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return resourceVcdCseKubernetesRead(ctx, d, meta) } +func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) + if err != nil { + return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) + } + dSet(d, "raw_cluster_rde_json", jsonEntity) + + status, ok := rde.DefinedEntity.Entity["status"].(StringMap) + if !ok { + return diag.Errorf("could not read the 'status' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) + } + + vcdKe, ok := status["vcdKe"].(StringMap) + if !ok { + return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) + } + + dSet(d, "state", vcdKe["state"]) + d.SetId(rde.DefinedEntity.ID) + return nil +} + +func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + return nil +} + +// resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send +// the flags "markForDelete" and "forceDelete" back to true, so the CSE Server is able to delete all cluster elements +// and perform a cleanup. Hence, this function sends these properties and waits for deletion. 
+func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + spec, ok := rde.DefinedEntity.Entity["spec"].(StringMap) + if !ok { + return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") + } + + spec["markForDelete"] = true + spec["forceDelete"] = true + rde.DefinedEntity.Entity["spec"] = spec + + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return diag.Errorf("could not delete the cluster '%s': %s", rde.DefinedEntity.ID, err) + } + + // TODO: Add a timeout + deletionComplete := false + for !deletionComplete { + _, err = vcdClient.GetRdeById(d.Id()) + if err != nil { + if govcd.IsNotFound(err) { + deletionComplete = true + } + return diag.Errorf("could not check whether the cluster '%s' is deleted: %s", d.Id(), err) + } + time.Sleep(30 * time.Second) + } + return nil +} + +// getCseKubernetesClusterEntityMap gets the payload for the RDE that manages the Kubernetes cluster, so it +// can be created or updated. func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *clusterInfoDto) (StringMap, error) { - capiYamlRendered, err := getCapiYamlPlaintext(d, clusterDetails) + capiYamlRendered, err := generateCapiYaml(d, clusterDetails) if err != nil { return nil, err } storageClass := "{}" - if clusterDetails.StorageProfileName != "" { - storageClassEmpty := template.Must(template.New(clusterDetails.Name + "_StorageClass").Parse(defaultStorageClass)) + if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { + storageClassEmpty := template.Must(template.New(clusterDetails.Name + "_StorageClass").Parse(cseDefaultStorageClassTemplate)) + storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) storageClassName := d.Get("storage_class.0.name").(string) reclaimPolicy := d.Get("storage_class.0.reclaim_policy").(string) filesystem := d.Get("storage_class.0.filesystem").(string) @@ -315,7 +400,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl if err := storageClassEmpty.Execute(buf, map[string]string{ "FileSystem": filesystem, "Name": storageClassName, - "StorageProfile": clusterDetails.StorageProfileName, + "StorageProfile": clusterDetails.StorageProfiles[storageProfileId], "ReclaimPolicy": reclaimPolicy, }); err != nil { return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) @@ -323,7 +408,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl storageClass = buf.String() } - capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(capvcdTemplate)) + capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(cseClusterTemplate)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, map[string]string{ "Name": clusterDetails.Name, @@ -349,8 +434,16 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl return result, nil } -func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(capiYaml)) +// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded +// in the CAPVCD cluster JSON payload. 
This function picks data from the Terraform schema and the clusterInfoDto to +// populate several Go templates and build a final YAML. +func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(cseCapiYamlTemplate)) + + nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) + if err != nil { + return "", err + } buf := &bytes.Buffer{} args := map[string]string{ @@ -359,8 +452,8 @@ func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto "MaxUnhealthyNodePercentage": clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage, "NodeStartupTimeout": clusterDetails.VCDKEConfig.NodeStartupTimeout, "NodeNotReadyTimeout": clusterDetails.VCDKEConfig.NodeNotReadyTimeout, - "TkrVersion": clusterDetails.TkrVersion, - "TkgVersion": clusterDetails.TkgVersion, + "TkrVersion": clusterDetails.TkgVersion.Tkr, + "TkgVersion": clusterDetails.TkgVersion.Tkg[0], "PodCidr": d.Get("pods_cidr").(string), "ServiceCidr": d.Get("service_cidr").(string), "UsernameB64": base64.StdEncoding.EncodeToString([]byte(d.Get("owner").(string))), @@ -369,129 +462,121 @@ func getCapiYamlPlaintext(d *schema.ResourceData, clusterDetails *clusterInfoDto "Org": clusterDetails.Org.AdminOrg.Name, "OrgVdc": clusterDetails.VdcName, "OrgVdcNetwork": clusterDetails.NetworkName, - "CatalogName": clusterDetails.CatalogName, - "VAppTemplateName": clusterDetails.OvaName, + "Catalog": clusterDetails.CatalogName, + "VAppTemplate": clusterDetails.OvaName, "ControlPlaneSizingPolicy": d.Get("control_plane.0.sizing_policy").(string), "ControlPlanePlacementPolicy": d.Get("control_plane.0.placement_policy").(string), - "ControlPlaneStorageProfile": d.Get("control_plane.0.storage_profile").(string), + "ControlPlaneStorageProfile": clusterDetails.StorageProfiles[d.Get("control_plane.0.storage_profile").(string)], "ControlPlaneDiskSize": d.Get("control_plane.0.sizing_policy").(string), "ControlPlaneMachineCount": d.Get("control_plane.0.machine_count").(string), "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, "SshPublicKey": d.Get("ssh_public_key").(string), } - for i, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - args[fmt.Sprintf("NodePool%dDiskSize", i)] = nodePool["disk_size"].(string) - args[fmt.Sprintf("NodePool%dMachineCount", i)] = nodePool["machine_count"].(string) - args[fmt.Sprintf("NodePool%dStorageProfile", i)] = nodePool["storage_profile"].(string) - args[fmt.Sprintf("NodePool%dPlacementPolicy", i)] = nodePool["placement_policy"].(string) - args[fmt.Sprintf("NodePool%dSizingPolicy", i)] = nodePool["sizing_policy"].(string) - args[fmt.Sprintf("NodePool%dMachineCount", i)] = nodePool["machine_count"].(string) - } if err := capiYamlEmpty.Execute(buf, args); err != nil { return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) } - return buf.String(), nil -} - -func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - vcdClient := meta.(*VCDClient) - - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) - if err != nil { - return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) - } - dSet(d, 
"raw_cluster_rde_json", jsonEntity) - - status, ok := rde.DefinedEntity.Entity["status"].(StringMap) - if !ok { - return diag.Errorf("could not read the 'status' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) - } - vcdKe, ok := status["vcdKe"].(StringMap) - if !ok { - return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) - } - - dSet(d, "state", vcdKe["state"]) - d.SetId(rde.DefinedEntity.ID) - return nil -} - -func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - return nil + result := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) + return result, nil } -// resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send -// the flags "markForDelete" and "forceDelete" back to true, so the CSE Server is able to delete all cluster elements -// and perform a cleanup. Hence, this function sends these properties and waits for deletion. -func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - vcdClient := meta.(*VCDClient) - - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - spec, ok := rde.DefinedEntity.Entity["spec"].(StringMap) - if !ok { - return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") - } +// generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. +func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { + nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_NodePool").Parse(cseNodePoolTemplate)) + resultYaml := "" + buf := &bytes.Buffer{} - spec["markForDelete"] = true - spec["forceDelete"] = true - rde.DefinedEntity.Entity["spec"] = spec + // We can have many node pool blocks, we build a YAML object for each one of them. 
+ for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + name := nodePool["name"].(string) - err = rde.Update(*rde.DefinedEntity) - if err != nil { - return diag.Errorf("could not delete the cluster '%s': %s", rde.DefinedEntity.ID, err) - } + // Check the correctness of the compute policies in the node pool block + placementPolicyId, isSetPlacement := nodePool["placement_policy_id"] + vpguPolicyId, isSetVgpu := nodePool["vgpu_policy_id"] + if isSetPlacement && isSetVgpu { + return "", fmt.Errorf("the node pool '%s' should have either a Placement Policy or a vGPU Policy, not both", name) + } + if isSetVgpu { + placementPolicyId = vpguPolicyId // For convenience, we just use one of them as both cannot be set at same time + } - // TODO: Add a timeout - deletionComplete := false - for !deletionComplete { - _, err = vcdClient.GetRdeById(d.Id()) - if err != nil { - if govcd.IsNotFound(err) { - deletionComplete = true - } - return diag.Errorf("could not check whether the cluster '%s' is deleted: %s", d.Id(), err) + if err := nodePoolEmptyTmpl.Execute(buf, map[string]string{ + "NodePoolName": name, + "TargetNamespace": clusterDetails.Name + "-ns", + "Catalog": clusterDetails.CatalogName, + "VAppTemplate": clusterDetails.OvaName, + "NodePoolSizingPolicy": clusterDetails.ComputePolicies[nodePool["sizing_policy_id"].(string)], + "NodePoolPlacementPolicy": clusterDetails.ComputePolicies[placementPolicyId.(string)], + "NodePoolStorageProfile": clusterDetails.StorageProfiles[nodePool["storage_profile_id"].(string)], + "NodePoolDiskSize": strconv.Itoa(nodePool["disk_size"].(int)), + "NodePoolEnableGpu": strconv.FormatBool(isSetVgpu), + }); err != nil { + return "", fmt.Errorf("could not generate a correct Node Pool YAML: %s", err) } - time.Sleep(30 * time.Second) + resultYaml += fmt.Sprintf("%s\n---\n", buf.String()) + buf.Reset() } - return nil + return resultYaml, nil } // clusterInfoDto is a helper struct that contains all the required elements to successfully create and manage // a Kubernetes cluster using CSE. type clusterInfoDto struct { - Name string - VcdUrl url.URL - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - StorageProfileName string - VCDKEConfig struct { + Name string + VcdUrl url.URL + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + StorageProfiles map[string]string // Maps IDs with names + ComputePolicies map[string]string // Maps IDs with names + VCDKEConfig struct { MaxUnhealthyNodesPercentage string NodeStartupTimeout string NodeNotReadyTimeout string NodeUnknownTimeout string ContainerRegistryUrl string } - TkgVersion string - TkrVersion string + TkgVersion *tkgVersion +} + +// tkgVersion is an auxiliary structure used by the tkgMap variable to map +// a Kubernetes template OVA to some specific TKG components versions. +type tkgVersion struct { + Tkg []string + Tkr string + Etcd string + CoreDns string +} + +// tkgMap maps specific Kubernetes template OVAs to specific TKG components versions. 
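The keys of tkgMap below are not arbitrary: createClusterInfoDto (later in this file) derives them from the Kubernetes template OVA name by taking the substring between the last "kube-" and the trailing ".ova". A standalone sketch of that derivation, using a hypothetical OVA name purely for illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical OVA name, used only to illustrate how a tkgMap key is obtained.
	ovaName := "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8.ova"

	// Same slicing that createClusterInfoDto applies to vAppTemplate.VAppTemplate.Name.
	ovaCode := ovaName[strings.LastIndex(ovaName, "kube-")+len("kube-") : strings.LastIndex(ovaName, ".ova")]

	// Prints "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8", one of the tkgMap keys below.
	fmt.Println(ovaCode)
}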
+var tkgMap = map[string]tkgVersion{ + "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { + Tkg: []string{"v2.4.0"}, + Tkr: "v1.27.5---vmware.1-tkg.1", + Etcd: "v3.5.7_vmware.6", + CoreDns: "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { + Tkg: []string{"v2.4.0"}, + Tkr: "v1.26.8---vmware.1-tkg.1", + Etcd: "v3.5.6_vmware.20", + CoreDns: "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { + Tkg: []string{"v2.3.1"}, + Tkr: "v1.26.8---vmware.1-tkg.2", + Etcd: "v3.5.6_vmware.20", + CoreDns: "v1.9.3_vmware.16", + }, } // createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information -// from th input Terraform resource data. +// from the Terraform resource data and the target VCD. func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion string) (*clusterInfoDto, error) { result := &clusterInfoDto{} @@ -517,8 +602,14 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) } result.OvaName = vAppTemplate.VAppTemplate.Name - result.TkgVersion = "" - result.TkrVersion = "" + + // Searches for the TKG components versions in the tkgMap with the OVA name details + ovaCode := vAppTemplate.VAppTemplate.Name[strings.LastIndex(vAppTemplate.VAppTemplate.Name, "kube-")+len("kube-") : strings.LastIndex(vAppTemplate.VAppTemplate.Name, ".ova")] + tkgVersion, ok := tkgMap[ovaCode] + if !ok { + return nil, fmt.Errorf("could not retrieve the TKG version details from Kubernetes template '%s'. Please check whether the OVA '%s' is compatible", ovaCode, vAppTemplate.VAppTemplate.Name) + } + result.TkgVersion = &tkgVersion catalogName, err := vAppTemplate.GetCatalogName() if err != nil { @@ -540,16 +631,90 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon } result.RdeType = rdeType - storageProfileName := "" + // Builds a map that relates storage profiles IDs (the schema uses them to build a healthy Terraform dependency graph) + // with their corresponding names (the cluster YAML and CSE in general uses names only). + // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, + // as there can be a lot of them. 
+ result.StorageProfiles = make(map[string]string) if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", storageProfileId, err) + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) + } + result.StorageProfiles[storageProfileId] = storageProfile.Name + } + controlPlaneStorageProfileId := d.Get("control_plane.0.storage_profile_id").(string) + if _, ok := result.StorageProfiles[controlPlaneStorageProfileId]; !ok { // Only query if not already present + storageProfile, err := vcdClient.GetStorageProfileById(controlPlaneStorageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.StorageProfiles[controlPlaneStorageProfileId] = storageProfile.Name + } + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + nodePoolStorageProfileId := nodePool["storage_profile_id"].(string) + if _, ok := result.StorageProfiles[nodePoolStorageProfileId]; !ok { // Only query if not already present + storageProfile, err := vcdClient.GetStorageProfileById(nodePoolStorageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.StorageProfiles[nodePoolStorageProfileId] = storageProfile.Name + } + } + + // Builds a map that relates Compute Policies IDs (the schema uses them to build a healthy Terraform dependency graph) + // with their corresponding names (the cluster YAML and CSE in general uses names only). + // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, + // as there can be a lot of them. 
+ result.ComputePolicies = make(map[string]string) + if controlPlaneSizingPolicyId, isSet := d.GetOk("control_plane.0.sizing_policy_id"); isSet { + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlaneSizingPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.ComputePolicies[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + if controlPlanePlacementPolicyId, isSet := d.GetOk("control_plane.0.placement_policy_id"); isSet { + if _, ok := result.ComputePolicies[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlanePlacementPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.ComputePolicies[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + if nodePoolSizingPolicyId, isSet := nodePool["sizing_policy_id"]; isSet { + if _, ok := result.ComputePolicies[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolSizingPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.ComputePolicies[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if nodePoolPlacementPolicyId, isSet := nodePool["placement_policy_id"]; isSet { + if _, ok := result.ComputePolicies[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolPlacementPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.ComputePolicies[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if nodePoolVGpuPolicyId, isSet := nodePool["vgpu_policy_id"]; isSet { + if _, ok := result.ComputePolicies[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolVGpuPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.ComputePolicies[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } } - storageProfileName = storageProfile.Name } - result.StorageProfileName = storageProfileName rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigVersion, "VCDKEConfig") if err != nil { @@ -558,6 +723,9 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon if len(rdes) != 1 { return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) } + + // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have + // to deal with it again. 
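The chain of type assertions that follows expects the VCDKEConfig RDE entity to have a very specific shape. A minimal sketch of the structure those checks look for, written as a Go literal; only the key names are taken from the code, every value is invented for illustration:

package main

import "fmt"

func main() {
	// Minimal shape of the VCDKEConfig entity that the assertions below require.
	// All values are illustrative; only the key names come from the checks in this function.
	vcdKeConfigSketch := map[string]interface{}{
		"profiles": []map[string]interface{}{
			{
				"containerRegistryUrl": "registry.example.com",
				"K8Config": map[string]interface{}{
					"mhc": map[string]interface{}{
						"maxUnhealthyNodes":   "100",
						"nodeStartupTimeout":  "900",
						"nodeNotReadyTimeout": "300",
						"nodeUnknownTimeout":  "300",
					},
				},
			},
		},
	}

	// The same lookups performed by createClusterInfoDto succeed against this shape.
	mhc := vcdKeConfigSketch["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"].(map[string]interface{})
	fmt.Println(mhc["maxUnhealthyNodes"]) // 100
}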
vcdKeConfig := rdes[0].DefinedEntity.Entity if _, ok := vcdKeConfig["profiles"]; !ok { return nil, fmt.Errorf("expected array 'profiles' in VCDKEConfig, but it is nil") From 94fb63454dbc16768a8ece4d944f597b2f0df401 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 11 Jan 2024 14:11:19 +0100 Subject: [PATCH 010/156] All templates and placeholders done Signed-off-by: abarreiro --- vcd/cse/4.2/capi-yaml/capi_yaml.tmpl | 201 ------------------- vcd/cse/4.2/capi-yaml/node_pool.tmpl | 15 -- vcd/cse/capi-yaml/cluster.tmpl | 153 ++++++++++++++ vcd/cse/capi-yaml/machine_health_check.tmpl | 22 ++ vcd/cse/capi-yaml/node_pool.tmpl | 41 ++++ vcd/cse/{4.2 => }/default_storage_class.tmpl | 0 vcd/cse/{4.2/cluster.tmpl => rde.tmpl} | 0 vcd/resource_vcd_cse_kubernetes_cluster.go | 184 +++++++++++------ 8 files changed, 343 insertions(+), 273 deletions(-) delete mode 100644 vcd/cse/4.2/capi-yaml/capi_yaml.tmpl delete mode 100644 vcd/cse/4.2/capi-yaml/node_pool.tmpl create mode 100644 vcd/cse/capi-yaml/cluster.tmpl create mode 100644 vcd/cse/capi-yaml/machine_health_check.tmpl create mode 100644 vcd/cse/capi-yaml/node_pool.tmpl rename vcd/cse/{4.2 => }/default_storage_class.tmpl (100%) rename vcd/cse/{4.2/cluster.tmpl => rde.tmpl} (100%) diff --git a/vcd/cse/4.2/capi-yaml/capi_yaml.tmpl b/vcd/cse/4.2/capi-yaml/capi_yaml.tmpl deleted file mode 100644 index ef5205680..000000000 --- a/vcd/cse/4.2/capi-yaml/capi_yaml.tmpl +++ /dev/null @@ -1,201 +0,0 @@ -# This file was downloaded from https://raw.githubusercontent.com/vmware/cluster-api-provider-cloud-director/main/templates/cluster-template-v1.25.7.yaml -# Only the commented lines were added manually. - -# The MachineHealthCheck was added manually. You can add this section if you want automatic health checks in your -# Kubernetes clusters. 
-apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: {{.ClusterName}} - namespace: {{.TargetNamespace}} - labels: - clusterctl.cluster.x-k8s.io: "" - clusterctl.cluster.x-k8s.io/move: "" -spec: - clusterName: {{.ClusterName}} - maxUnhealthy: {{.MaxUnhealthyNodePercentage}}% - nodeStartupTimeout: {{.NodeStartupTimeout}}s - selector: - matchLabels: - cluster.x-k8s.io/cluster-name: {{.ClusterName}} - unhealthyConditions: - - type: Ready - status: Unknown - timeout: {{.NodeUnknownTimeout}}s - - type: Ready - status: "False" - timeout: {{.NodeNotReadyTimeout}}s ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: {{.ClusterName}} - namespace: {{.TargetNamespace}} - labels: # The labels section was added manually, this is REQUIRED for the cluster to work - cluster-role.tkg.tanzu.vmware.com/management: "" - tanzuKubernetesRelease: {{.TkrVersion}} - tkg.tanzu.vmware.com/cluster-name: {{.ClusterName}} - annotations: # The annotations section was added manually, this is REQUIRED for the cluster to work - TKGVERSION: {{.TkgVersion}} -spec: - clusterNetwork: - pods: - cidrBlocks: - - {{.PodCidr}} - serviceDomain: cluster.local - services: - cidrBlocks: - - {{.ServiceCidr}} - controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlane - name: {{.ClusterName}}-control-plane - namespace: {{.TargetNamespace}} - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDCluster - name: {{.ClusterName}} - namespace: {{.TargetNamespace}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: capi-user-credentials - namespace: {{.TargetNamespace}} -type: Opaque -data: - username: "{{.UsernameB64}}" - password: "" - refreshToken: "{{.ApiTokenB64}}" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDCluster -metadata: - name: {{.ClusterName}} - namespace: {{.TargetNamespace}} -spec: - site: {{.VcdSite}} - org: {{.Org}} - ovdc: {{.OrgVdc}} - ovdcNetwork: {{.OrgVdcNetwork}} - useAsManagementCluster: false - userContext: - secretRef: - name: capi-user-credentials - namespace: {{.TargetNamespace}} ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDMachineTemplate -metadata: - name: {{.ClusterName}}-control-plane - namespace: {{.TargetNamespace}} -spec: - template: - spec: - catalog: {{.Catalog}} - template: {{.VAppTemplateName}} - sizingPolicy: {{.ControlPlaneSizingPolicy}} - placementPolicy: {{.ControlPlanePlacementPolicy}} - storageProfile: "{{.ControlPlaneStorageProfile}}" - diskSize: {{.ControlPlaneDiskSize}} - enableNvidiaGPU: false ---- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: {{.ClusterName}}-control-plane - namespace: {{.TargetNamespace}} -spec: - kubeadmConfigSpec: - preKubeadmCommands: # preKubeadmCommands was added manually - - mv /etc/ssl/certs/custom_certificate_*.crt - /usr/local/share/ca-certificates && update-ca-certificates - clusterConfiguration: - apiServer: - certSANs: - - localhost - - 127.0.0.1 - controllerManager: # controllerManager was added manually - extraArgs: - enable-hostpath-provisioner: "true" - dns: - imageRepository: {{.ContainerRegistryUrl}}/tkg - imageTag: v1.9.3_vmware.8 - etcd: - local: - imageRepository: {{.ContainerRegistryUrl}}/tkg - imageTag: v3.5.6_vmware.9 - imageRepository: {{.ContainerRegistryUrl}}/tkg - users: - - name: root - sshAuthorizedKeys: - - "{{.SshPublicKey}}" - initConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - 
kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external - joinConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external - machineTemplate: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDMachineTemplate - name: {{.ClusterName}}-control-plane - namespace: {{.TargetNamespace}} - replicas: {{.ControlPlaneMachineCount}} - version: v1.25.7+vmware.2 ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: {{.ClusterName}}-md-0 - namespace: {{.TargetNamespace}} -spec: - template: - spec: - users: - - name: root - sshAuthorizedKeys: - - "{{.SshPublicKey}}" - useExperimentalRetryJoin: true # Added manually - preKubeadmCommands: # Added manually - - mv /etc/ssl/certs/custom_certificate_*.crt - /usr/local/share/ca-certificates && update-ca-certificates - joinConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: {{.ClusterName}}-md-0 - namespace: {{.TargetNamespace}} -spec: - clusterName: {{.ClusterName}} - replicas: {{.NodePool0MachineCount}} - selector: - matchLabels: null - template: - spec: - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: {{.ClusterName}}-md-0 - namespace: {{.TargetNamespace}} - clusterName: {{.ClusterName}} - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDMachineTemplate - name: {{.ClusterName}}-md-0 - namespace: {{.TargetNamespace}} - version: v1.25.7+vmware.2 \ No newline at end of file diff --git a/vcd/cse/4.2/capi-yaml/node_pool.tmpl b/vcd/cse/4.2/capi-yaml/node_pool.tmpl deleted file mode 100644 index 674fee531..000000000 --- a/vcd/cse/4.2/capi-yaml/node_pool.tmpl +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDMachineTemplate -metadata: - name: "{{.NodePoolName}}" - namespace: "{{.TargetNamespace}}" -spec: - template: - spec: - catalog: "{{.Catalog}}" - template: "{{.VAppTemplate}}" - sizingPolicy: "{{.NodePoolSizingPolicy}}" - placementPolicy: "{{.NodePoolPlacementPolicy}}" - storageProfile: "{{.NodePoolStorageProfile}}" - diskSize: "{{.NodePoolDiskSize}}" - enableNvidiaGPU: "{{.NodePoolEnableGpu}}" \ No newline at end of file diff --git a/vcd/cse/capi-yaml/cluster.tmpl b/vcd/cse/capi-yaml/cluster.tmpl new file mode 100644 index 000000000..bf193a53f --- /dev/null +++ b/vcd/cse/capi-yaml/cluster.tmpl @@ -0,0 +1,153 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" + labels: + cluster-role.tkg.tanzu.vmware.com/management: "" + tanzuKubernetesRelease: "{{.TkrVersion}}" + tkg.tanzu.vmware.com/cluster-name: "{{.ClusterName}}" + annotations: + osInfo: "ubuntu,20.04,amd64", + TKGVERSION: "{{.TkgVersion}}" +spec: + clusterNetwork: + pods: + cidrBlocks: + - "{{.PodCidr}}" + serviceDomain: cluster.local + services: + cidrBlocks: + - "{{.ServiceCidr}}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: "{{.ClusterName}}-control-plane" + namespace: "{{.TargetNamespace}}" + 
infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDCluster + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" +--- +apiVersion: v1 +kind: Secret +metadata: + name: capi-user-credentials + namespace: {{.TargetNamespace}} +type: Opaque +data: + username: "{{.UsernameB64}}" + refreshToken: "{{.ApiTokenB64}}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDCluster +metadata: + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" +spec: + site: "{{.VcdSite}}" + org: "{{.Org}}" + ovdc: "{{.OrgVdc}}" + ovdcNetwork: "{{.OrgVdcNetwork}}" + {{- if .ControlPlaneEndpoint}} + controlPlaneEndpoint: + host: "{{.ControlPlaneEndpoint}}" + port: 6443 + {{- end}} + {{- if .VirtualIpSubnet}} + loadBalancerConfigSpec: + vipSubnet: "{{.VirtualIpSubnet}}" + {{- end}} + useAsManagementCluster: false + userContext: + secretRef: + name: capi-user-credentials + namespace: "{{.TargetNamespace}}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDMachineTemplate +metadata: + name: "{{.ClusterName}}-control-plane" + namespace: "{{.TargetNamespace}}" +spec: + template: + spec: + catalog: "{{.Catalog}}" + template: "{{.VAppTemplateName}}" + sizingPolicy: "{{.ControlPlaneSizingPolicy}}" + placementPolicy: "{{.ControlPlanePlacementPolicy}}" + storageProfile: "{{.ControlPlaneStorageProfile}}" + diskSize: {{.ControlPlaneDiskSize}} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: "{{.ClusterName}}-control-plane" + namespace: "{{.TargetNamespace}}" +spec: + kubeadmConfigSpec: + preKubeadmCommands: + - mv /etc/ssl/certs/custom_certificate_*.crt /usr/local/share/ca-certificates && update-ca-certificates + clusterConfiguration: + apiServer: + certSANs: + - localhost + - 127.0.0.1 + controllerManager: + extraArgs: + enable-hostpath-provisioner: "true" + dns: + imageRepository: "{{.ContainerRegistryUrl}}" + imageTag: "{{.DnsVersion}}" + etcd: + local: + imageRepository: "{{.ContainerRegistryUrl}}" + imageTag: "{{.EtcdVersion}}" + imageRepository: "{{.ContainerRegistryUrl}}" + users: + - name: root + sshAuthorizedKeys: + - "{{.SshPublicKey}}" + initConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external + joinConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDMachineTemplate + name: "{{.ClusterName}}-control-plane" + namespace: "{{.TargetNamespace}}" + replicas: {{.ControlPlaneMachineCount}} + version: "{{.KubernetesVersion}}" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: "{{.ClusterName}}-kct" + namespace: "{{.TargetNamespace}}" +spec: + template: + spec: + users: + - name: root + sshAuthorizedKeys: + - "{{.SshPublicKey}}" + useExperimentalRetryJoin: true + preKubeadmCommands: # Added manually + - mv /etc/ssl/certs/custom_certificate_*.crt /usr/local/share/ca-certificates && update-ca-certificates + joinConfiguration: + nodeRegistration: + criSocket: /run/containerd/containerd.sock + kubeletExtraArgs: + eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% + cloud-provider: external \ No newline at end of file 
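These capi-yaml/*.tmpl files are plain Go text/template documents: the provider parses them with template.Must(template.New(...).Parse(...)) and fills the {{.Placeholder}} fields from a map[string]string, as getCseKubernetesClusterEntityMap and generateNodePoolYaml do. A minimal standalone sketch of that rendering step, with a trimmed template and invented placeholder values:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Trimmed stand-in for node_pool.tmpl; the provider embeds the full file with //go:embed.
const nodePoolSketch = `apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: VCDMachineTemplate
metadata:
  name: "{{.NodePoolName}}"
  namespace: "{{.TargetNamespace}}"
spec:
  template:
    spec:
      catalog: "{{.Catalog}}"
      template: "{{.VAppTemplate}}"
      diskSize: "{{.NodePoolDiskSize}}"`

func main() {
	tmpl := template.Must(template.New("node_pool").Parse(nodePoolSketch))
	buf := &bytes.Buffer{}
	// All values below are illustrative placeholders, not real infrastructure names.
	err := tmpl.Execute(buf, map[string]string{
		"NodePoolName":     "my-cluster-node-pool-1",
		"TargetNamespace":  "my-cluster-ns",
		"Catalog":          "tkgm_catalog",
		"VAppTemplate":     "ubuntu-2004-kube-v1.26.8",
		"NodePoolDiskSize": "20",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // YAML fragment, later concatenated into the full CAPI YAML
}

The real generateNodePoolYaml repeats this rendering for every node_pool block in the Terraform configuration and joins the fragments with a YAML document separator (---) before appending the cluster-wide template output.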
diff --git a/vcd/cse/capi-yaml/machine_health_check.tmpl b/vcd/cse/capi-yaml/machine_health_check.tmpl new file mode 100644 index 000000000..3180fb3f2 --- /dev/null +++ b/vcd/cse/capi-yaml/machine_health_check.tmpl @@ -0,0 +1,22 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" + labels: + clusterctl.cluster.x-k8s.io: "" + clusterctl.cluster.x-k8s.io/move: "" +spec: + clusterName: "{{.ClusterName}}" + maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" + nodeStartupTimeout: {{.NodeStartupTimeout}} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" + unhealthyConditions: + - type: Ready + status: Unknown + timeout: {{.NodeUnknownTimeout}} + - type: Ready + status: "False" + timeout: {{.NodeNotReadyTimeout}} \ No newline at end of file diff --git a/vcd/cse/capi-yaml/node_pool.tmpl b/vcd/cse/capi-yaml/node_pool.tmpl new file mode 100644 index 000000000..7317f87a0 --- /dev/null +++ b/vcd/cse/capi-yaml/node_pool.tmpl @@ -0,0 +1,41 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: VCDMachineTemplate +metadata: + name: "{{.NodePoolName}}" + namespace: "{{.TargetNamespace}}" +spec: + template: + spec: + catalog: "{{.Catalog}}" + template: "{{.VAppTemplate}}" + sizingPolicy: "{{.NodePoolSizingPolicy}}" + placementPolicy: "{{.NodePoolPlacementPolicy}}" + storageProfile: "{{.NodePoolStorageProfile}}" + diskSize: "{{.NodePoolDiskSize}}" + enableNvidiaGPU: "{{.NodePoolEnableGpu}}" +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: "{{.NodePoolName}}" + namespace: "{{.TargetNamespace}}" +spec: + clusterName: "{{.ClusterName}}" + replicas: {{.NodePoolMachineCount}} + selector: + matchLabels: null + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: "{{.ClusterName}}-kct" + namespace: "{{.TargetNamespace}}" + clusterName: "{{.ClusterName}}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: VCDMachineTemplate + name: "{{.NodePoolName}}" + namespace: "{{.TargetNamespace}}" + version: "{{.KubernetesVersion}}" \ No newline at end of file diff --git a/vcd/cse/4.2/default_storage_class.tmpl b/vcd/cse/default_storage_class.tmpl similarity index 100% rename from vcd/cse/4.2/default_storage_class.tmpl rename to vcd/cse/default_storage_class.tmpl diff --git a/vcd/cse/4.2/cluster.tmpl b/vcd/cse/rde.tmpl similarity index 100% rename from vcd/cse/4.2/cluster.tmpl rename to vcd/cse/rde.tmpl diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 5290d3465..647f00179 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -20,18 +20,21 @@ import ( "time" ) -//go:embed cse/4.2/cluster.tmpl -var cseClusterTemplate string +//go:embed cse/rde.tmpl +var cseRdeJsonTemplate string -//go:embed cse/4.2/default_storage_class.tmpl +//go:embed cse/default_storage_class.tmpl var cseDefaultStorageClassTemplate string -//go:embed cse/4.2/capi-yaml/capi_yaml.tmpl -var cseCapiYamlTemplate string +//go:embed cse/capi-yaml/cluster.tmpl +var cseClusterYamlTemplate string -//go:embed cse/4.2/capi-yaml/node_pool.tmpl +//go:embed cse/capi-yaml/node_pool.tmpl var cseNodePoolTemplate string +//go:embed cse/capi-yaml/machine_health_check.tmpl +var cseMachineHealthCheckTemplate string + func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ 
CreateContext: resourceVcdCseKubernetesClusterCreate, @@ -92,7 +95,18 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "A file that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'", + Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file and its contents, as it contains sensitive information", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(string) + if !ok { + return diag.Errorf("could not parse string value '%v'", v) + } + _, err := govcd.GetTokenFromFile(value) + if err != nil { + return diag.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) + } + return nil + }, }, "ssh_public_key": { Type: schema.TypeString, @@ -400,7 +414,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl if err := storageClassEmpty.Execute(buf, map[string]string{ "FileSystem": filesystem, "Name": storageClassName, - "StorageProfile": clusterDetails.StorageProfiles[storageProfileId], + "StorageProfile": clusterDetails.UrnToNamesCache[storageProfileId], "ReclaimPolicy": reclaimPolicy, }); err != nil { return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) @@ -408,7 +422,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl storageClass = buf.String() } - capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(cseClusterTemplate)) + capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(cseRdeJsonTemplate)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, map[string]string{ "Name": clusterDetails.Name, @@ -438,49 +452,89 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl // in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the clusterInfoDto to // populate several Go templates and build a final YAML. 
func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(cseCapiYamlTemplate)) + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(cseClusterYamlTemplate)) nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) if err != nil { return "", err } + machineHealthCheckYaml, err := generateMachineHealthCheckYaml(d, clusterDetails) + if err != nil { + return "", err + } + + apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) + if err != nil { + return "", fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) + } + buf := &bytes.Buffer{} args := map[string]string{ "ClusterName": clusterDetails.Name, "TargetNamespace": clusterDetails.Name + "-ns", - "MaxUnhealthyNodePercentage": clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage, - "NodeStartupTimeout": clusterDetails.VCDKEConfig.NodeStartupTimeout, - "NodeNotReadyTimeout": clusterDetails.VCDKEConfig.NodeNotReadyTimeout, "TkrVersion": clusterDetails.TkgVersion.Tkr, "TkgVersion": clusterDetails.TkgVersion.Tkg[0], + "UsernameB64": base64.StdEncoding.EncodeToString([]byte(d.Get("owner").(string))), + "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(apiToken.RefreshToken)), "PodCidr": d.Get("pods_cidr").(string), "ServiceCidr": d.Get("service_cidr").(string), - "UsernameB64": base64.StdEncoding.EncodeToString([]byte(d.Get("owner").(string))), - "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(d.Get("api_token").(string))), "VcdSite": clusterDetails.VcdUrl.String(), "Org": clusterDetails.Org.AdminOrg.Name, "OrgVdc": clusterDetails.VdcName, "OrgVdcNetwork": clusterDetails.NetworkName, "Catalog": clusterDetails.CatalogName, "VAppTemplate": clusterDetails.OvaName, - "ControlPlaneSizingPolicy": d.Get("control_plane.0.sizing_policy").(string), - "ControlPlanePlacementPolicy": d.Get("control_plane.0.placement_policy").(string), - "ControlPlaneStorageProfile": clusterDetails.StorageProfiles[d.Get("control_plane.0.storage_profile").(string)], - "ControlPlaneDiskSize": d.Get("control_plane.0.sizing_policy").(string), + "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], + "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], + "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], + "ControlPlaneDiskSize": d.Get("control_plane.0.disk_size").(string), "ControlPlaneMachineCount": d.Get("control_plane.0.machine_count").(string), + "DnsVersion": clusterDetails.TkgVersion.CoreDns, + "EtcdVersion": clusterDetails.TkgVersion.Etcd, "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, + "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, "SshPublicKey": d.Get("ssh_public_key").(string), } + if _, ok := d.GetOk("control_plane.0.ip"); ok { + args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) + } + if _, ok := d.GetOk("virtual_ip_subnet"); ok { + args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) + } + if err := capiYamlEmpty.Execute(buf, args); err != nil { return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) } - result := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) + result := fmt.Sprintf("%s\n%s\n%s", 
nodePoolYaml, machineHealthCheckYaml, buf.String()) return result, nil } +// generateMachineHealthCheckYaml generates YAML blocks corresponding to the Kubernetes machine healtch check. +func generateMachineHealthCheckYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { + resultYaml := "" + if !d.Get("node_health_check").(bool) { + return resultYaml, nil + } + + machineHealthCheckEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_MachineHealthCheck").Parse(cseMachineHealthCheckTemplate)) + buf := &bytes.Buffer{} + if err := machineHealthCheckEmptyTmpl.Execute(buf, map[string]string{ + "ClusterName": clusterDetails.Name, + "TargetNamespace": clusterDetails.Name + "-ns", + "MaxUnhealthyNodePercentage": fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix + "NodeStartupTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout), // With the 'second' suffix + "NodeUnknownTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout), // With the 'second' suffix + "NodeNotReadyTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout), // With the 'second' suffix + }); err != nil { + return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) + } + + return resultYaml, nil +} + // generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_NodePool").Parse(cseNodePoolTemplate)) @@ -502,16 +556,30 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto placementPolicyId = vpguPolicyId // For convenience, we just use one of them as both cannot be set at same time } + // As this one is optional, we just ignore it if not populated (YAML will render an empty "") + sizingPolicyId, isSetSizing := nodePool["sizing_policy_id"] + if !isSetSizing { + sizingPolicyId = "" + } + + // As this one is optional, we just ignore it if not populated (YAML will render an empty "") + storageProfileId, isSetStorage := nodePool["storage_profile_id"] + if !isSetStorage { + storageProfileId = "" + } + if err := nodePoolEmptyTmpl.Execute(buf, map[string]string{ "NodePoolName": name, "TargetNamespace": clusterDetails.Name + "-ns", "Catalog": clusterDetails.CatalogName, "VAppTemplate": clusterDetails.OvaName, - "NodePoolSizingPolicy": clusterDetails.ComputePolicies[nodePool["sizing_policy_id"].(string)], - "NodePoolPlacementPolicy": clusterDetails.ComputePolicies[placementPolicyId.(string)], - "NodePoolStorageProfile": clusterDetails.StorageProfiles[nodePool["storage_profile_id"].(string)], + "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[sizingPolicyId.(string)], + "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], + "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[storageProfileId.(string)], "NodePoolDiskSize": strconv.Itoa(nodePool["disk_size"].(int)), "NodePoolEnableGpu": strconv.FormatBool(isSetVgpu), + "NodePoolMachineCount": strconv.Itoa(nodePool["machine_count"].(int)), + "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, }); err != nil { return "", fmt.Errorf("could not generate a correct Node Pool YAML: %s", err) } @@ -532,8 +600,7 @@ type clusterInfoDto struct { CatalogName string NetworkName string RdeType *govcd.DefinedEntityType - StorageProfiles map[string]string // 
Maps IDs with names - ComputePolicies map[string]string // Maps IDs with names + UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) VCDKEConfig struct { MaxUnhealthyNodesPercentage string NodeStartupTimeout string @@ -547,31 +614,35 @@ type clusterInfoDto struct { // tkgVersion is an auxiliary structure used by the tkgMap variable to map // a Kubernetes template OVA to some specific TKG components versions. type tkgVersion struct { - Tkg []string - Tkr string - Etcd string - CoreDns string + Tkg []string + Tkr string + Etcd string + CoreDns string + KubernetesVersion string } // tkgMap maps specific Kubernetes template OVAs to specific TKG components versions. var tkgMap = map[string]tkgVersion{ "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { - Tkg: []string{"v2.4.0"}, - Tkr: "v1.27.5---vmware.1-tkg.1", - Etcd: "v3.5.7_vmware.6", - CoreDns: "v1.10.1_vmware.7", + Tkg: []string{"v2.4.0"}, + Tkr: "v1.27.5---vmware.1-tkg.1", + Etcd: "v3.5.7_vmware.6", + CoreDns: "v1.10.1_vmware.7", + KubernetesVersion: "v1.25.7+vmware.2", }, "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { - Tkg: []string{"v2.4.0"}, - Tkr: "v1.26.8---vmware.1-tkg.1", - Etcd: "v3.5.6_vmware.20", - CoreDns: "v1.10.1_vmware.7", + Tkg: []string{"v2.4.0"}, + Tkr: "v1.26.8---vmware.1-tkg.1", + Etcd: "v3.5.6_vmware.20", + CoreDns: "v1.10.1_vmware.7", + KubernetesVersion: "v1.25.7+vmware.2", }, "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { - Tkg: []string{"v2.3.1"}, - Tkr: "v1.26.8---vmware.1-tkg.2", - Etcd: "v3.5.6_vmware.20", - CoreDns: "v1.9.3_vmware.16", + Tkg: []string{"v2.3.1"}, + Tkr: "v1.26.8---vmware.1-tkg.2", + Etcd: "v3.5.6_vmware.20", + CoreDns: "v1.9.3_vmware.16", + KubernetesVersion: "v1.25.7+vmware.2", }, } @@ -579,6 +650,7 @@ var tkgMap = map[string]tkgVersion{ // from the Terraform resource data and the target VCD. func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion string) (*clusterInfoDto, error) { result := &clusterInfoDto{} + result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema name := d.Get("name").(string) result.Name = name @@ -635,32 +707,31 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon // with their corresponding names (the cluster YAML and CSE in general uses names only). // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, // as there can be a lot of them. 
- result.StorageProfiles = make(map[string]string) if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) if err != nil { return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) } - result.StorageProfiles[storageProfileId] = storageProfile.Name + result.UrnToNamesCache[storageProfileId] = storageProfile.Name } controlPlaneStorageProfileId := d.Get("control_plane.0.storage_profile_id").(string) - if _, ok := result.StorageProfiles[controlPlaneStorageProfileId]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[controlPlaneStorageProfileId]; !ok { // Only query if not already present storageProfile, err := vcdClient.GetStorageProfileById(controlPlaneStorageProfileId) if err != nil { return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) } - result.StorageProfiles[controlPlaneStorageProfileId] = storageProfile.Name + result.UrnToNamesCache[controlPlaneStorageProfileId] = storageProfile.Name } for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { nodePool := nodePoolRaw.(map[string]interface{}) nodePoolStorageProfileId := nodePool["storage_profile_id"].(string) - if _, ok := result.StorageProfiles[nodePoolStorageProfileId]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[nodePoolStorageProfileId]; !ok { // Only query if not already present storageProfile, err := vcdClient.GetStorageProfileById(nodePoolStorageProfileId) if err != nil { return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) } - result.StorageProfiles[nodePoolStorageProfileId] = storageProfile.Name + result.UrnToNamesCache[nodePoolStorageProfileId] = storageProfile.Name } } @@ -668,50 +739,49 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon // with their corresponding names (the cluster YAML and CSE in general uses names only). // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, // as there can be a lot of them. 
- result.ComputePolicies = make(map[string]string) if controlPlaneSizingPolicyId, isSet := d.GetOk("control_plane.0.sizing_policy_id"); isSet { computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlaneSizingPolicyId.(string)) if err != nil { return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) } - result.ComputePolicies[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + result.UrnToNamesCache[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name } if controlPlanePlacementPolicyId, isSet := d.GetOk("control_plane.0.placement_policy_id"); isSet { - if _, ok := result.ComputePolicies[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlanePlacementPolicyId.(string)) if err != nil { return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) } - result.ComputePolicies[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name } } for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { nodePool := nodePoolRaw.(map[string]interface{}) if nodePoolSizingPolicyId, isSet := nodePool["sizing_policy_id"]; isSet { - if _, ok := result.ComputePolicies[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolSizingPolicyId.(string)) if err != nil { return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) } - result.ComputePolicies[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + result.UrnToNamesCache[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name } } if nodePoolPlacementPolicyId, isSet := nodePool["placement_policy_id"]; isSet { - if _, ok := result.ComputePolicies[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolPlacementPolicyId.(string)) if err != nil { return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) } - result.ComputePolicies[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name } } if nodePoolVGpuPolicyId, isSet := nodePool["vgpu_policy_id"]; isSet { - if _, ok := result.ComputePolicies[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present + if _, ok := result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolVGpuPolicyId.(string)) if err != nil { return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) } - 
result.ComputePolicies[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name } } } From 713677ddc7cd00537b0b3070b68e31b473a8a138 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 11 Jan 2024 14:25:01 +0100 Subject: [PATCH 011/156] Fixes and cleanup Signed-off-by: abarreiro --- vcd/config_test.go | 2 -- vcd/resource_vcd_cse_kubernetes_cluster.go | 18 ++++-------- ...esource_vcd_cse_kubernetes_cluster_test.go | 28 +++++++------------ vcd/sample_vcd_test_config.json | 2 -- 4 files changed, 16 insertions(+), 34 deletions(-) diff --git a/vcd/config_test.go b/vcd/config_test.go index 04dde4aab..9aa1045dd 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -239,8 +239,6 @@ type TestConfig struct { CapVcdVersion string `json:"capVcdVersion,omitempty"` RoutedNetwork string `json:"routedNetwork,omitempty"` EdgeGateway string `json:"edgeGateway,omitempty"` - Owner string `json:"owner,omitempty"` - ApiTokenFile string `json:"apiTokenFile,omitempty"` } `json:"cse,omitempty"` } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 647f00179..8442d93fa 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -61,11 +61,6 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", }, - "capvcd_rde_type_id": { - Type: schema.TypeString, - Required: true, - Description: "The CAPVCD RDE Type ID", - }, "org": { Type: schema.TypeString, Optional: true, @@ -156,7 +151,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Description: "VM Placement policy for the control plane nodes", }, - "storage_profile": { + "storage_profile_id": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -298,7 +293,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - clusterDetails, err := createClusterInfoDto(d, vcdClient, "1.1.0") + clusterDetails, err := createClusterInfoDto(d, vcdClient, "1.1.0", "1.2.0") if err != nil { return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } @@ -648,7 +643,7 @@ var tkgMap = map[string]tkgVersion{ // createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information // from the Terraform resource data and the target VCD. 
-func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion string) (*clusterInfoDto, error) { +func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion, capvcdClusterVersion string) (*clusterInfoDto, error) { result := &clusterInfoDto{} result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema @@ -696,10 +691,9 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon } result.NetworkName = network.OrgVDCNetwork.Name - rdeTypeId := d.Get("capvcd_rde_type_id").(string) - rdeType, err := vcdClient.GetRdeTypeById(rdeTypeId) + rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterVersion) if err != nil { - return nil, fmt.Errorf("could not retrieve RDE Type with ID '%s': %s", rdeTypeId, err) + return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterVersion, err) } result.RdeType = rdeType @@ -788,7 +782,7 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigVersion, "VCDKEConfig") if err != nil { - return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE: %s", err) + return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigVersion, err) } if len(rdes) != 1 { return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 695a5e2c8..692c77ba0 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -20,8 +20,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, "CapVcdVersion": testConfig.Cse.CapVcdVersion, - "Owner": testConfig.Cse.Owner, - "ApiToken": testConfig.Cse.ApiTokenFile, } testParamsNotEmpty(t, params) @@ -57,12 +55,6 @@ data "vcd_catalog_vapp_template" "tkg_ova" { name = "{{.OvaName}}" } -data "vcd_rde_type" "capvcdcluster_type" { - vendor = "vmware" - nss = "capvcdCluster" - version = "{{.CapVcdVersion}}" -} - data "vcd_org_vdc" "vdc" { org = data.vcd_catalog.tkg_catalog.org name = "{{.Vdc}}" @@ -89,23 +81,23 @@ data "vcd_storage_profile" "sp" { name = "*" } +resource "vcd_api_token" "token" { + name = "{{.Name}}" + file_name = "{{.Name}}.json" + allow_token_file = true +} + resource "vcd_cse_kubernetes_cluster" "my_cluster" { + runtime = "tkg" name = "{{.Name}}" ova_id = data.vcd_catalog_vapp_template.tkg_ova.id - capvcd_rde_type_id = data.vcd_rde_type.capvcdcluster_type.id org = "{{.Org}}" vdc_id = data.vcd_org_vdc.vdc.id network_id = data.vcd_network_routed_v2.routed.id - owner = "{{.Owner}}" - api_token_file = "{{.ApiTokenFile}}" + owner = "administrator" + api_token_file = vcd_api_token.token.file_name control_plane { - machine_count = 1 - disk_size = 20 - sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id - } - - node_pool { machine_count = 1 disk_size = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id @@ -129,8 +121,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { } storage_class { + name = "sc-1" storage_profile_id = data.vcd_storage_profile.sp.id - name = "sc-1" reclaim_policy = "delete" filesystem = "ext4" } diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index 
737a33a2a..2982a8a48 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -235,7 +235,5 @@ "ovaCatalog": "tkgm_catalog", "ovaName": "", "capVcdVersion": "1.2.0", - "owner": "cluster_author", - "apiTokenFile": "", } } From f4c1abfea8a176dc02318250d44c42fde8f991fc Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 12 Jan 2024 16:36:44 +0100 Subject: [PATCH 012/156] Fix bugs, unmarshaling still does odd things with bools Signed-off-by: abarreiro --- vcd/config_test.go | 5 +- vcd/cse/capi-yaml/cluster.tmpl | 31 +- vcd/cse/capi-yaml/machine_health_check.tmpl | 22 - vcd/cse/default_storage_class.tmpl | 6 - vcd/cse/rde.tmpl | 13 +- vcd/provider_test.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 430 ++++++++++-------- ...esource_vcd_cse_kubernetes_cluster_test.go | 44 +- vcd/sample_vcd_test_config.json | 6 +- vcd/structure.go | 11 + vcd/testcheck_funcs_test.go | 2 +- vcd/validate_funcs.go | 14 + 12 files changed, 336 insertions(+), 250 deletions(-) delete mode 100644 vcd/cse/capi-yaml/machine_health_check.tmpl delete mode 100644 vcd/cse/default_storage_class.tmpl diff --git a/vcd/config_test.go b/vcd/config_test.go index 9aa1045dd..f7eb51de9 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -1,4 +1,4 @@ -//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbServiceMonitor || lbServerPool || lbAppProfile || lbAppRule || lbVirtualServer || access_control || user || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || ALL +//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbServiceMonitor || lbServerPool || lbAppProfile || lbAppRule || lbVirtualServer || access_control || user || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || cse || ALL package vcd @@ -232,7 +232,8 @@ type TestConfig struct { } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` Cse struct { - Org string `json:"org,omitempty"` + SolutionsOrg string `json:"solutionsOrg,omitempty"` + TenantOrg string `json:"tenantOrg,omitempty"` Vdc string `json:"vdc,omitempty"` OvaCatalog string `json:"ovaCatalog,omitempty"` OvaName string `json:"ovaName,omitempty"` diff --git a/vcd/cse/capi-yaml/cluster.tmpl b/vcd/cse/capi-yaml/cluster.tmpl index bf193a53f..f9d619c85 100644 --- a/vcd/cse/capi-yaml/cluster.tmpl +++ b/vcd/cse/capi-yaml/cluster.tmpl @@ -8,7 +8,7 @@ metadata: tanzuKubernetesRelease: "{{.TkrVersion}}" tkg.tanzu.vmware.com/cluster-name: "{{.ClusterName}}" annotations: - osInfo: "ubuntu,20.04,amd64", + osInfo: "ubuntu,20.04,amd64" TKGVERSION: "{{.TkgVersion}}" spec: clusterNetwork: @@ -29,6 +29,31 @@ spec: kind: VCDCluster name: "{{.ClusterName}}" namespace: "{{.TargetNamespace}}" +{{- if .MaxUnhealthyNodePercentage }} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" + labels: + clusterctl.cluster.x-k8s.io: "" + clusterctl.cluster.x-k8s.io/move: "" +spec: + clusterName: "{{.ClusterName}}" + maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" + nodeStartupTimeout: "{{.NodeStartupTimeout}}" + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" + unhealthyConditions: + - type: Ready + status: Unknown + timeout: 
"{{.NodeUnknownTimeout}}" + - type: Ready + status: "False" + timeout: "{{.NodeNotReadyTimeout}}" +{{- end }} --- apiVersion: v1 kind: Secret @@ -74,7 +99,7 @@ spec: template: spec: catalog: "{{.Catalog}}" - template: "{{.VAppTemplateName}}" + template: "{{.VAppTemplate}}" sizingPolicy: "{{.ControlPlaneSizingPolicy}}" placementPolicy: "{{.ControlPlanePlacementPolicy}}" storageProfile: "{{.ControlPlaneStorageProfile}}" @@ -143,7 +168,7 @@ spec: sshAuthorizedKeys: - "{{.SshPublicKey}}" useExperimentalRetryJoin: true - preKubeadmCommands: # Added manually + preKubeadmCommands: - mv /etc/ssl/certs/custom_certificate_*.crt /usr/local/share/ca-certificates && update-ca-certificates joinConfiguration: nodeRegistration: diff --git a/vcd/cse/capi-yaml/machine_health_check.tmpl b/vcd/cse/capi-yaml/machine_health_check.tmpl deleted file mode 100644 index 3180fb3f2..000000000 --- a/vcd/cse/capi-yaml/machine_health_check.tmpl +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" - labels: - clusterctl.cluster.x-k8s.io: "" - clusterctl.cluster.x-k8s.io/move: "" -spec: - clusterName: "{{.ClusterName}}" - maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" - nodeStartupTimeout: {{.NodeStartupTimeout}} - selector: - matchLabels: - cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" - unhealthyConditions: - - type: Ready - status: Unknown - timeout: {{.NodeUnknownTimeout}} - - type: Ready - status: "False" - timeout: {{.NodeNotReadyTimeout}} \ No newline at end of file diff --git a/vcd/cse/default_storage_class.tmpl b/vcd/cse/default_storage_class.tmpl deleted file mode 100644 index 946d4b772..000000000 --- a/vcd/cse/default_storage_class.tmpl +++ /dev/null @@ -1,6 +0,0 @@ -{ - "filesystem": "{{.FileSystem}}", - "k8sStorageClassName": "{{.Name}}", - "vcdStorageProfileName": "{{.StorageProfile}}", - "useDeleteReclaimPolicy": {{.ReclaimPolicy}} -} \ No newline at end of file diff --git a/vcd/cse/rde.tmpl b/vcd/cse/rde.tmpl index afa51d523..3c156843f 100644 --- a/vcd/cse/rde.tmpl +++ b/vcd/cse/rde.tmpl @@ -14,11 +14,18 @@ "markForDelete": {{.Delete}}, "forceDelete": {{.ForceDelete}}, "autoRepairOnErrors": {{.AutoRepairOnErrors}}, - "defaultStorageClassOptions": {{.DefaultStorageClassOptions}} + {{- if .DefaultStorageClassName }} + "defaultStorageClassOptions": { + "filesystem": "{{.DefaultStorageClassFileSystem}}", + "k8sStorageClassName": "{{.DefaultStorageClassName}}", + "vcdStorageProfileName": "{{.DefaultStorageClassStorageProfile}}", + "useDeleteReclaimPolicy": "{{.DefaultStorageClassReclaimPolicy}}" + }, + {{- end }} "secure": { - "apiToken": "${.ApiToken}" + "apiToken": "{{.ApiToken}}" } }, - "capiYaml": {{.CapiYaml}} + "capiYaml": "{{.CapiYaml}}" } } diff --git a/vcd/provider_test.go b/vcd/provider_test.go index 0d925f4d0..38c240f71 100644 --- a/vcd/provider_test.go +++ b/vcd/provider_test.go @@ -1,4 +1,4 @@ -//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbAppProfile || lbAppRule || lbServiceMonitor || lbServerPool || lbVirtualServer || user || access_control || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || ALL +//go:build api || functional || catalog || vapp || network || extnetwork || org || query || vm || vdc || gateway || disk || binary || lb || lbAppProfile || lbAppRule || lbServiceMonitor || lbServerPool || 
lbVirtualServer || user || access_control || standaloneVm || search || auth || nsxt || role || alb || certificate || vdcGroup || ldap || rde || uiPlugin || providerVdc || cse || ALL package vcd diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 8442d93fa..8d5ef9341 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -20,20 +20,22 @@ import ( "time" ) +// TODO: Split per CSE version: 4.1, 4.2... +// //go:embed cse/rde.tmpl var cseRdeJsonTemplate string -//go:embed cse/default_storage_class.tmpl -var cseDefaultStorageClassTemplate string - //go:embed cse/capi-yaml/cluster.tmpl var cseClusterYamlTemplate string //go:embed cse/capi-yaml/node_pool.tmpl var cseNodePoolTemplate string -//go:embed cse/capi-yaml/machine_health_check.tmpl -var cseMachineHealthCheckTemplate string +// Map of CSE version -> [VCDKEConfig RDE Type version, CAPVCD RDE Type version] +var cseVersions = map[string][]string{ + "4.1": {"1.1.0", "1.2.0"}, + "4.2": {"1.1.0", "1.2.0"}, +} func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ @@ -42,8 +44,16 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { UpdateContext: resourceVcdCseKubernetesUpdate, DeleteContext: resourceVcdCseKubernetesDelete, Schema: map[string]*schema.Schema{ + "cse_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(getKeys(cseVersions), false), + Description: "The CSE version to use", + }, "runtime": { Type: schema.TypeString, + Optional: true, Default: "tkg", ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE @@ -54,6 +64,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, ForceNew: true, Description: "The name of the Kubernetes cluster", + // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") }, "ova_id": { Type: schema.TypeString, @@ -82,26 +93,15 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "owner": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role", + Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", }, "api_token_file": { Type: schema.TypeString, Required: true, ForceNew: true, Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. 
Be careful about this file and its contents, as it contains sensitive information", - ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { - value, ok := v.(string) - if !ok { - return diag.Errorf("could not parse string value '%v'", v) - } - _, err := govcd.GetTokenFromFile(value) - if err != nil { - return diag.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - return nil - }, }, "ssh_public_key": { Type: schema.TypeString, @@ -132,12 +132,12 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "disk_size": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - ValidateFunc: IsIntAndAtLeast(20), - Description: "Disk size for the control plane nodes", + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + ValidateDiagFunc: minimumValue(20, "disk size must be at least 20G"), + Description: "Disk size for the control plane nodes", }, "sizing_policy_id": { Type: schema.TypeString, @@ -158,10 +158,11 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Description: "Storage profile for the control plane nodes", }, "ip": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "IP for the control plane", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "IP for the control plane", + ValidateFunc: checkEmptyOrSingleIP(), }, }, }, @@ -176,20 +177,22 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, Description: "The name of this node pool", + // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") }, "machine_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, // As suggested in UI - Description: "The number of nodes that this node pool has. Must be higher than 0", - ValidateFunc: IsIntAndAtLeast(1), + Type: schema.TypeInt, + Optional: true, + Default: 1, // As suggested in UI + Description: "The number of nodes that this node pool has. 
Must be higher than 0", + ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), }, "disk_size": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - Description: "Disk size for the control plane nodes", + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + Description: "Disk size for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size must be at least 20G"), }, "sizing_policy_id": { Type: schema.TypeString, @@ -218,7 +221,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, }, - "storage_class": { + "default_storage_class": { Type: schema.TypeList, MaxItems: 1, Optional: true, @@ -233,6 +236,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, Type: schema.TypeString, Description: "Name to give to this storage class", + // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") }, "reclaim_policy": { Required: true, @@ -263,24 +267,38 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "virtual_ip_subnet": { Type: schema.TypeString, - Required: true, + Optional: true, Description: "Virtual IP subnet for the cluster", }, "auto_repair_on_errors": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeBool, + Optional: true, + Default: false, Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", }, "node_health_check": { - Type: schema.TypeString, - Required: true, + Type: schema.TypeBool, + Optional: true, + Default: false, Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", }, + "delete_timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Default: 120, + Description: "The time, in seconds, to wait for the cluster to be deleted when it is marked for deletion", + ValidateDiagFunc: minimumValue(10, "timeout must be at least 10 seconds"), + }, "state": { Type: schema.TypeString, Computed: true, Description: "The state of the cluster, can be 'provisioning', 'provisioned' or 'error'. Useful to check whether the Kubernetes cluster is in a stable status", }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, "raw_cluster_rde_json": { Type: schema.TypeString, Computed: true, @@ -290,12 +308,19 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { } } +// getCseRdeTypeVersions gets the RDE Type versions. 
First returned parameter is VCDKEConfig, second is CAPVCDCluster +func getCseRdeTypeVersions(d *schema.ResourceData) (string, string) { + versions := cseVersions[d.Get("cse_version").(string)] + return versions[0], versions[1] +} + func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) + vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion := getCseRdeTypeVersions(d) - clusterDetails, err := createClusterInfoDto(d, vcdClient, "1.1.0", "1.2.0") + clusterDetails, err := createClusterInfoDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion) if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) + return diag.Errorf("could not create Kubernetes cluster: %s", err) } entityMap, err := getCseKubernetesClusterEntityMap(d, clusterDetails) @@ -303,7 +328,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } - _, err = clusterDetails.RdeType.CreateRde(types.DefinedEntity{ + rde, err := clusterDetails.RdeType.CreateRde(types.DefinedEntity{ EntityType: clusterDetails.RdeType.DefinedEntityType.ID, Name: clusterDetails.Name, Entity: entityMap, @@ -315,39 +340,56 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } + // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. + // We could use some other ways of filtering, but ID is the best and most accurate way. + d.SetId(rde.DefinedEntity.ID) return resourceVcdCseKubernetesRead(ctx, d, meta) } -func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) + var status interface{} + var rde *govcd.DefinedEntity + + // TODO: Add timeout + for status == nil { + // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be + // many clusters with the same name and RDE Type. 
+ var err error + rde, err = vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + } - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + status = rde.DefinedEntity.Entity + time.Sleep(10 * time.Second) + } + if rde == nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': object is nil", d.Id()) } + jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) if err != nil { return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) } dSet(d, "raw_cluster_rde_json", jsonEntity) - status, ok := rde.DefinedEntity.Entity["status"].(StringMap) - if !ok { - return diag.Errorf("could not read the 'status' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) - } - - vcdKe, ok := status["vcdKe"].(StringMap) + vcdKe, ok := status.(map[string]interface{})["vcdKe"].(map[string]interface{}) // FIXME: Can be Nil pointer if !ok { return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } + // TODO: Kubeconfig, invoke behavior and so + dSet(d, "state", vcdKe["state"]) - d.SetId(rde.DefinedEntity.ID) + d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability + return nil } func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - return nil + // TODO + return diag.Errorf("not implemented") } // resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send @@ -361,7 +403,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - spec, ok := rde.DefinedEntity.Entity["spec"].(StringMap) + spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) if !ok { return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") } @@ -372,20 +414,31 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m err = rde.Update(*rde.DefinedEntity) if err != nil { - return diag.Errorf("could not delete the cluster '%s': %s", rde.DefinedEntity.ID, err) + return diag.Errorf("could not mark the cluster '%s' for deletion: %s", rde.DefinedEntity.ID, err) } - // TODO: Add a timeout - deletionComplete := false - for !deletionComplete { - _, err = vcdClient.GetRdeById(d.Id()) + timeout := float64(d.Get("delete_timeout_seconds").(int)) + start := time.Now() + elapsed := time.Since(start) + for { + rde, err = vcdClient.GetRdeById(d.Id()) if err != nil { if govcd.IsNotFound(err) { - deletionComplete = true + break // This means the cluster is completely deleted } return diag.Errorf("could not check whether the cluster '%s' is deleted: %s", d.Id(), err) } + if elapsed.Minutes() > timeout { + // TODO: Improve the message by saying whether it is marked for deletion or not + return diag.Errorf("timeout of %.0f seconds reached. 
The cluster was not deleted in time, please try again", timeout) + } + // TODO: Check if it's marked for deletion already to avoid re-calling + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return diag.Errorf("could not mark the cluster '%s' for deletion: %s", rde.DefinedEntity.ID, err) + } time.Sleep(30 * time.Second) + elapsed += time.Since(start) } return nil } @@ -393,87 +446,70 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m // getCseKubernetesClusterEntityMap gets the payload for the RDE that manages the Kubernetes cluster, so it // can be created or updated. func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *clusterInfoDto) (StringMap, error) { - capiYamlRendered, err := generateCapiYaml(d, clusterDetails) + capiYaml, err := generateCapiYaml(d, clusterDetails) if err != nil { return nil, err } - storageClass := "{}" - if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { - storageClassEmpty := template.Must(template.New(clusterDetails.Name + "_StorageClass").Parse(cseDefaultStorageClassTemplate)) - storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) - storageClassName := d.Get("storage_class.0.name").(string) - reclaimPolicy := d.Get("storage_class.0.reclaim_policy").(string) - filesystem := d.Get("storage_class.0.filesystem").(string) - - buf := &bytes.Buffer{} - if err := storageClassEmpty.Execute(buf, map[string]string{ - "FileSystem": filesystem, - "Name": storageClassName, - "StorageProfile": clusterDetails.UrnToNamesCache[storageProfileId], - "ReclaimPolicy": reclaimPolicy, - }); err != nil { - return nil, fmt.Errorf("could not generate a correct storage class JSON block: %s", err) - } - storageClass = buf.String() + + args := map[string]string{ + "Name": clusterDetails.Name, + "Org": clusterDetails.Org.AdminOrg.Name, + "VcdUrl": clusterDetails.VcdUrl.String(), + "Vdc": clusterDetails.VdcName, + "Delete": "false", + "ForceDelete": "false", + "AutoRepairOnErrors": strconv.FormatBool(d.Get("auto_repair_on_errors").(bool)), + "ApiToken": clusterDetails.ApiToken, + "CapiYaml": capiYaml, + } + + if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { + args["DefaultStorageClassStorageProfile"] = clusterDetails.UrnToNamesCache[d.Get("default_storage_class.0.storage_profile_id").(string)] + args["DefaultStorageClassName"] = d.Get("default_storage_class.0.name").(string) + args["DefaultStorageClassReclaimPolicy"] = d.Get("default_storage_class.0.reclaim_policy").(string) + args["DefaultStorageClassFileSystem"] = d.Get("default_storage_class.0.filesystem").(string) } capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(cseRdeJsonTemplate)) buf := &bytes.Buffer{} - if err := capvcdEmpty.Execute(buf, map[string]string{ - "Name": clusterDetails.Name, - "Org": clusterDetails.Org.AdminOrg.Name, - "VcdUrl": clusterDetails.VcdUrl.String(), - "Vdc": clusterDetails.VdcName, - "Delete": "false", - "ForceDelete": "false", - "AutoRepairOnErrors": d.Get("auto_repair_on_errors").(string), - "DefaultStorageClassOptions": storageClass, - "ApiToken": d.Get("api_token").(string), - "CapiYaml": capiYamlRendered, - }); err != nil { - return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) + if err := capvcdEmpty.Execute(buf, args); err != nil { + return nil, fmt.Errorf("could not render the Go template with the CAPVCD JSON: %s", err) } - result := map[string]interface{}{} + var result interface{} err = json.Unmarshal(buf.Bytes(), 
&result) if err != nil { return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } - return result, nil + fmt.Printf("%v", result) + + return result.(map[string]interface{}), nil } // generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded // in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the clusterInfoDto to // populate several Go templates and build a final YAML. func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(cseClusterYamlTemplate)) + // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine + sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(cseClusterYamlTemplate) + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(sanitizedTemplate)) nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) if err != nil { return "", err } - machineHealthCheckYaml, err := generateMachineHealthCheckYaml(d, clusterDetails) - if err != nil { - return "", err - } - - apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) - if err != nil { - return "", fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - buf := &bytes.Buffer{} args := map[string]string{ "ClusterName": clusterDetails.Name, "TargetNamespace": clusterDetails.Name + "-ns", "TkrVersion": clusterDetails.TkgVersion.Tkr, "TkgVersion": clusterDetails.TkgVersion.Tkg[0], - "UsernameB64": base64.StdEncoding.EncodeToString([]byte(d.Get("owner").(string))), - "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(apiToken.RefreshToken)), + "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), + "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), "PodCidr": d.Get("pods_cidr").(string), - "ServiceCidr": d.Get("service_cidr").(string), + "ServiceCidr": d.Get("services_cidr").(string), "VcdSite": clusterDetails.VcdUrl.String(), "Org": clusterDetails.Org.AdminOrg.Name, "OrgVdc": clusterDetails.VdcName, @@ -483,8 +519,8 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (s "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], - "ControlPlaneDiskSize": d.Get("control_plane.0.disk_size").(string), - "ControlPlaneMachineCount": d.Get("control_plane.0.machine_count").(string), + "ControlPlaneDiskSize": strconv.Itoa(d.Get("control_plane.0.disk_size").(int)), + "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), "DnsVersion": clusterDetails.TkgVersion.CoreDns, "EtcdVersion": clusterDetails.TkgVersion.Etcd, "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, @@ -499,35 +535,30 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (s args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) } + if d.Get("node_health_check").(bool) { + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) 
// With the 'percentage' suffix, it is doubled to render the template correctly + args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix + args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix + args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix + } + if err := capiYamlEmpty.Execute(buf, args); err != nil { return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) } - result := fmt.Sprintf("%s\n%s\n%s", nodePoolYaml, machineHealthCheckYaml, buf.String()) - return result, nil -} - -// generateMachineHealthCheckYaml generates YAML blocks corresponding to the Kubernetes machine healtch check. -func generateMachineHealthCheckYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { - resultYaml := "" - if !d.Get("node_health_check").(bool) { - return resultYaml, nil - } + prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) - machineHealthCheckEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_MachineHealthCheck").Parse(cseMachineHealthCheckTemplate)) - buf := &bytes.Buffer{} - if err := machineHealthCheckEmptyTmpl.Execute(buf, map[string]string{ - "ClusterName": clusterDetails.Name, - "TargetNamespace": clusterDetails.Name + "-ns", - "MaxUnhealthyNodePercentage": fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix - "NodeStartupTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout), // With the 'second' suffix - "NodeUnknownTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout), // With the 'second' suffix - "NodeNotReadyTimeout": fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout), // With the 'second' suffix - }); err != nil { - return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) + // This encoder is used instead of a standard json.Marshal as the YAML contains special + // characters that are not encoded properly, such as '<'. + buf.Reset() + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err = enc.Encode(prettyYaml) + if err != nil { + return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) } - return resultYaml, nil + return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil } // generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. 
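Editor's aside: a minimal sketch (illustration only, not part of the patch) of why the hunk above swaps json.Marshal for a json.Encoder with SetEscapeHTML(false). The kubelet arguments embedded in the CAPI YAML contain '<' (for example `nodefs.available<0%`), which the default marshaller would escape to `\u003c`:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	yamlFragment := "eviction-hard: nodefs.available<0%"

	// Default behaviour: HTML-sensitive characters are escaped.
	escaped, _ := json.Marshal(yamlFragment)
	fmt.Println(string(escaped)) // "eviction-hard: nodefs.available\u003c0%"

	// An encoder with HTML escaping disabled keeps the YAML intact.
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	_ = enc.Encode(yamlFragment)
	fmt.Print(buf.String()) // "eviction-hard: nodefs.available<0%"
}
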
@@ -542,37 +573,26 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto name := nodePool["name"].(string) // Check the correctness of the compute policies in the node pool block - placementPolicyId, isSetPlacement := nodePool["placement_policy_id"] - vpguPolicyId, isSetVgpu := nodePool["vgpu_policy_id"] - if isSetPlacement && isSetVgpu { + placementPolicyId := nodePool["placement_policy_id"] + vpguPolicyId := nodePool["vgpu_policy_id"] + if placementPolicyId != "" && vpguPolicyId != "" { return "", fmt.Errorf("the node pool '%s' should have either a Placement Policy or a vGPU Policy, not both", name) } - if isSetVgpu { + if vpguPolicyId != "" { placementPolicyId = vpguPolicyId // For convenience, we just use one of them as both cannot be set at same time } - // As this one is optional, we just ignore it if not populated (YAML will render an empty "") - sizingPolicyId, isSetSizing := nodePool["sizing_policy_id"] - if !isSetSizing { - sizingPolicyId = "" - } - - // As this one is optional, we just ignore it if not populated (YAML will render an empty "") - storageProfileId, isSetStorage := nodePool["storage_profile_id"] - if !isSetStorage { - storageProfileId = "" - } - if err := nodePoolEmptyTmpl.Execute(buf, map[string]string{ + "ClusterName": clusterDetails.Name, "NodePoolName": name, "TargetNamespace": clusterDetails.Name + "-ns", "Catalog": clusterDetails.CatalogName, "VAppTemplate": clusterDetails.OvaName, - "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[sizingPolicyId.(string)], + "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[nodePool["sizing_policy_id"].(string)], "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], - "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[storageProfileId.(string)], + "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[nodePool["storage_profile_id"].(string)], "NodePoolDiskSize": strconv.Itoa(nodePool["disk_size"].(int)), - "NodePoolEnableGpu": strconv.FormatBool(isSetVgpu), + "NodePoolEnableGpu": strconv.FormatBool(vpguPolicyId != ""), "NodePoolMachineCount": strconv.Itoa(nodePool["machine_count"].(int)), "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, }); err != nil { @@ -604,6 +624,8 @@ type clusterInfoDto struct { ContainerRegistryUrl string } TkgVersion *tkgVersion + Owner string + ApiToken string } // tkgVersion is an auxiliary structure used by the tkgMap variable to map @@ -618,6 +640,13 @@ type tkgVersion struct { // tkgMap maps specific Kubernetes template OVAs to specific TKG components versions. var tkgMap = map[string]tkgVersion{ + "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { + Tkg: []string{"v2.2.0"}, + Tkr: "v1.25.7---vmware.2-tkg.1", + Etcd: "v3.5.6_vmware.9", + CoreDns: "v1.9.3_vmware.8", + KubernetesVersion: "v1.25.7+vmware.2", + }, "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { Tkg: []string{"v2.4.0"}, Tkr: "v1.27.5---vmware.1-tkg.1", @@ -643,7 +672,7 @@ var tkgMap = map[string]tkgVersion{ // createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information // from the Terraform resource data and the target VCD. 
-func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigVersion, capvcdClusterVersion string) (*clusterInfoDto, error) { +func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion string) (*clusterInfoDto, error) { result := &clusterInfoDto{} result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema @@ -671,7 +700,8 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon result.OvaName = vAppTemplate.VAppTemplate.Name // Searches for the TKG components versions in the tkgMap with the OVA name details - ovaCode := vAppTemplate.VAppTemplate.Name[strings.LastIndex(vAppTemplate.VAppTemplate.Name, "kube-")+len("kube-") : strings.LastIndex(vAppTemplate.VAppTemplate.Name, ".ova")] + + ovaCode := strings.ReplaceAll(vAppTemplate.VAppTemplate.Name, ".ova", "")[strings.LastIndex(vAppTemplate.VAppTemplate.Name, "kube-")+len("kube-"):] tkgVersion, ok := tkgMap[ovaCode] if !ok { return nil, fmt.Errorf("could not retrieve the TKG version details from Kubernetes template '%s'. Please check whether the OVA '%s' is compatible", ovaCode, vAppTemplate.VAppTemplate.Name) @@ -691,9 +721,9 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon } result.NetworkName = network.OrgVDCNetwork.Name - rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterVersion) + rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterRdeTypeVersion) if err != nil { - return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterVersion, err) + return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterRdeTypeVersion, err) } result.RdeType = rdeType @@ -701,8 +731,8 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon // with their corresponding names (the cluster YAML and CSE in general uses names only). // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, // as there can be a lot of them. 
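// A small, self-contained sketch of how the ovaCode computed above resolves against tkgMap.
// The OVA file name is an illustrative example of the "<os>-kube-<version>-<suffix>.ova" naming
// scheme that the substring logic assumes, and the map below is a trimmed-down stand-in for the
// real tkgMap defined in this file.
package main

import (
	"fmt"
	"strings"
)

func main() {
	ovaName := "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova"

	// Same extraction as createClusterInfoDto: drop the ".ova" suffix and keep everything
	// after the last "kube-" marker.
	ovaCode := strings.ReplaceAll(ovaName, ".ova", "")[strings.LastIndex(ovaName, "kube-")+len("kube-"):]
	fmt.Println(ovaCode) // v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc

	tkgVersions := map[string]struct{ Tkr, Kubernetes string }{
		"v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": {
			Tkr:        "v1.25.7---vmware.2-tkg.1",
			Kubernetes: "v1.25.7+vmware.2",
		},
	}
	tkg, ok := tkgVersions[ovaCode]
	if !ok {
		fmt.Println("unsupported Kubernetes template OVA")
		return
	}
	fmt.Printf("TKr: %s, Kubernetes: %s\n", tkg.Tkr, tkg.Kubernetes)
}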
- if _, isStorageClassSet := d.GetOk("storage_class"); isStorageClassSet { - storageProfileId := d.Get("storage_class.0.storage_profile_id").(string) + if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { + storageProfileId := d.Get("default_storage_class.0.storage_profile_id").(string) storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) if err != nil { return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) @@ -780,9 +810,9 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon } } - rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigVersion, "VCDKEConfig") + rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigRdeTypeVersion, "vcdKeConfig") if err != nil { - return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigVersion, err) + return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigRdeTypeVersion, err) } if len(rdes) != 1 { return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) @@ -790,38 +820,56 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have // to deal with it again. - vcdKeConfig := rdes[0].DefinedEntity.Entity - if _, ok := vcdKeConfig["profiles"]; !ok { - return nil, fmt.Errorf("expected array 'profiles' in VCDKEConfig, but it is nil") - } - if _, ok := vcdKeConfig["profiles"].([]map[string]interface{}); !ok { - return nil, fmt.Errorf("expected array 'profiles' in VCDKEConfig, but it is not an array") - } - if len(vcdKeConfig["profiles"].([]map[string]interface{})) != 1 { - return nil, fmt.Errorf("expected exactly one 'profiles' item in VCDKEConfig, but it has %d", len(vcdKeConfig["profiles"].([]map[string]interface{}))) - } - if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"]; !ok { - return nil, fmt.Errorf("expected item 'profiles[0].K8Config' in VCDKEConfig, but it is nil") + type vcdKeConfigType struct { + Profiles []struct { + K8Config struct { + Mhc struct { + MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` + NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` + NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` + NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` + } `json:"mhc:omitempty"` + } `json:"K8Config:omitempty"` + ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` + } `json:"profiles,omitempty"` + } + + var vcdKeConfig vcdKeConfigType + rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) + if err != nil { + return nil, err } - if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{}); !ok { - return nil, fmt.Errorf("expected an object 'profiles[0].K8Config' in VCDKEConfig, but it is not an object") + + err = json.Unmarshal(rawData, &vcdKeConfig) + if err != nil { + return nil, err } - if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"]; !ok { - return nil, fmt.Errorf("expected item 'profiles[0].K8Config.mhc' in VCDKEConfig, but it is nil") + + if len(vcdKeConfig.Profiles) != 1 { + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) } - if _, ok := 
vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"].(map[string]interface{}); !ok { - return nil, fmt.Errorf("expected an object 'profiles[0].K8Config.mhc' in VCDKEConfig, but it is not an object") + + result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) + result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) + result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) + result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) + result.VCDKEConfig.ContainerRegistryUrl = vcdKeConfig.Profiles[0].ContainerRegistryUrl + + owner, ok := d.GetOk("owner") + if !ok { + sessionInfo, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) + } + owner = sessionInfo.User.Name } - mhc := vcdKeConfig["profiles"].([]map[string]interface{})[0]["K8Config"].(map[string]interface{})["mhc"].(map[string]interface{}) - result.VCDKEConfig.MaxUnhealthyNodesPercentage = mhc["maxUnhealthyNodes"].(string) - result.VCDKEConfig.NodeStartupTimeout = mhc["nodeStartupTimeout"].(string) - result.VCDKEConfig.NodeNotReadyTimeout = mhc["nodeNotReadyTimeout"].(string) - result.VCDKEConfig.NodeUnknownTimeout = mhc["nodeUnknownTimeout"].(string) + result.Owner = owner.(string) - if _, ok := vcdKeConfig["profiles"].([]map[string]interface{})[0]["containerRegistryUrl"]; !ok { - return nil, fmt.Errorf("expected item 'profiles[0].containerRegistryUrl' in VCDKEConfig, but it is nil") + apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) + if err != nil { + return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) } - result.VCDKEConfig.ContainerRegistryUrl = vcdKeConfig["profiles"].([]map[string]interface{})[0]["containerRegistryUrl"].(string) + result.ApiToken = apiToken.RefreshToken result.VcdUrl = vcdClient.VCDClient.Client.VCDHREF return result, nil diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 692c77ba0..715930df9 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -1,8 +1,9 @@ -//go:build cse +//go:build cse || ALL package vcd import ( + "os" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -11,15 +12,20 @@ import ( func TestAccVcdCseKubernetesCluster(t *testing.T) { preTestChecks(t) + if cse := os.Getenv("TEST_VCD_CSE"); cse == "" { + t.Skip("CSE tests deactivated, skipping " + t.Name()) + } + var params = StringMap{ - "Name": t.Name(), - "OvaCatalog": testConfig.Cse.OvaCatalog, - "OvaName": testConfig.Cse.OvaName, - "Org": testConfig.Cse.Org, - "Vdc": testConfig.Cse.Vdc, - "EdgeGateway": testConfig.Cse.EdgeGateway, - "Network": testConfig.Cse.RoutedNetwork, - "CapVcdVersion": testConfig.Cse.CapVcdVersion, + "Name": t.Name(), + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.Vdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenFile": getCurrentDir() + t.Name() + ".json", } testParamsNotEmpty(t, params) @@ -45,7 +51,7 
@@ const testAccVcdCseKubernetesCluster = ` # skip-binary-test - This one requires a very special setup data "vcd_catalog" "tkg_catalog" { - org = "{{.Org}}" + org = "{{.SolutionsOrg}}" name = "{{.OvaCatalog}}" } @@ -56,13 +62,14 @@ data "vcd_catalog_vapp_template" "tkg_ova" { } data "vcd_org_vdc" "vdc" { - org = data.vcd_catalog.tkg_catalog.org + org = "{{.TenantOrg}}" name = "{{.Vdc}}" } data "vcd_nsxt_edgegateway" "egw" { - org = data.vcd_org_vdc.vdc.org - name = "{{.EdgeGateway}}" + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "{{.EdgeGateway}}" } data "vcd_network_routed_v2" "routed" { @@ -82,19 +89,19 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}" - file_name = "{{.Name}}.json" + name = "{{.Name}}41" + file_name = "{{.TokenFile}}" allow_token_file = true } resource "vcd_cse_kubernetes_cluster" "my_cluster" { + cse_version = "4.2" runtime = "tkg" name = "{{.Name}}" ova_id = data.vcd_catalog_vapp_template.tkg_ova.id - org = "{{.Org}}" + org = data.vcd_org_vdc.vdc.org vdc_id = data.vcd_org_vdc.vdc.id network_id = data.vcd_network_routed_v2.routed.id - owner = "administrator" api_token_file = vcd_api_token.token.file_name control_plane { @@ -120,7 +127,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } - storage_class { + default_storage_class { name = "sc-1" storage_profile_id = data.vcd_storage_profile.sp.id reclaim_policy = "delete" @@ -132,5 +139,6 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { auto_repair_on_errors = true node_health_check = true + delete_timeout_seconds = 10 } ` diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index 2982a8a48..56cc737b2 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -228,12 +228,12 @@ }, "cse" :{ "//" : "Only needed to test Container Service Extension specific resources", - "org" : "tenant_org", + "solutionsOrg" : "solutions_org", + "tenantOrg" : "tenant_org", "vdc": "tenant_vdc", "routedNetwork": "tenant_net_routed", "edgeGateway": "tenant_edgegateway", "ovaCatalog": "tkgm_catalog", - "ovaName": "", - "capVcdVersion": "1.2.0", + "ovaName": "" } } diff --git a/vcd/structure.go b/vcd/structure.go index 185577f8c..79dbd8b41 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -13,6 +13,17 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" ) +// getKeys retrieves all the keys from the given map and returns them as a slice +func getKeys[K comparable, V any](input map[K]V) []K { + result := make([]K, len(input)) + i := 0 + for k := range input { + result[i] = k + i++ + } + return result +} + func expandIPRange(configured []interface{}) (types.IPRanges, error) { ipRange := make([]*types.IPRange, 0, len(configured)) diff --git a/vcd/testcheck_funcs_test.go b/vcd/testcheck_funcs_test.go index d232c92f8..e5c82fc0a 100644 --- a/vcd/testcheck_funcs_test.go +++ b/vcd/testcheck_funcs_test.go @@ -1,4 +1,4 @@ -//go:build api || vapp || vm || user || nsxt || extnetwork || network || gateway || catalog || standaloneVm || alb || vdcGroup || ldap || vdc || access_control || rde || uiPlugin || org || disk || providerVdc || ALL || functional +//go:build api || vapp || vm || user || nsxt || extnetwork || network || gateway || catalog || standaloneVm || alb || vdcGroup || ldap || vdc || access_control || rde || uiPlugin || org || disk || providerVdc || cse || ALL || functional package vcd diff --git a/vcd/validate_funcs.go 
b/vcd/validate_funcs.go index 58393f2a9..d716ef077 100644 --- a/vcd/validate_funcs.go +++ b/vcd/validate_funcs.go @@ -171,6 +171,20 @@ func IsIntAndAtLeast(min int) schema.SchemaValidateFunc { } } +// minimumValue returns a SchemaValidateDiagFunc that tests if the provided value is at least min (inclusive) +func minimumValue(min int, errorMessage string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v'", v) + } + if value < min { + return diag.Errorf("%s: %d < %d", errorMessage, value, min) + } + return nil + } +} + // IsFloatAndBetween returns a SchemaValidateFunc which tests if the provided value convertable to // float64 and is between min and max (inclusive). func IsFloatAndBetween(min, max float64) schema.SchemaValidateFunc { From 7780af455683c8634c5ba6779d2830e6f151f335 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 15 Jan 2024 14:58:57 +0100 Subject: [PATCH 013/156] Fixes, cluster starts being created Signed-off-by: abarreiro --- .../v4.1/cluster/variables.tf | 2 +- vcd/cse/capi-yaml/cluster.tmpl | 8 +- vcd/cse/capi-yaml/node_pool.tmpl | 2 +- vcd/cse/rde.tmpl | 2 +- vcd/resource_vcd_catalog_access_control.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 166 ++++++++++-------- ...esource_vcd_cse_kubernetes_cluster_test.go | 16 +- vcd/validate_funcs.go | 18 ++ vcdTestAccVcdCseKubernetesCluster.json | 6 + 9 files changed, 135 insertions(+), 87 deletions(-) create mode 100644 vcdTestAccVcdCseKubernetesCluster.json diff --git a/examples/container-service-extension/v4.1/cluster/variables.tf b/examples/container-service-extension/v4.1/cluster/variables.tf index ac19d6f49..fa9dd7f1e 100644 --- a/examples/container-service-extension/v4.1/cluster/variables.tf +++ b/examples/container-service-extension/v4.1/cluster/variables.tf @@ -36,7 +36,7 @@ variable "k8s_cluster_name" { description = "The name of the Kubernetes cluster. Name must contain only lowercase alphanumeric characters or '-' start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters (Example: 'MyCluster')" type = string validation { - condition = can(regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$", var.k8s_cluster_name)) + condition = can(regex("^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$", var.k8s_cluster_name)) error_message = "Name must contain only lowercase alphanumeric characters or '-', start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters." 
} } diff --git a/vcd/cse/capi-yaml/cluster.tmpl b/vcd/cse/capi-yaml/cluster.tmpl index f9d619c85..e6065202d 100644 --- a/vcd/cse/capi-yaml/cluster.tmpl +++ b/vcd/cse/capi-yaml/cluster.tmpl @@ -22,7 +22,7 @@ spec: controlPlaneRef: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane - name: "{{.ClusterName}}-control-plane" + name: "{{.ClusterName}}-control-plane-node-pool" namespace: "{{.TargetNamespace}}" infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 @@ -93,7 +93,7 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: VCDMachineTemplate metadata: - name: "{{.ClusterName}}-control-plane" + name: "{{.ClusterName}}-control-plane-node-pool" namespace: "{{.TargetNamespace}}" spec: template: @@ -108,7 +108,7 @@ spec: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlane metadata: - name: "{{.ClusterName}}-control-plane" + name: "{{.ClusterName}}-control-plane-node-pool" namespace: "{{.TargetNamespace}}" spec: kubeadmConfigSpec: @@ -150,7 +150,7 @@ spec: infrastructureRef: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: VCDMachineTemplate - name: "{{.ClusterName}}-control-plane" + name: "{{.ClusterName}}-control-plane-node-pool" namespace: "{{.TargetNamespace}}" replicas: {{.ControlPlaneMachineCount}} version: "{{.KubernetesVersion}}" diff --git a/vcd/cse/capi-yaml/node_pool.tmpl b/vcd/cse/capi-yaml/node_pool.tmpl index 7317f87a0..e2292c7d7 100644 --- a/vcd/cse/capi-yaml/node_pool.tmpl +++ b/vcd/cse/capi-yaml/node_pool.tmpl @@ -12,7 +12,7 @@ spec: placementPolicy: "{{.NodePoolPlacementPolicy}}" storageProfile: "{{.NodePoolStorageProfile}}" diskSize: "{{.NodePoolDiskSize}}" - enableNvidiaGPU: "{{.NodePoolEnableGpu}}" + enableNvidiaGPU: {{.NodePoolEnableGpu}} --- apiVersion: cluster.x-k8s.io/v1beta1 kind: MachineDeployment diff --git a/vcd/cse/rde.tmpl b/vcd/cse/rde.tmpl index 3c156843f..e5ea3e2b8 100644 --- a/vcd/cse/rde.tmpl +++ b/vcd/cse/rde.tmpl @@ -19,7 +19,7 @@ "filesystem": "{{.DefaultStorageClassFileSystem}}", "k8sStorageClassName": "{{.DefaultStorageClassName}}", "vcdStorageProfileName": "{{.DefaultStorageClassStorageProfile}}", - "useDeleteReclaimPolicy": "{{.DefaultStorageClassReclaimPolicy}}" + "useDeleteReclaimPolicy": {{.DefaultStorageClassUseDeleteReclaimPolicy}} }, {{- end }} "secure": { diff --git a/vcd/resource_vcd_catalog_access_control.go b/vcd/resource_vcd_catalog_access_control.go index 28d4e73e5..422b6c95a 100644 --- a/vcd/resource_vcd_catalog_access_control.go +++ b/vcd/resource_vcd_catalog_access_control.go @@ -352,5 +352,5 @@ func runWithRetry(operationDescription, errorMessage string, timeout time.Durati elapsed = time.Since(start) attempts++ } - return nil, fmt.Errorf(errorMessage+" :%s", err) + return nil, fmt.Errorf(errorMessage+": %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 8d5ef9341..aebf2c522 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -13,7 +13,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" - "net/url" "strconv" "strings" "text/template" @@ -64,7 +63,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, ForceNew: true, Description: "The name of the Kubernetes cluster", - // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") + ValidateDiagFunc: 
matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "ova_id": { Type: schema.TypeString, @@ -131,13 +131,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { return nil }, }, - "disk_size": { + "disk_size_gi": { Type: schema.TypeInt, Optional: true, Default: 20, // As suggested in UI ForceNew: true, - ValidateDiagFunc: minimumValue(20, "disk size must be at least 20G"), - Description: "Disk size for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + Description: "Disk size, in Gibibytes, for the control plane nodes. Must be at least 20", }, "sizing_policy_id": { Type: schema.TypeString, @@ -177,7 +177,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, Description: "The name of this node pool", - // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "machine_count": { Type: schema.TypeInt, @@ -186,13 +187,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Description: "The number of nodes that this node pool has. Must be higher than 0", ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), }, - "disk_size": { + "disk_size_gi": { Type: schema.TypeInt, Optional: true, Default: 20, // As suggested in UI ForceNew: true, - Description: "Disk size for the control plane nodes", - ValidateDiagFunc: minimumValue(20, "disk size must be at least 20G"), + Description: "Disk size, in Gibibytes, for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), }, "sizing_policy_id": { Type: schema.TypeString, @@ -236,7 +237,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, Type: schema.TypeString, Description: "Name to give to this storage class", - // TODO: Add validate func: must match regex("^[a-z][a-z0-9-]{0,29}[a-z0-9]$") + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "reclaim_policy": { Required: true, @@ -341,7 +343,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. - // We could use some other ways of filtering, but ID is the best and most accurate way. + // We could use some other ways of filtering, but ID is the best and most accurate. 
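// A standalone sketch of the naming rule enforced by the matchRegex validators in this schema:
// lowercase alphanumerics and '-', starting with a letter, ending with an alphanumeric, and at
// most 31 characters in total. The sample names are illustrative only.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	nameRegex := regexp.MustCompile(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`)

	samples := []string{
		"my-cluster", // valid
		"c",          // valid: a single lowercase letter is allowed
		"MyCluster",  // invalid: uppercase letters
		"1cluster",   // invalid: must start with a letter
		"cluster-",   // invalid: must end with an alphanumeric
		"this-cluster-name-is-longer-than-31-chars", // invalid: too long
	}
	for _, name := range samples {
		fmt.Printf("%-45q valid=%t\n", name, nameRegex.MatchString(name))
	}
}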
d.SetId(rde.DefinedEntity.ID) return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -361,29 +363,29 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - status = rde.DefinedEntity.Entity + status = rde.DefinedEntity.Entity["status"] time.Sleep(10 * time.Second) } if rde == nil { return diag.Errorf("could not read Kubernetes cluster with ID '%s': object is nil", d.Id()) } + vcdKe, ok := status.(map[string]interface{})["vcdKe"] + if !ok { + return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) + } + + // TODO: Kubeconfig, invoke behavior and so + + dSet(d, "state", vcdKe.(map[string]interface{})["state"]) + // This must be the last step, so it has the most possible elements jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) if err != nil { return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) } dSet(d, "raw_cluster_rde_json", jsonEntity) - vcdKe, ok := status.(map[string]interface{})["vcdKe"].(map[string]interface{}) // FIXME: Can be Nil pointer - if !ok { - return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) - } - - // TODO: Kubeconfig, invoke behavior and so - - dSet(d, "state", vcdKe["state"]) d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability - return nil } @@ -398,47 +400,67 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) - if !ok { - return diag.Errorf("could not delete the cluster, JSON object 'spec' is not correct in the RDE") - } + // We need to do this operation with retries due to the mutex mechanism VCD has (ETags). + // We may hit an error if CSE Server is doing any operation in the background and we attempt to mark the cluster for deletion, + // so we need to insist several times. 
+ _, err := runWithRetry( + fmt.Sprintf("marking the cluster %s for deletion", d.Get("name").(string)), + "error marking the cluster for deletion", + 30*time.Second, + nil, + func() (any, error) { + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("JSON object 'spec' is not correct in the RDE") + } - spec["markForDelete"] = true - spec["forceDelete"] = true - rde.DefinedEntity.Entity["spec"] = spec + spec["markForDelete"] = true + spec["forceDelete"] = true + rde.DefinedEntity.Entity["spec"] = spec - err = rde.Update(*rde.DefinedEntity) + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return nil, err + } + rde, err = vcdClient.GetRdeById(d.Id()) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + spec, ok = rde.DefinedEntity.Entity["spec"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("JSON object 'spec' is not correct in the RDE") + } + if !spec["markForDelete"].(bool) && !spec["forceDelete"].(bool) { + return nil, fmt.Errorf("the cluster with ID '%s' was not marked for deletion correctly", d.Id()) + } + return nil, nil + }, + ) if err != nil { - return diag.Errorf("could not mark the cluster '%s' for deletion: %s", rde.DefinedEntity.ID, err) - } - - timeout := float64(d.Get("delete_timeout_seconds").(int)) - start := time.Now() - elapsed := time.Since(start) - for { - rde, err = vcdClient.GetRdeById(d.Id()) - if err != nil { - if govcd.IsNotFound(err) { - break // This means the cluster is completely deleted + return diag.FromErr(err) + } + _, err = runWithRetry( + fmt.Sprintf("checking the cluster %s is correctly marked for deletion", d.Get("name").(string)), + "error completing the deletion of the cluster", + time.Duration(d.Get("delete_timeout_seconds").(int))*time.Second, + nil, + func() (any, error) { + _, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + if govcd.IsNotFound(err) { + return nil, nil // All is correct, the cluster RDE is gone so the cluster is deleted + } + return nil, fmt.Errorf("the cluster with ID '%s' is still present in VCD but it is unreadable: %s", d.Id(), err) } - return diag.Errorf("could not check whether the cluster '%s' is deleted: %s", d.Id(), err) - } - if elapsed.Minutes() > timeout { - // TODO: Improve the message by saying whether it is marked for deletion or not - return diag.Errorf("timeout of %.0f seconds reached. 
The cluster was not deleted in time, please try again", timeout) - } - // TODO: Check if it's marked for deletion already to avoid re-calling - err = rde.Update(*rde.DefinedEntity) - if err != nil { - return diag.Errorf("could not mark the cluster '%s' for deletion: %s", rde.DefinedEntity.ID, err) - } - time.Sleep(30 * time.Second) - elapsed += time.Since(start) + return nil, fmt.Errorf("the cluster with ID '%s' is marked for deletion but still present in VCD", d.Id()) + }, + ) + if err != nil { + return diag.FromErr(err) } return nil } @@ -454,7 +476,7 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl args := map[string]string{ "Name": clusterDetails.Name, "Org": clusterDetails.Org.AdminOrg.Name, - "VcdUrl": clusterDetails.VcdUrl.String(), + "VcdUrl": clusterDetails.VcdUrl, "Vdc": clusterDetails.VdcName, "Delete": "false", "ForceDelete": "false", @@ -466,7 +488,11 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { args["DefaultStorageClassStorageProfile"] = clusterDetails.UrnToNamesCache[d.Get("default_storage_class.0.storage_profile_id").(string)] args["DefaultStorageClassName"] = d.Get("default_storage_class.0.name").(string) - args["DefaultStorageClassReclaimPolicy"] = d.Get("default_storage_class.0.reclaim_policy").(string) + if d.Get("default_storage_class.0.reclaim_policy").(string) == "delete" { + args["DefaultStorageClassUseDeleteReclaimPolicy"] = "true" + } else { + args["DefaultStorageClassUseDeleteReclaimPolicy"] = "false" + } args["DefaultStorageClassFileSystem"] = d.Get("default_storage_class.0.filesystem").(string) } @@ -482,8 +508,6 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) } - fmt.Printf("%v", result) - return result.(map[string]interface{}), nil } @@ -510,7 +534,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (s "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), "PodCidr": d.Get("pods_cidr").(string), "ServiceCidr": d.Get("services_cidr").(string), - "VcdSite": clusterDetails.VcdUrl.String(), + "VcdSite": clusterDetails.VcdUrl, "Org": clusterDetails.Org.AdminOrg.Name, "OrgVdc": clusterDetails.VdcName, "OrgVdcNetwork": clusterDetails.NetworkName, @@ -519,7 +543,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (s "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], - "ControlPlaneDiskSize": strconv.Itoa(d.Get("control_plane.0.disk_size").(int)), + "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), "DnsVersion": clusterDetails.TkgVersion.CoreDns, "EtcdVersion": clusterDetails.TkgVersion.Etcd, @@ -536,10 +560,10 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (s } if d.Get("node_health_check").(bool) { - args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to 
render the template correctly - args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix - args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix - args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly + args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix + args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix + args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix } if err := capiYamlEmpty.Execute(buf, args); err != nil { @@ -591,7 +615,7 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[nodePool["sizing_policy_id"].(string)], "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[nodePool["storage_profile_id"].(string)], - "NodePoolDiskSize": strconv.Itoa(nodePool["disk_size"].(int)), + "NodePoolDiskSize": fmt.Sprintf("%dGi", nodePool["disk_size_gi"].(int)), "NodePoolEnableGpu": strconv.FormatBool(vpguPolicyId != ""), "NodePoolMachineCount": strconv.Itoa(nodePool["machine_count"].(int)), "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, @@ -608,7 +632,7 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto // a Kubernetes cluster using CSE. 
type clusterInfoDto struct { Name string - VcdUrl url.URL + VcdUrl string Org *govcd.AdminOrg VdcName string OvaName string @@ -871,6 +895,6 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon } result.ApiToken = apiToken.RefreshToken - result.VcdUrl = vcdClient.VCDClient.Client.VCDHREF + result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) return result, nil } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 715930df9..4063591f8 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -4,6 +4,7 @@ package vcd import ( "os" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -17,7 +18,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } var params = StringMap{ - "Name": t.Name(), + "Name": strings.ToLower(t.Name()), "OvaCatalog": testConfig.Cse.OvaCatalog, "OvaName": testConfig.Cse.OvaName, "SolutionsOrg": testConfig.Cse.SolutionsOrg, @@ -89,7 +90,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}41" + name = "{{.Name}}62" file_name = "{{.TokenFile}}" allow_token_file = true } @@ -106,7 +107,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { control_plane { machine_count = 1 - disk_size = 20 + disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id storage_profile_id = data.vcd_storage_profile.sp.id } @@ -114,7 +115,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { node_pool { name = "node-pool-1" machine_count = 1 - disk_size = 20 + disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id storage_profile_id = data.vcd_storage_profile.sp.id } @@ -122,7 +123,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { node_pool { name = "node-pool-2" machine_count = 1 - disk_size = 20 + disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id storage_profile_id = data.vcd_storage_profile.sp.id } @@ -137,8 +138,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { pods_cidr = "100.10.0.0/11" services_cidr = "100.90.0.0/11" - auto_repair_on_errors = true - node_health_check = true - delete_timeout_seconds = 10 + auto_repair_on_errors = false + node_health_check = false } ` diff --git a/vcd/validate_funcs.go b/vcd/validate_funcs.go index d716ef077..79671267d 100644 --- a/vcd/validate_funcs.go +++ b/vcd/validate_funcs.go @@ -185,6 +185,24 @@ func minimumValue(min int, errorMessage string) schema.SchemaValidateDiagFunc { } } +// matchRegex returns a SchemaValidateDiagFunc that tests whether the provided value matches the regular expression +func matchRegex(regex, errorMessage string) schema.SchemaValidateDiagFunc { + return func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(string) + if !ok { + return diag.Errorf("could not parse string value '%v'", v) + } + r, err := regexp.Compile(regex) + if err != nil { + return diag.Errorf("could not compile regular expression '%s'", regex) + } + if !r.MatchString(value) { + return diag.Errorf("%s", errorMessage) + } + return nil + } +} + // IsFloatAndBetween returns a SchemaValidateFunc which tests if the provided value convertable to // float64 and is between min and max (inclusive). 
func IsFloatAndBetween(min, max float64) schema.SchemaValidateFunc { diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json new file mode 100644 index 000000000..34a708bbd --- /dev/null +++ b/vcdTestAccVcdCseKubernetesCluster.json @@ -0,0 +1,6 @@ +{ + "token_type": "API Token", + "refresh_token": "Mj0soHAff2HYS1WWBan0gXkw5kVm9p8l", + "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", + "updated_on": "2024-01-15T14:51:33+01:00" + } \ No newline at end of file From 6e9f46cbd93b8cc0c91747b451b2164ae968925d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 15 Jan 2024 16:45:13 +0100 Subject: [PATCH 014/156] Refactor delete and add navigateMap func Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 59 ++++---- ...esource_vcd_cse_kubernetes_cluster_test.go | 2 +- vcd/structure.go | 45 +++++- vcd/structure_unit_test.go | 141 ++++++++++++++++++ vcdTestAccVcdCseKubernetesCluster.json | 4 +- 5 files changed, 218 insertions(+), 33 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index aebf2c522..ca3f25a08 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -30,10 +30,9 @@ var cseClusterYamlTemplate string //go:embed cse/capi-yaml/node_pool.tmpl var cseNodePoolTemplate string -// Map of CSE version -> [VCDKEConfig RDE Type version, CAPVCD RDE Type version] +// Map of CSE version -> [VCDKEConfig RDE Type version, CAPVCD RDE Type version, CAPVCD Behavior version] var cseVersions = map[string][]string{ - "4.1": {"1.1.0", "1.2.0"}, - "4.2": {"1.1.0", "1.2.0"}, + "4.2": {"1.1.0", "1.2.0", "1.0.0"}, } func resourceVcdCseKubernetesCluster() *schema.Resource { @@ -288,8 +287,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 120, - Description: "The time, in seconds, to wait for the cluster to be deleted when it is marked for deletion", - ValidateDiagFunc: minimumValue(10, "timeout must be at least 10 seconds"), + Description: "The time, in seconds, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), }, "state": { Type: schema.TypeString, @@ -311,14 +310,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { } // getCseRdeTypeVersions gets the RDE Type versions. 
First returned parameter is VCDKEConfig, second is CAPVCDCluster -func getCseRdeTypeVersions(d *schema.ResourceData) (string, string) { +func getCseRdeTypeVersions(d *schema.ResourceData) (string, string, string) { versions := cseVersions[d.Get("cse_version").(string)] - return versions[0], versions[1] + return versions[0], versions[1], versions[2] } func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion := getCseRdeTypeVersions(d) + vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion, _ := getCseRdeTypeVersions(d) clusterDetails, err := createClusterInfoDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion) if err != nil { @@ -350,6 +349,8 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) + _, _, capvcdBehaviorVersion := getCseRdeTypeVersions(d) + var status interface{} var rde *govcd.DefinedEntity @@ -374,10 +375,20 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) } - // TODO: Kubeconfig, invoke behavior and so - + // TODO: Add timeout + for vcdKe.(map[string]interface{})["state"] != nil && vcdKe.(map[string]interface{})["state"].(string) != "provisioned" { + if d.Get("auto_repair_on_errors").(bool) && vcdKe.(map[string]interface{})["state"].(string) == "error" { + return diag.Errorf("cluster creation finished with errors") + } + time.Sleep(30 * time.Second) + } dSet(d, "state", vcdKe.(map[string]interface{})["state"]) + _, err := rde.InvokeBehavior(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}) + if err != nil { + return diag.Errorf("could not retrieve Kubeconfig: %s", err) + } + // This must be the last step, so it has the most possible elements jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) if err != nil { @@ -413,30 +424,19 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m if err != nil { return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - spec, ok := rde.DefinedEntity.Entity["spec"].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("JSON object 'spec' is not correct in the RDE") - } - spec["markForDelete"] = true - spec["forceDelete"] = true - rde.DefinedEntity.Entity["spec"] = spec + vcdKe, err := navigateMap[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") + if err != nil { + return nil, fmt.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE") + } + vcdKe["markForDelete"] = true + vcdKe["forceDelete"] = true + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"] = vcdKe err = rde.Update(*rde.DefinedEntity) if err != nil { return nil, err } - rde, err = vcdClient.GetRdeById(d.Id()) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - spec, ok = rde.DefinedEntity.Entity["spec"].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("JSON object 'spec' is not correct in the RDE") - } - if !spec["markForDelete"].(bool) && !spec["forceDelete"].(bool) { - return nil, fmt.Errorf("the 
cluster with ID '%s' was not marked for deletion correctly", d.Id()) - } return nil, nil }, ) @@ -452,10 +452,11 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m _, err := vcdClient.GetRdeById(d.Id()) if err != nil { if govcd.IsNotFound(err) { - return nil, nil // All is correct, the cluster RDE is gone so the cluster is deleted + return nil, nil // All is correct, the cluster RDE is gone, so it is deleted } return nil, fmt.Errorf("the cluster with ID '%s' is still present in VCD but it is unreadable: %s", d.Id(), err) } + return nil, fmt.Errorf("the cluster with ID '%s' is marked for deletion but still present in VCD", d.Id()) }, ) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 4063591f8..7d83fa94a 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -90,7 +90,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}62" + name = "{{.Name}}63" file_name = "{{.TokenFile}}" allow_token_file = true } diff --git a/vcd/structure.go b/vcd/structure.go index 79dbd8b41..e515182e7 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -24,6 +24,49 @@ func getKeys[K comparable, V any](input map[K]V) []K { return result } +// navigateMap traverses the input, which is map of maps, following the introduced path that should be +// in the syntax "keyA.keyB.keyC..." +func navigateMap[ResultType any](input interface{}, path string) (ResultType, error) { + var nothing ResultType + if input == nil { + return nothing, fmt.Errorf("the input is nil") + } + inputMap, ok := input.(map[string]interface{}) + if !ok { + return nothing, fmt.Errorf("the input is a %T, not a map[string]interface{}", input) + } + if len(inputMap) == 0 { + return nothing, fmt.Errorf("the map is empty") + } + pathUnits := strings.Split(path, ".") + completed := false + i := 0 + var result interface{} + for !completed { + subPath := pathUnits[i] + traversed, ok := inputMap[subPath] + if !ok { + return nothing, fmt.Errorf("key '%s' does not exist in input map", subPath) + } + if i < len(pathUnits)-1 { + traversedMap, ok := traversed.(map[string]interface{}) + if !ok { + return nothing, fmt.Errorf("key '%s' is a %T, not a map, but there are still %d paths to explore", subPath, traversed, len(pathUnits)-(i+1)) + } + inputMap = traversedMap + } else { + completed = true + result = traversed + } + i++ + } + resultTyped, ok := result.(ResultType) + if !ok { + return nothing, fmt.Errorf("could not convert obtained type %T to requested %T", result, nothing) + } + return resultTyped, nil +} + func expandIPRange(configured []interface{}) (types.IPRanges, error) { ipRange := make([]*types.IPRange, 0, len(configured)) @@ -116,7 +159,7 @@ func convertStringsToTypeSet(param []string) *schema.Set { return set } -// addrOf is a generic function to return the address of a variable +// addrOf is a wantType function to return the address of a variable // Note. It is mainly meant for converting literal values to pointers (e.g. `addrOf(true)`) or cases // for converting variables coming out straight from Terraform schema (e.g. // `addrOf(d.Get("name").(string))`). 
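// An illustrative usage sketch for the navigateMap helper added above (renamed to traverseMapAndGet
// in a later commit). Written as a test in the same `vcd` package so the helper is in scope; the
// nested map mimics a trimmed-down CAPVCD RDE entity and is made up for the example.
package vcd

import "testing"

func TestNavigateMapUsageSketch(t *testing.T) {
	entity := map[string]interface{}{
		"status": map[string]interface{}{
			"vcdKe": map[string]interface{}{
				"state": "provisioned",
			},
		},
	}

	// Equivalent to chaining type assertions like
	// entity["status"].(map[string]interface{})["vcdKe"].(map[string]interface{})["state"].(string),
	// but missing keys and type mismatches are reported as errors instead of panicking.
	state, err := navigateMap[string](entity, "status.vcdKe.state")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if state != "provisioned" {
		t.Fatalf("expected 'provisioned', got '%s'", state)
	}

	// A path that does not exist returns a descriptive error instead of a zero value.
	if _, err := navigateMap[string](entity, "status.capvcd.kubeConfig"); err == nil {
		t.Fatal("expected an error for a path that is not present in the map")
	}
}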
diff --git a/vcd/structure_unit_test.go b/vcd/structure_unit_test.go index 735b7ae9e..c3e4e330d 100644 --- a/vcd/structure_unit_test.go +++ b/vcd/structure_unit_test.go @@ -3,6 +3,7 @@ package vcd import ( + "reflect" "testing" ) @@ -164,3 +165,143 @@ func Test_areMarshaledJsonEqual(t *testing.T) { }) } } + +// Test_navigateMap tests navigateMap function +func Test_navigateMap(t *testing.T) { + type args struct { + input interface{} + path string + } + tests := []struct { + name string + args args + wantType string + want interface{} + wantErr string + }{ + { + name: "input is nil", + args: args{ + input: nil, + }, + wantErr: "the input is nil", + }, + { + name: "input is not a map", + args: args{ + input: "error", + }, + wantErr: "the input is a string, not a map[string]interface{}", + }, + { + name: "map is empty", + args: args{ + input: map[string]interface{}{}, + }, + wantErr: "the map is empty", + }, + { + name: "map does not have key", + args: args{ + input: map[string]interface{}{ + "keyA": "value", + }, + path: "keyB", + }, + wantErr: "key 'keyB' does not exist in input map", + }, + { + name: "map has a single simple key", + args: args{ + input: map[string]interface{}{ + "keyA": "value", + }, + path: "keyA", + }, + wantType: "string", + want: "value", + }, + { + name: "map has a single complex key", + args: args{ + input: map[string]interface{}{ + "keyA": map[string]interface{}{ + "keyB": "value", + }, + }, + path: "keyA", + }, + wantType: "map", + want: map[string]interface{}{ + "keyB": "value", + }, + }, + { + name: "map has a complex structure", + args: args{ + input: map[string]interface{}{ + "keyA": map[string]interface{}{ + "keyB": map[string]interface{}{ + "keyC": "value", + }, + }, + }, + path: "keyA.keyB.keyC", + }, + wantType: "string", + want: "value", + }, + { + name: "requested path is deeper than the map structure", + args: args{ + input: map[string]interface{}{ + "keyA": map[string]interface{}{ + "keyB": map[string]interface{}{ + "keyC": "value", + }, + }, + }, + path: "keyA.keyB.keyC.keyD", + }, + wantErr: "key 'keyC' is a string, not a map, but there are still 1 paths to explore", + }, + { + name: "obtained value does not correspond to the desired type", + args: args{ + input: map[string]interface{}{ + "keyA": map[string]interface{}{ + "keyB": map[string]interface{}{ + "keyC": map[string]interface{}{}, + }, + }, + }, + path: "keyA.keyB.keyC", + }, + wantType: "string", + wantErr: "could not convert obtained type map[string]interface {} to requested string", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var got interface{} + var err error + if tt.wantType == "string" { + got, err = navigateMap[string](tt.args.input, tt.args.path) + } else if tt.wantType == "map" { + got, err = navigateMap[map[string]interface{}](tt.args.input, tt.args.path) + } else { + t.Fatalf("wantType type not used in this test") + } + + if err != nil { + if tt.wantErr != err.Error() { + t.Errorf("navigateMap() error = %v, wantErr = %v", err, tt.wantErr) + } + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("navigateMap() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json index 34a708bbd..bb4458dc5 100644 --- a/vcdTestAccVcdCseKubernetesCluster.json +++ b/vcdTestAccVcdCseKubernetesCluster.json @@ -1,6 +1,6 @@ { "token_type": "API Token", - "refresh_token": "Mj0soHAff2HYS1WWBan0gXkw5kVm9p8l", + "refresh_token": "U2qkEbWFeknKc8OT1Mf1uLC9exbKNFD8", "updated_by": 
"terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-01-15T14:51:33+01:00" + "updated_on": "2024-01-15T15:11:50+01:00" } \ No newline at end of file From 9caa30ed68f6d0302be3d21a40641033daab6457 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 15 Jan 2024 16:50:45 +0100 Subject: [PATCH 015/156] Refactor delete and add navigateMap func Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 4 ++-- vcd/structure.go | 7 ++++--- vcd/structure_unit_test.go | 12 ++++++------ 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index ca3f25a08..58e885e59 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -425,9 +425,9 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - vcdKe, err := navigateMap[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") + vcdKe, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") if err != nil { - return nil, fmt.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE") + return nil, fmt.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE: %s", err) } vcdKe["markForDelete"] = true vcdKe["forceDelete"] = true diff --git a/vcd/structure.go b/vcd/structure.go index e515182e7..487f21a46 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -24,9 +24,10 @@ func getKeys[K comparable, V any](input map[K]V) []K { return result } -// navigateMap traverses the input, which is map of maps, following the introduced path that should be -// in the syntax "keyA.keyB.keyC..." -func navigateMap[ResultType any](input interface{}, path string) (ResultType, error) { +// traverseMapAndGet traverses the input, which is map of maps, following the introduced path that should be +// in the syntax "keyA.keyB.keyC...". It obtains the value of type ResultType (generic one) that is inside +// the last requested subpath ("keyC" in the example). 
+func traverseMapAndGet[ResultType any](input interface{}, path string) (ResultType, error) { var nothing ResultType if input == nil { return nothing, fmt.Errorf("the input is nil") diff --git a/vcd/structure_unit_test.go b/vcd/structure_unit_test.go index c3e4e330d..5bcf3b338 100644 --- a/vcd/structure_unit_test.go +++ b/vcd/structure_unit_test.go @@ -166,8 +166,8 @@ func Test_areMarshaledJsonEqual(t *testing.T) { } } -// Test_navigateMap tests navigateMap function -func Test_navigateMap(t *testing.T) { +// Test_traverseMapAndGet tests traverseMapAndGet function +func Test_traverseMapAndGet(t *testing.T) { type args struct { input interface{} path string @@ -286,21 +286,21 @@ func Test_navigateMap(t *testing.T) { var got interface{} var err error if tt.wantType == "string" { - got, err = navigateMap[string](tt.args.input, tt.args.path) + got, err = traverseMapAndGet[string](tt.args.input, tt.args.path) } else if tt.wantType == "map" { - got, err = navigateMap[map[string]interface{}](tt.args.input, tt.args.path) + got, err = traverseMapAndGet[map[string]interface{}](tt.args.input, tt.args.path) } else { t.Fatalf("wantType type not used in this test") } if err != nil { if tt.wantErr != err.Error() { - t.Errorf("navigateMap() error = %v, wantErr = %v", err, tt.wantErr) + t.Errorf("traverseMapAndGet() error = %v, wantErr = %v", err, tt.wantErr) } return } if !reflect.DeepEqual(got, tt.want) { - t.Errorf("navigateMap() got = %v, want %v", got, tt.want) + t.Errorf("traverseMapAndGet() got = %v, want %v", got, tt.want) } }) } From b24e80c29bef631b1b943841e2c2902dd9273c06 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 15 Jan 2024 17:26:39 +0100 Subject: [PATCH 016/156] Fixes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 41 ++++++++++++------- ...esource_vcd_cse_kubernetes_cluster_test.go | 2 +- vcd/structure.go | 8 ++-- vcdTestAccVcdCseKubernetesCluster.json | 4 +- 4 files changed, 34 insertions(+), 21 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 58e885e59..0fa8e71b0 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -351,11 +351,10 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m vcdClient := meta.(*VCDClient) _, _, capvcdBehaviorVersion := getCseRdeTypeVersions(d) - var status interface{} var rde *govcd.DefinedEntity - // TODO: Add timeout - for status == nil { + state := "provisioning" + for state == "provisioning" || state == "" { // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be // many clusters with the same name and RDE Type. 
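// The polling loops in this hunk still carry a "TODO: Add timeout". A standalone sketch of one way
// to bound such a wait: poll a state-reporting function until it returns the desired value or a
// deadline expires. The getState closure below is a stand-in for reading "status.vcdKe.state" from
// the cluster RDE and is made up for the example.
package main

import (
	"fmt"
	"time"
)

// waitForState polls getState every interval until it returns wanted, fails, or timeout elapses.
func waitForState(getState func() (string, error), wanted string, interval, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		state, err := getState()
		if err != nil {
			return err
		}
		if state == wanted {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for state '%s', last observed state was '%s'", wanted, state)
		}
		time.Sleep(interval)
	}
}

func main() {
	// Fake state source: reports "provisioning" twice and then "provisioned".
	calls := 0
	getState := func() (string, error) {
		calls++
		if calls < 3 {
			return "provisioning", nil
		}
		return "provisioned", nil
	}

	if err := waitForState(getState, "provisioned", 100*time.Millisecond, 5*time.Second); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("cluster is provisioned")
}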
var err error @@ -364,30 +363,42 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - status = rde.DefinedEntity.Entity["status"] - time.Sleep(10 * time.Second) + state, _ = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + time.Sleep(60 * time.Second) } if rde == nil { + // Should never reach this return return diag.Errorf("could not read Kubernetes cluster with ID '%s': object is nil", d.Id()) } - vcdKe, ok := status.(map[string]interface{})["vcdKe"] - if !ok { - return diag.Errorf("could not read the 'status.vcdKe' JSON object of the Kubernetes cluster with ID '%s'", d.Id()) - } // TODO: Add timeout - for vcdKe.(map[string]interface{})["state"] != nil && vcdKe.(map[string]interface{})["state"].(string) != "provisioned" { - if d.Get("auto_repair_on_errors").(bool) && vcdKe.(map[string]interface{})["state"].(string) == "error" { - return diag.Errorf("cluster creation finished with errors") + for state != "provisioned" { + if state == "error" && !d.Get("auto_repair_on_errors").(bool) { + return diag.Errorf("cluster is in state '%s' and auto_repair_on_errors=false", state) + } + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - time.Sleep(30 * time.Second) + + state, _ = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + time.Sleep(10 * time.Second) + } + + dSet(d, "state", state) + + // This can only be done if the cluster is in 'provisioned' state + invocationResult := map[string]interface{}{} + err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}, invocationResult) + if err != nil { + return diag.Errorf("could not retrieve Kubeconfig: %s", err) } - dSet(d, "state", vcdKe.(map[string]interface{})["state"]) - _, err := rde.InvokeBehavior(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}) + kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") if err != nil { return diag.Errorf("could not retrieve Kubeconfig: %s", err) } + dSet(d, "kubeconfig", kubeconfig) // This must be the last step, so it has the most possible elements jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 7d83fa94a..0216f1250 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -90,7 +90,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}63" + name = "{{.Name}}65" file_name = "{{.TokenFile}}" allow_token_file = true } diff --git a/vcd/structure.go b/vcd/structure.go index 487f21a46..19d8062cc 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -24,9 +24,11 @@ func getKeys[K comparable, V any](input map[K]V) []K { return result } -// traverseMapAndGet traverses the input, which is map of maps, following the introduced path that should be -// in the syntax "keyA.keyB.keyC...". It obtains the value of type ResultType (generic one) that is inside -// the last requested subpath ("keyC" in the example). 
+// traverseMapAndGet traverses the input interface{}, which should be a map of maps, by following the path specified as +// "keyA.keyB.keyC.keyD", doing something similar to, visually speaking, map["keyA"]["keyB"]["keyC"]["keyD"], or in other words, +// it goes inside every inner map, which are inside the initial map, until the given path is finished. +// The final value, "keyD" in the same example, should be of type ResultType, which is a generic type requested during the call +// to this function. func traverseMapAndGet[ResultType any](input interface{}, path string) (ResultType, error) { var nothing ResultType if input == nil { diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json index bb4458dc5..7f251b343 100644 --- a/vcdTestAccVcdCseKubernetesCluster.json +++ b/vcdTestAccVcdCseKubernetesCluster.json @@ -1,6 +1,6 @@ { "token_type": "API Token", - "refresh_token": "U2qkEbWFeknKc8OT1Mf1uLC9exbKNFD8", + "refresh_token": "mKtvcgS9La327wFyPXcmK51qgHtySyPE", "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-01-15T15:11:50+01:00" + "updated_on": "2024-01-15T17:15:48+01:00" } \ No newline at end of file From 652ccccd9ce3657f0e5dce1172a8cfc013a6bfc7 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 12:35:25 +0100 Subject: [PATCH 017/156] Modified creation to wait for clusters, Start update operation Signed-off-by: abarreiro --- go.mod | 1 + go.sum | 1 + vcd/resource_vcd_cse_kubernetes_cluster.go | 186 ++++++++++++++++----- 3 files changed, 143 insertions(+), 45 deletions(-) diff --git a/go.mod b/go.mod index 0717fc015..3424c0b2a 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/kr/pretty v0.2.1 github.com/vmware/go-vcloud-director/v2 v2.22.0 + gopkg.in/yaml.v2 v2.4.0 ) require ( diff --git a/go.sum b/go.sum index ec1f30035..62c13bbfa 100644 --- a/go.sum +++ b/go.sum @@ -209,5 +209,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 0fa8e71b0..ffc5c3b72 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -13,6 +13,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" + "github.com/vmware/go-vcloud-director/v2/util" + "gopkg.in/yaml.v2" "strconv" "strings" "text/template" @@ -283,11 +285,18 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Default: false, Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", }, - "delete_timeout_seconds": { + "create_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The time, in minutes, to wait for the cluster to be completely 
created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), + }, + "delete_timeout_minutes": { Type: schema.TypeInt, Optional: true, Default: 120, - Description: "The time, in seconds, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", + Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), }, "state": { @@ -344,66 +353,97 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. // We could use some other ways of filtering, but ID is the best and most accurate. d.SetId(rde.DefinedEntity.ID) + + _, err = waitForClusterState(vcdClient, d, rde.DefinedEntity.ID, "provisioned", "error") + if err != nil { + return diag.Errorf("Kubernetes cluster creation finished with errors: %s", err) + } + return resourceVcdCseKubernetesRead(ctx, d, meta) } -func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - vcdClient := meta.(*VCDClient) - _, _, capvcdBehaviorVersion := getCseRdeTypeVersions(d) - - var rde *govcd.DefinedEntity - // TODO: Add timeout - state := "provisioning" - for state == "provisioning" || state == "" { - // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be - // many clusters with the same name and RDE Type. - var err error - rde, err = vcdClient.GetRdeById(d.Id()) +// waitForClusterState waits for the Kubernetes cluster to be in one of the specified states, either indefinitely (if "create_timeout_minutes=0") +// or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep +// waiting. 
+func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId string, statesToWaitFor ...string) (string, error) { + var elapsed time.Duration + timeout := d.Get("create_timeout_minutes").(int) + currentState := "" + + start := time.Now() + for elapsed <= time.Duration(timeout) || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + rde, err := vcdClient.GetRdeById(rdeId) if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - state, _ = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") - time.Sleep(60 * time.Second) - } - if rde == nil { - // Should never reach this return - return diag.Errorf("could not read Kubernetes cluster with ID '%s': object is nil", d.Id()) - } - - // TODO: Add timeout - for state != "provisioned" { - if state == "error" && !d.Get("auto_repair_on_errors").(bool) { - return diag.Errorf("cluster is in state '%s' and auto_repair_on_errors=false", state) + return "", err } - rde, err := vcdClient.GetRdeById(d.Id()) + currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) + time.Sleep(50 * time.Second) + continue } - - state, _ = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") - time.Sleep(10 * time.Second) + for _, stateToWaitFor := range statesToWaitFor { + if currentState == "error" && stateToWaitFor == "error" && d.Get("auto_repair_on_errors").(bool) { + // We do nothing, just keep waiting for the cluster to auto-recover and hopefully be in another currentState before timeout + break + } + if currentState == stateToWaitFor { + return currentState, nil + } + } + time.Sleep(50 * time.Second) + elapsed = time.Since(start) } + return "", fmt.Errorf("timeout of %d seconds reached, latest cluster state obtained was '%s'", timeout, currentState) +} - dSet(d, "state", state) +func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + var diags diag.Diagnostics + _, _, capvcdBehaviorVersion := getCseRdeTypeVersions(d) - // This can only be done if the cluster is in 'provisioned' state - invocationResult := map[string]interface{}{} - err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}, invocationResult) + // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be + // many clusters with the same name and RDE Type. 
+ var err error + rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { - return diag.Errorf("could not retrieve Kubeconfig: %s", err) + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") + state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") if err != nil { - return diag.Errorf("could not retrieve Kubeconfig: %s", err) + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + dSet(d, "state", state) + + if state == "provisioned" { + // This can only be done if the cluster is in 'provisioned' state + invocationResult := map[string]interface{}{} + err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}, invocationResult) + if err != nil { + return diag.Errorf("could not retrieve Kubeconfig: %s", err) + } + + kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") + if err != nil { + return diag.Errorf("could not retrieve Kubeconfig: %s", err) + } + dSet(d, "kubeconfig", kubeconfig) + } else { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Kubernetes cluster not in 'provisioned' state", + Detail: fmt.Sprintf("Kubernetes cluster with ID is in '%s' state, won't be able to read/refresh the Kubeconfig", d.Id()), + }) } - dSet(d, "kubeconfig", kubeconfig) // This must be the last step, so it has the most possible elements jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) if err != nil { - return diag.Errorf("could not save the cluster '%s' raw RDE contents into state: %s", rde.DefinedEntity.ID, err) + diags = append(diags, diag.Errorf("could not save the cluster '%s' raw RDE contents into 'raw_cluster_rde_json' attribute: %s", rde.DefinedEntity.ID, err)...) + } + if diags != nil && diags.HasError() { + return diags } dSet(d, "raw_cluster_rde_json", jsonEntity) @@ -412,7 +452,63 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m } func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - // TODO + vcdClient := meta.(*VCDClient) + + // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be + // many clusters with the same name and RDE Type. + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + if err != nil { + return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + if state != "provisioned" { + return diag.Errorf("could not update the Kubernetes cluster with ID '%s': It is in '%s' state, but should be 'provisioned'", d.Id(), state) + } + // Only OVA and pool sizes can be changed. 
This is guaranteed by all ForceNew flags, but it's worth it to + // double-check + if d.HasChangesExcept("ova_id", "control_plane.0.machine_count", "node_pool") { + return diag.Errorf("only the Kubernetes template or the control plane/node machine pools can be modified") + } + + // Gets and unmarshals the CAPI YAML to update it + capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") + if err != nil { + return diag.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + capiMap := map[string]interface{}{} + err = yaml.Unmarshal([]byte(capiYaml), &capiMap) + if err != nil { + return diag.Errorf("could not unmarshal the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + // TODO: Change YAML here + if d.HasChange("ova_id") { + newOva := d.Get("ova_id") + _, err := vcdClient.GetVAppTemplateById(newOva.(string)) + if err != nil { + return diag.Errorf("could not retrieve the new Kubernetes OVA with ID '%s': %s", newOva, err) + } + // TODO: Check whether the update can be performed + } + if d.HasChange("control_plane.0.machine_count") { + util.Logger.Printf("not done but make static complains :)") + } + if d.HasChange("node_pool") { + util.Logger.Printf("not done but make static complains :)") + } + + updatedYaml := capiYaml // FIXME + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml + + // FIXME: This must be done with retries due to ETag clash + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + return diag.Errorf("not implemented") } @@ -457,7 +553,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m _, err = runWithRetry( fmt.Sprintf("checking the cluster %s is correctly marked for deletion", d.Get("name").(string)), "error completing the deletion of the cluster", - time.Duration(d.Get("delete_timeout_seconds").(int))*time.Second, + time.Duration(d.Get("delete_timeout_minutes").(int))*time.Minute, nil, func() (any, error) { _, err := vcdClient.GetRdeById(d.Id()) From f6cc5a93e5544cff0c083f812742aaf0e998ad21 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 12:41:09 +0100 Subject: [PATCH 018/156] Deprecate management guide and add changelog Signed-off-by: abarreiro --- .changes/v3.12.0/1195-features.md | 4 ++++ ...er_service_extension_4_x_cluster_management.html.markdown | 5 +++++ 2 files changed, 9 insertions(+) create mode 100644 .changes/v3.12.0/1195-features.md diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md new file mode 100644 index 000000000..01af870a5 --- /dev/null +++ b/.changes/v3.12.0/1195-features.md @@ -0,0 +1,4 @@ +* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension + installed and running [GH-1195] +* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension + installed and running [GH-1195] \ No newline at end of file diff --git a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown index 3bb1de3c7..ed9ba0d67 100644 --- a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown @@ 
-8,6 +8,11 @@ description: |- # Container Service Extension v4.1 Kubernetes clusters management +~> **This guide is DEPRECATED since v3.12+**. You should use the resource +[`vcd_cse_kubernetes_cluster`](/providers/vmware/vcd/latest/docs/resources/cse_kubernetes_cluster) +present in the VCD Provider since v3.12.0 to provision and manage Kubernetes clusters in a VCD appliance where Container Service Extension is installed +and running. + ## About This guide explains how to create, update and delete **Tanzu Kubernetes Grid multicloud (TKGm)** clusters in a VCD appliance with Container Service Extension v4.1 From f948624f654d228870b3c2d7850ee2c7e6b26316 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 12:41:29 +0100 Subject: [PATCH 019/156] Deprecate management guide and add changelog Signed-off-by: abarreiro --- ...ainer_service_extension_4_x_cluster_management.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown index ed9ba0d67..5e1392a9d 100644 --- a/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_cluster_management.html.markdown @@ -10,7 +10,7 @@ description: |- ~> **This guide is DEPRECATED since v3.12+**. You should use the resource [`vcd_cse_kubernetes_cluster`](/providers/vmware/vcd/latest/docs/resources/cse_kubernetes_cluster) -present in the VCD Provider since v3.12.0 to provision and manage Kubernetes clusters in a VCD appliance where Container Service Extension is installed +to provision and manage Kubernetes clusters in a VCD appliance where Container Service Extension is installed and running. ## About From 70ab7e31a88d43f9e8104fc6b7644c44a3e3de4e Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 13:33:05 +0100 Subject: [PATCH 020/156] Created docs Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 4 +- .../r/cse_kubernetes_cluster.html.markdown | 247 ++++++++++++++++++ 2 files changed, 249 insertions(+), 2 deletions(-) create mode 100644 website/docs/r/cse_kubernetes_cluster.html.markdown diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index ffc5c3b72..1073d14e0 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -102,7 +102,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file and its contents, as it contains sensitive information", + Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", }, "ssh_public_key": { Type: schema.TypeString, @@ -295,7 +295,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "delete_timeout_minutes": { Type: schema.TypeInt, Optional: true, - Default: 120, + Default: 10, Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 
0 means wait indefinitely", ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), }, diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown new file mode 100644 index 000000000..6c85af67b --- /dev/null +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -0,0 +1,247 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_cse_kubernetes_cluster" +sidebar_current: "docs-vcd-resource-cse-kubernetes-cluster" +description: |- + Provides a resource to manage Kubernetes clusters in VMware Cloud Director with Container Service Extension installed and running. +--- + +# vcd\_cse\_kubernetes\_cluster + +Provides a resource to manage Kubernetes clusters in VMware Cloud Director with Container Service Extension (CSE) installed and running. + +Supported in provider *v3.12+* + +-> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) + +## Example Usage + +```hcl +data "vcd_catalog" "tkg_catalog" { + org = "solutions_org" # The catalog is shared with 'tenant_org', so it is visible for tenants + name = "tkgm_catalog" +} + +# Fetch a valid Kubernetes template OVA +data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" +} + +data "vcd_org_vdc" "vdc" { + org = "tenant_org" + name = "tenant_vdc" +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "tenant_edgegateway" +} + +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "tenant_net_routed" +} + +# Fetch a valid Sizing policy created during CSE installation. +# Refer to the CSE installation guide for more information. 
+data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +data "vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" +} + +# The token file is required, and it should be safely stored +resource "vcd_api_token" "token" { + name = "myClusterToken" + file_name = "/home/Bob/vcdTestAccVcdCseKubernetesCluster.json" + allow_token_file = true +} + +resource "vcd_cse_kubernetes_cluster" "my_cluster" { + cse_version = "4.2" + runtime = "tkg" + name = "my-cluster" + ova_id = data.vcd_catalog_vapp_template.tkg_ova.id + org = data.vcd_org_vdc.vdc.org + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name + + control_plane { + machine_count = 1 + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + node_pool { + name = "node-pool-1" + machine_count = 1 + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + node_pool { + name = "node-pool-2" + machine_count = 1 + disk_size_gi = 20 + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id + storage_profile_id = data.vcd_storage_profile.sp.id + } + + default_storage_class { + name = "sc-1" + storage_profile_id = data.vcd_storage_profile.sp.id + reclaim_policy = "delete" + filesystem = "ext4" + } + + pods_cidr = "100.10.0.0/11" + services_cidr = "100.90.0.0/11" + + auto_repair_on_errors = false + node_health_check = false +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `cse_version` - (Required) Specifies the CSE version to use. Only `4.2` is supported +* `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) +* `name` - (Required) The name of the Kubernetes cluster. It must contain only lowercase alphanumeric characters or "-", + start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters +* `ova_id` - (Required) The ID of the vApp Template that corresponds to a Kubernetes template OVA +* `org` - (Optional) The name of organization that will host the Kubernetes cluster, optional if defined in the provider configuration +* `vdc_id` - (Required) The ID of the VDC that hosts the Kubernetes cluster +* `network_id` - (Required) The ID of the network that the Kubernetes cluster will use +* `owner` - (Optional) The user that creates the cluster and owns the API token specified in `api_token`. + It must have the `Kubernetes Cluster Author` role that was created during CSE installation. + If not specified, it assumes it's the user from the provider configuration +* `api_token_file` - (Required) A file generated by [`vcd_api_token` resource](/providers/vmware/vcd/latest/docs/resources/api_token), + that stores the API token used to create and manage the cluster, owned by the user specified in `owner`. + Be careful about this file, as it contains sensitive information +* `ssh_public_key` - (Optional) The SSH public key used to login into the cluster nodes +* `control_plane` - (Required) See [**Control Plane**](#control-plane) +* `node_pool` - (Required) See [**Node Pools**](#node-pools) +* `default_storage_class` - (Optional) See [**Default Storage Class**](#default-storage-class) +* `pods_cidr` - (Optional) A CIDR block for the pods to use. 
Defaults to `100.96.0.0/11`
+* `services_cidr` - (Optional) A CIDR block for the services to use. Defaults to `100.64.0.0/13`
+* `virtual_ip_subnet` - (Optional) A virtual IP subnet for the cluster
+* `auto_repair_on_errors` - (Optional) If errors occur before the Kubernetes cluster becomes available, and this argument is `true`,
+  CSE Server will automatically attempt to repair the cluster. Defaults to `false`
+* `node_health_check` - (Optional) After the Kubernetes cluster becomes available, nodes that become unhealthy will be
+  remediated according to unhealthy node conditions and remediation rules. Defaults to `false`
+* `create_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster to be completely created, with a
+  ready-to-use Kubeconfig. `0` means wait indefinitely (not recommended as it could hang Terraform). Defaults to `60`
+* `delete_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster to be deleted when it is marked
+  for deletion. `0` means wait indefinitely (not recommended as it could hang Terraform). Defaults to `10`
+
+### Control Plane
+
+The `control_plane` block is **required** and unique per resource, meaning that there must be **exactly one** of these
+in every resource.
+
+This block asks for the following arguments:
+
+* `machine_count` - (Optional) The number of nodes that the control plane has. Must be an odd number and higher than `0`. Defaults to `3`
+* `disk_size_gi` - (Optional) Disk size, in **Gibibytes**, for the control plane VMs. Must be at least `20`. Defaults to `20`
+* `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the ones made available during CSE installation
+* `placement_policy_id` - (Optional) VM Placement policy for the control plane VMs
+* `storage_profile_id` - (Optional) Storage profile for the control plane VMs
+* `ip` - (Optional) A custom IP to use for the control plane
+
+### Node Pools
+
+The `node_pool` block is **required**, and every cluster should have **at least one** of them.
+
+Each block asks for the following arguments:
+
+* `name` - (Required) The name of the node pool. It must contain only lowercase alphanumeric characters or "-",
+  start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters
+* `machine_count` - (Optional) The number of VMs that the node pool has. Must be higher than `0`. Defaults to `1`
+* `disk_size_gi` - (Optional) Disk size, in **Gibibytes**, for the node pool VMs. Must be at least `20`. Defaults to `20`
+* `sizing_policy_id` - (Optional) VM Sizing policy for the node pool VMs. Must be one of the ones made available during CSE installation
+* `placement_policy_id` - (Optional) VM Placement policy for the node pool VMs. If this one is set, `vgpu_policy_id` must be empty
+* `vgpu_policy_id` - (Optional) vGPU policy for the node pool VMs. If this one is set, `placement_policy_id` must be empty
+* `storage_profile_id` - (Optional) Storage profile for the node pool VMs
+
+### Default Storage Class
+
+The `default_storage_class` block is **optional**, and every cluster should have **at most one** of them.
+
+If defined, the block asks for the following arguments:
+
+* `name` - (Required) The name of the default storage class.
It must contain only lowercase alphanumeric characters or "-", + start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters +* `storage_profile_id` - (Required) Storage profile for the default storage class +* `reclaim_policy` - (Required) A value of `delete` deletes the volume when the PersistentVolumeClaim is deleted. `retain` does not, + and the volume can be manually reclaimed +* `filesystem` - (Required) Filesystem of the storage class, can be either `ext4` or `xfs` + +## Attribute Reference + +The following attributes are available for consumption as computed attributes: + +* `state` - The Kubernetes cluster status, can be `provisioning` when it is being created, `provisioned` when it was successfully + created and ready to use, or `error` when an error occurred. `provisioning` can only be obtained when a timeout happens during + cluster creation. `error` can only be obtained either with a timeout or when `auto_repair_on_errors=false`. +* `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned` +* `raw_cluster_rde_json` - The raw JSON representation of this Kubernetes cluster inside the [RDE](/providers/vmware/vcd/latest/docs/resources/rde) + that CSE uses to operate the cluster + +## Importing + +!!!!!!!!!!! TODO: NOT IMPLEMENTED. HOW TO DEAL WITH REQUIRED IDS? + +~> The current implementation of Terraform import can only import resources into the state. +It does not generate configuration. [More information.](https://www.terraform.io/docs/import/) + +An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the Cluster (RDE) ID for it. +An example is below: + +```hcl +# This is just a snippet of code that will host the imported cluster from VCD. +# This must not be created with Terraform beforehand +resource "vcd_cse_kubernetes_cluster" "imported_cluster" { + # Only the required arguments are needed + cse_version = "4.2" + name = "my-cluster" + ova_id = data.vcd_catalog_vapp_template.tkg_ova.id + org = "tenant_org" + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name + + control_plane { + + } + + node_pool { + name = "node-pool-1" + } +} +``` + +```sh +terraform import vcd_cse_kubernetes_cluster.imported_cluster urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 +``` + +-> The ID is required as it is the only way to unequivocally identify a Kubernetes cluster inside VCD. To obtain the ID +you can check the Kubernetes Container Clusters UI plugin, where all the available clusters are listed. + +After that, you can expand the configuration file and either update or delete the Kubernetes cluster. Running `terraform plan` +at this stage will show the difference between the minimal configuration file and the Kubernetes cluster stored properties. 
+ +[docs-import]:https://www.terraform.io/docs/import/ From 58ee326b66b12c593c9b41659d0fc0405632a560 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 13:35:12 +0100 Subject: [PATCH 021/156] Created docs Signed-off-by: abarreiro --- .changes/v3.12.0/1195-deprecations.md | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .changes/v3.12.0/1195-deprecations.md diff --git a/.changes/v3.12.0/1195-deprecations.md b/.changes/v3.12.0/1195-deprecations.md new file mode 100644 index 000000000..bb3f5776d --- /dev/null +++ b/.changes/v3.12.0/1195-deprecations.md @@ -0,0 +1,2 @@ +* Resource `vcd_cse_kubernetes_cluster` deprecates the Container Service Extension cluster management guide, + so users should not use `vcd_rde` resources to create a Kubernetes cluster anymore [GH-1195] From 6022cc1084eb8cb97f221a25d415e46bd7d6afe3 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 13:37:41 +0100 Subject: [PATCH 022/156] self-review Signed-off-by: abarreiro --- vcd/structure.go | 2 +- vcdTestAccVcdCseKubernetesCluster.json | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) delete mode 100644 vcdTestAccVcdCseKubernetesCluster.json diff --git a/vcd/structure.go b/vcd/structure.go index 19d8062cc..de5486e09 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -162,7 +162,7 @@ func convertStringsToTypeSet(param []string) *schema.Set { return set } -// addrOf is a wantType function to return the address of a variable +// addrOf is a generic function to return the address of a variable // Note. It is mainly meant for converting literal values to pointers (e.g. `addrOf(true)`) or cases // for converting variables coming out straight from Terraform schema (e.g. // `addrOf(d.Get("name").(string))`). diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json deleted file mode 100644 index 7f251b343..000000000 --- a/vcdTestAccVcdCseKubernetesCluster.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "token_type": "API Token", - "refresh_token": "mKtvcgS9La327wFyPXcmK51qgHtySyPE", - "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-01-15T17:15:48+01:00" - } \ No newline at end of file From 11437830dcecae8cdb849b4f477c3a665cecf97c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 16:29:10 +0100 Subject: [PATCH 023/156] Fixes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 12 ++++++------ vcd/resource_vcd_cse_kubernetes_cluster_test.go | 5 ++++- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 1073d14e0..1681d0245 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -290,14 +290,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Optional: true, Default: 60, Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), }, "delete_timeout_minutes": { Type: schema.TypeInt, Optional: true, Default: 10, Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 
0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (unlimited)"), + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), }, "state": { Type: schema.TypeString, @@ -318,7 +318,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { } } -// getCseRdeTypeVersions gets the RDE Type versions. First returned parameter is VCDKEConfig, second is CAPVCDCluster +// getCseRdeTypeVersions gets the RDE Type versions. First returned parameter is VCDKEConfig, second is CAPVCDCluster, third is CAPVCD Behavior version func getCseRdeTypeVersions(d *schema.ResourceData) (string, string, string) { versions := cseVersions[d.Get("cse_version").(string)] return versions[0], versions[1], versions[2] @@ -351,7 +351,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. - // We could use some other ways of filtering, but ID is the best and most accurate. + // We could use some other ways of filtering, but ID is the only accurate. d.SetId(rde.DefinedEntity.ID) _, err = waitForClusterState(vcdClient, d, rde.DefinedEntity.ID, "provisioned", "error") @@ -371,7 +371,7 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str currentState := "" start := time.Now() - for elapsed <= time.Duration(timeout) || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever rde, err := vcdClient.GetRdeById(rdeId) if err != nil { return "", err @@ -394,7 +394,7 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str time.Sleep(50 * time.Second) elapsed = time.Since(start) } - return "", fmt.Errorf("timeout of %d seconds reached, latest cluster state obtained was '%s'", timeout, currentState) + return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) } func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 3c7c5d464..1885466b7 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -90,7 +90,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}65" + name = "{{.Name}}66" file_name = "{{.TokenFile}}" allow_token_file = true } @@ -137,5 +137,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { auto_repair_on_errors = false node_health_check = false + + create_timeout_minutes = 0 + delete_timeout_minutes = 0 } ` From ca18adefe537c593f5ebb3dcfdd3657ec9c747f5 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 16:48:10 +0100 Subject: [PATCH 024/156] Refactor delete Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 86 ++++++++++------------ 1 file changed, 37 insertions(+), 49 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 1681d0245..1248a802a 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -379,20 +379,22 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str 
currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") if err != nil { util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) - time.Sleep(50 * time.Second) - continue - } - for _, stateToWaitFor := range statesToWaitFor { - if currentState == "error" && stateToWaitFor == "error" && d.Get("auto_repair_on_errors").(bool) { - // We do nothing, just keep waiting for the cluster to auto-recover and hopefully be in another currentState before timeout - break - } - if currentState == stateToWaitFor { - return currentState, nil + // We ignore this error, as eventually the state should be populated + } else { + for _, stateToWaitFor := range statesToWaitFor { + if currentState == "error" && stateToWaitFor == "error" && d.Get("auto_repair_on_errors").(bool) { + // We do nothing, just keep waiting for the cluster to auto-recover and hopefully be in another currentState before timeout + break + } + if currentState == stateToWaitFor { + return currentState, nil + } } } - time.Sleep(50 * time.Second) + elapsed = time.Since(start) + time.Sleep(50 * time.Second) + } return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) } @@ -521,54 +523,40 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m // We need to do this operation with retries due to the mutex mechanism VCD has (ETags). // We may hit an error if CSE Server is doing any operation in the background and we attempt to mark the cluster for deletion, // so we need to insist several times. - _, err := runWithRetry( - fmt.Sprintf("marking the cluster %s for deletion", d.Get("name").(string)), - "error marking the cluster for deletion", - 30*time.Second, - nil, - func() (any, error) { - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } + var elapsed time.Duration + timeout := d.Get("delete_timeout_minutes").(int) - vcdKe, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") - if err != nil { - return nil, fmt.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE: %s", err) + start := time.Now() + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + if govcd.IsNotFound(err) { + break // The RDE is gone, so the process is completed and there's nothing more to do } + return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + vcdKe, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") + if err != nil { + return diag.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE '%s': %s", d.Id(), err) + } + + if !vcdKe["markForDelete"].(bool) || !vcdKe["forceDelete"].(bool) { + // Mark the cluster for deletion vcdKe["markForDelete"] = true vcdKe["forceDelete"] = true rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"] = vcdKe - err = rde.Update(*rde.DefinedEntity) if err != nil { - return nil, err - } - return nil, nil - }, - ) - if err != nil { - return diag.FromErr(err) - } - _, err = runWithRetry( - fmt.Sprintf("checking the cluster %s is correctly marked for deletion", d.Get("name").(string)), - "error completing the deletion of the cluster", - time.Duration(d.Get("delete_timeout_minutes").(int))*time.Minute, - nil, - func() 
(any, error) { - _, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - if govcd.IsNotFound(err) { - return nil, nil // All is correct, the cluster RDE is gone, so it is deleted + if strings.Contains(strings.ToLower(err.Error()), "etag") { + continue // We ignore any ETag error. This just means a clash between CSE Server and Terraform, we just try again } - return nil, fmt.Errorf("the cluster with ID '%s' is still present in VCD but it is unreadable: %s", d.Id(), err) + return diag.Errorf("could not mark the Kubernetes cluster with ID '%s' to be deleted: %s", d.Id(), err) } + } - return nil, fmt.Errorf("the cluster with ID '%s' is marked for deletion but still present in VCD", d.Id()) - }, - ) - if err != nil { - return diag.FromErr(err) + time.Sleep(30 * time.Second) + elapsed = time.Since(start) } return nil } From 5ac51149acdf872b965c5db0aa92a484cfc6da7c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 17:50:23 +0100 Subject: [PATCH 025/156] Added latest_event attribute Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 52 ++++++++++++++----- ...esource_vcd_cse_kubernetes_cluster_test.go | 17 +++++- .../r/cse_kubernetes_cluster.html.markdown | 1 + 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 1248a802a..320ce0292 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -309,6 +309,11 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, + "latest_event": { + Type: schema.TypeString, + Computed: true, + Description: "The latest event that occurred in the lifetime of the cluster", + }, "raw_cluster_rde_json": { Type: schema.TypeString, Computed: true, @@ -391,6 +396,13 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str } } } + eventSet, _ := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") + if len(eventSet) > 1 && eventSet[len(eventSet)-1] != nil { + latestEvent, _ := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") + if latestEvent != "" { + fmt.Printf("[DEBUG] waiting for cluster to be in one of these states: %v. 
Latest event: '%s'", statesToWaitFor, latestEvent) + } + } elapsed = time.Since(start) time.Sleep(50 * time.Second) @@ -423,12 +435,12 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m invocationResult := map[string]interface{}{} err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}, invocationResult) if err != nil { - return diag.Errorf("could not retrieve Kubeconfig: %s", err) + return diag.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) } kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") if err != nil { - return diag.Errorf("could not retrieve Kubeconfig: %s", err) + return diag.Errorf("could not retrieve Kubeconfig for Kubernetes cluster with ID '%s': %s", d.Id(), err) } dSet(d, "kubeconfig", kubeconfig) } else { @@ -439,7 +451,6 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m }) } - // This must be the last step, so it has the most possible elements jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) if err != nil { diags = append(diags, diag.Errorf("could not save the cluster '%s' raw RDE contents into 'raw_cluster_rde_json' attribute: %s", rde.DefinedEntity.ID, err)...) @@ -449,6 +460,17 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m } dSet(d, "raw_cluster_rde_json", jsonEntity) + // This must be the last step, so it is really the last event + eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") + if err != nil { + return diag.Errorf("could not retrieve the event set of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") + if err != nil { + return diag.Errorf("could not retrieve the latest event of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + dSet(d, "latest_event", latestEvent) + d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability return nil } @@ -511,32 +533,30 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) } - return diag.Errorf("not implemented") + return resourceVcdCseKubernetesRead(ctx, d, meta) } // resourceVcdCseKubernetesDelete deletes a CSE Kubernetes cluster. To delete a Kubernetes cluster, one must send // the flags "markForDelete" and "forceDelete" back to true, so the CSE Server is able to delete all cluster elements -// and perform a cleanup. Hence, this function sends these properties and waits for deletion. +// and perform a cleanup. Hence, this function sends an update of just these two properties and waits for the cluster RDE +// to be gone. func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) + vcdKe := map[string]interface{}{} - // We need to do this operation with retries due to the mutex mechanism VCD has (ETags). - // We may hit an error if CSE Server is doing any operation in the background and we attempt to mark the cluster for deletion, - // so we need to insist several times. 
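The removed comment above explains why these writes must tolerate ETag clashes: VCD applies optimistic locking to RDEs, and the CSE Server may be updating the same entity in the background. The refactored delete loop that follows keeps that behavior by retrying inside the polling loop. For illustration, the same pattern extracted into a standalone, hypothetical helper (the function name is made up, this is not the provider's literal code, and it assumes the package's `VCDClient` and `traverseMapAndGet` helpers plus the `fmt`, `strings` and `time` imports):

```go
func markClusterForDeletion(vcdClient *VCDClient, rdeId string) error {
	for attempt := 0; attempt < 10; attempt++ {
		rde, err := vcdClient.GetRdeById(rdeId) // a fresh read also fetches a fresh ETag
		if err != nil {
			return err
		}
		vcdKe, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe")
		if err != nil {
			return err
		}
		vcdKe["markForDelete"] = true
		vcdKe["forceDelete"] = true
		rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"] = vcdKe

		err = rde.Update(*rde.DefinedEntity)
		if err == nil {
			return nil // the CSE Server will now tear the cluster down
		}
		if !strings.Contains(strings.ToLower(err.Error()), "etag") {
			return err // a real failure, not a concurrency clash
		}
		time.Sleep(10 * time.Second) // clashed with a background CSE operation, retry
	}
	return fmt.Errorf("could not mark the cluster for deletion after repeated ETag clashes")
}
```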
var elapsed time.Duration timeout := d.Get("delete_timeout_minutes").(int) - start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies delete_timeout_minutes=0, we wait forever rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { if govcd.IsNotFound(err) { - break // The RDE is gone, so the process is completed and there's nothing more to do + return nil // The RDE is gone, so the process is completed and there's nothing more to do } return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - vcdKe, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") + vcdKe, err = traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") if err != nil { return diag.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE '%s': %s", d.Id(), err) } @@ -558,7 +578,12 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m time.Sleep(30 * time.Second) elapsed = time.Since(start) } - return nil + + // We give a hint to the user whenever possible + if len(vcdKe) >= 2 && vcdKe["markForDelete"].(bool) && vcdKe["forceDelete"].(bool) { + return diag.Errorf("timeout of %d minutes reached, the cluster was successfully marked for deletion but was not removed in time", timeout) + } + return diag.Errorf("timeout of %d minutes reached, the cluster was not marked for deletion, please try again", timeout) } // getCseKubernetesClusterEntityMap gets the payload for the RDE that manages the Kubernetes cluster, so it @@ -818,6 +843,7 @@ func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeCon return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) } result.OvaName = vAppTemplate.VAppTemplate.Name + // TODO: Validate that it is a Kubernetes ova // Searches for the TKG components versions in the tkgMap with the OVA name details diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 1885466b7..fdbc7b7de 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -3,6 +3,7 @@ package vcd import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" "strings" "testing" @@ -17,6 +18,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Skip("CSE tests deactivated, skipping " + t.Name()) } + tokenFilename := getCurrentDir() + t.Name() + ".json" + var params = StringMap{ "Name": strings.ToLower(t.Name()), "OvaCatalog": testConfig.Cse.OvaCatalog, @@ -26,7 +29,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "Vdc": testConfig.Cse.Vdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, - "TokenFile": getCurrentDir() + t.Name() + ".json", + "TokenFile": tokenFilename, } testParamsNotEmpty(t, params) @@ -38,6 +41,16 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } resource.Test(t, resource.TestCase{ ProviderFactories: testAccProviders, + CheckDestroy: func(state *terraform.State) error { + // Clean the API Token file + if fileExists(tokenFilename) { + err := os.Remove(tokenFilename) + if err != nil { + return err + } + } + return nil + }, Steps: []resource.TestStep{ { Config: configText, @@ -90,7 +103,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" 
"token" { - name = "{{.Name}}66" + name = "{{.Name}}71" file_name = "{{.TokenFile}}" allow_token_file = true } diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 6c85af67b..0f4718db0 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -200,6 +200,7 @@ The following attributes are available for consumption as computed attributes: * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned` * `raw_cluster_rde_json` - The raw JSON representation of this Kubernetes cluster inside the [RDE](/providers/vmware/vcd/latest/docs/resources/rde) that CSE uses to operate the cluster +* `latest_event` - The latest event that occurred in the lifetime of the cluster ## Importing From cac4eaa62be6d7a436f3e28dee029ac5f3c7173b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 17:52:20 +0100 Subject: [PATCH 026/156] Added latest_event attribute Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index fdbc7b7de..3cb09a0f7 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -3,7 +3,7 @@ package vcd import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "fmt" "os" "strings" "testing" @@ -19,6 +19,15 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } tokenFilename := getCurrentDir() + t.Name() + ".json" + defer func() { + // Clean the API Token file + if fileExists(tokenFilename) { + err := os.Remove(tokenFilename) + if err != nil { + fmt.Printf("could not delete API token file '%s', please delete it manually", tokenFilename) + } + } + }() var params = StringMap{ "Name": strings.ToLower(t.Name()), @@ -41,16 +50,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } resource.Test(t, resource.TestCase{ ProviderFactories: testAccProviders, - CheckDestroy: func(state *terraform.State) error { - // Clean the API Token file - if fileExists(tokenFilename) { - err := os.Remove(tokenFilename) - if err != nil { - return err - } - } - return nil - }, Steps: []resource.TestStep{ { Config: configText, From 742a8ef363b0b2596fb27aaae5d4208f44d872df Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 16 Jan 2024 18:56:28 +0100 Subject: [PATCH 027/156] # Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 4 ++-- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 320ce0292..67eec9d97 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -411,7 +411,7 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) } -func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) var diags diag.Diagnostics _, _, capvcdBehaviorVersion := 
getCseRdeTypeVersions(d) @@ -447,7 +447,7 @@ func resourceVcdCseKubernetesRead(ctx context.Context, d *schema.ResourceData, m diags = append(diags, diag.Diagnostic{ Severity: diag.Warning, Summary: "Kubernetes cluster not in 'provisioned' state", - Detail: fmt.Sprintf("Kubernetes cluster with ID is in '%s' state, won't be able to read/refresh the Kubeconfig", d.Id()), + Detail: fmt.Sprintf("Kubernetes cluster with ID is in '%s' state, won't be able to read/refresh the Kubeconfig nor make updates", d.Id()), }) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 3cb09a0f7..89966c554 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -102,7 +102,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}71" + name = "{{.Name}}72" file_name = "{{.TokenFile}}" allow_token_file = true } From 904f08c55603bdc0ad7e0274d625734bf3cace4b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 11:10:04 +0100 Subject: [PATCH 028/156] Improve error handling Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 49 ++++++++++++------- ...esource_vcd_cse_kubernetes_cluster_test.go | 9 +++- 2 files changed, 40 insertions(+), 18 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 67eec9d97..2a7afef5d 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -359,18 +359,21 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour // We could use some other ways of filtering, but ID is the only accurate. d.SetId(rde.DefinedEntity.ID) - _, err = waitForClusterState(vcdClient, d, rde.DefinedEntity.ID, "provisioned", "error") + state, err := waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) if err != nil { return diag.Errorf("Kubernetes cluster creation finished with errors: %s", err) } + if state != "provisioned" { + return diag.Errorf("Kubernetes cluster creation failed, cluster is not in 'provisioned' state, but '%s'", state) + } return resourceVcdCseKubernetesRead(ctx, d, meta) } -// waitForClusterState waits for the Kubernetes cluster to be in one of the specified states, either indefinitely (if "create_timeout_minutes=0") +// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "create_timeout_minutes=0") // or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep // waiting. 
-func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId string, statesToWaitFor ...string) (string, error) { +func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { var elapsed time.Duration timeout := d.Get("create_timeout_minutes").(int) currentState := "" @@ -386,21 +389,33 @@ func waitForClusterState(vcdClient *VCDClient, d *schema.ResourceData, rdeId str util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) // We ignore this error, as eventually the state should be populated } else { - for _, stateToWaitFor := range statesToWaitFor { - if currentState == "error" && stateToWaitFor == "error" && d.Get("auto_repair_on_errors").(bool) { - // We do nothing, just keep waiting for the cluster to auto-recover and hopefully be in another currentState before timeout - break - } - if currentState == stateToWaitFor { - return currentState, nil + + // Add some traceability in the logs and Terraform output about the progress of the cluster provisioning + eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") + if err == nil { + latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") + if err != nil { + util.Logger.Printf("[DEBUG] waiting for cluster to be provisioned. Latest event: '%s'", latestEvent) } } - } - eventSet, _ := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") - if len(eventSet) > 1 && eventSet[len(eventSet)-1] != nil { - latestEvent, _ := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") - if latestEvent != "" { - fmt.Printf("[DEBUG] waiting for cluster to be in one of these states: %v. Latest event: '%s'", statesToWaitFor, latestEvent) + + switch currentState { + case "provisioned": + return currentState, nil + case "error": + // We just finish if auto-recovery is disabled, otherwise we just let CSE fixing things in background + if !d.Get("auto_repair_on_errors").(bool) { + // Try to give feedback about what went wrong, which is located in a set of events in the RDE payload + latestError := "could not parse error event" + errorSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.capvcd.errorSet") + if err == nil { + latestError, err = traverseMapAndGet[string](errorSet[len(errorSet)-1], "additionalDetails.error") + if err != nil { + latestError = "could not parse error event" + } + } + return "", fmt.Errorf("got an error and 'auto_repair_on_errors=false', aborting. 
Latest error: %s", latestError) + } } } @@ -550,7 +565,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies delete_timeout_minutes=0, we wait forever rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { - if govcd.IsNotFound(err) { + if govcd.ContainsNotFound(err) { return nil // The RDE is gone, so the process is completed and there's nothing more to do } return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 89966c554..a017f44ed 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -60,6 +60,13 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { postTestChecks(t) } +// TODO: Test: +// Basic (DONE) +// With machine health checks +// With machine health checks +// Without storage class +// With virtual IP and control plane IPs +// Nodes With vGPU policies const testAccVcdCseKubernetesCluster = ` # skip-binary-test - This one requires a very special setup @@ -102,7 +109,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}72" + name = "{{.Name}}75" file_name = "{{.TokenFile}}" allow_token_file = true } From 98683b4def1d5d9aa199292bcda6ec8a53845746 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 14:39:39 +0100 Subject: [PATCH 029/156] Prepare unit tests Signed-off-by: abarreiro --- vcd/cse_util.go | 375 +++++++++ vcd/cse_util_unit_test.go | 121 +++ vcd/resource_vcd_cse_kubernetes_cluster.go | 917 +++++++-------------- 3 files changed, 775 insertions(+), 638 deletions(-) create mode 100644 vcd/cse_util.go create mode 100644 vcd/cse_util_unit_test.go diff --git a/vcd/cse_util.go b/vcd/cse_util.go new file mode 100644 index 000000000..d8760f463 --- /dev/null +++ b/vcd/cse_util.go @@ -0,0 +1,375 @@ +package vcd + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/vmware/go-vcloud-director/v2/govcd" + "strconv" + "strings" + "text/template" +) + +// tkgVersionBundle is a type that contains all the versions of the components of +// a Kubernetes cluster that can be obtained with the vApp Template name, downloaded +// from VMware Customer connect: +// https://customerconnect.vmware.com/downloads/details?downloadGroup=TKG-240&productId=1400 +type tkgVersionBundle struct { + EtcdVersion string + CoreDnsVersion string + TkgVersion string + TkrVersion string + KubernetesVersion string +} + +// getTkgVersionBundleFromVAppTemplateName returns a tkgVersionBundle with the details of +// all the Kubernetes cluster components versions given a valid vApp Template name, that should +// correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. 
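+//
+// Example (illustrative), using a vApp Template name that is also exercised in the unit tests:
+//
+//	bundle, err := getTkgVersionBundleFromVAppTemplateName("ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8")
+//	// err == nil, bundle.KubernetesVersion == "v1.26.8+vmware.1", bundle.TkgVersion == "v2.3.1",
+//	// bundle.TkrVersion == "v1.26.8---vmware.1-tkg.1", bundle.EtcdVersion == "v3.5.6_vmware.20", bundle.CoreDnsVersion == "v1.9.3_vmware.16"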
+func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { + versionsMap := map[string]map[string]string{ + "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { + "tkg": "v2.2.0", + "etcd": "v3.5.6_vmware.9", + "coreDns": "v1.9.3_vmware.8", + }, + "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { + "tkg": "v2.4.0", + "etcd": "v3.5.7_vmware.6", + "coreDns": "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { + "tkg": "v2.4.0", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { + "tkg": "v2.3.1", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.9.3_vmware.16", + }, + } + + result := tkgVersionBundle{} + + if strings.Contains(ovaName, "photon") { + return result, fmt.Errorf("the vApp Template '%s' uses Photon, and it is not supported", ovaName) + } + + cutPosition := strings.LastIndex(ovaName, "kube-") + if cutPosition < 0 { + return result, fmt.Errorf("the vApp Template '%s' is not a Kubernetes template OVA", ovaName) + } + parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] + if _, ok := versionsMap[parsedOvaName]; !ok { + return result, fmt.Errorf("the Kubernetes OVA '%s' is not supported", parsedOvaName) + } + + // The map checking above guarantees that all splits and replaces will work + result.KubernetesVersion = strings.Split(parsedOvaName, "-")[0] + result.TkrVersion = strings.ReplaceAll(strings.Split(parsedOvaName, "-")[0], "+", "---") + "-" + strings.Split(parsedOvaName, "-")[1] + result.TkgVersion = versionsMap[parsedOvaName]["tkg"] + result.EtcdVersion = versionsMap[parsedOvaName]["etcd"] + result.CoreDnsVersion = versionsMap[parsedOvaName]["coreDns"] + return result, nil +} + +// createClusterDto is a helper struct that contains all the required elements to successfully create a Kubernetes cluster using CSE. +type createClusterDto struct { + Name string + VcdUrl string + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) + VCDKEConfig struct { + MaxUnhealthyNodesPercentage string + NodeStartupTimeout string + NodeNotReadyTimeout string + NodeUnknownTimeout string + ContainerRegistryUrl string + } + TkgVersion tkgVersionBundle + Owner string + ApiToken string +} + +// getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information +// from the Terraform resource data and the target VCD. 
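+//
+// Illustrative usage, as done in resourceVcdCseKubernetesClusterCreate with the RDE Type versions that
+// correspond to the chosen "cse_version":
+//
+//	clusterDetails, err := getClusterCreateDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion)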
+func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion string) (*createClusterDto, error) { + result := &createClusterDto{} + result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema + + name := d.Get("name").(string) + result.Name = name + + org, err := vcdClient.GetAdminOrgFromResource(d) + if err != nil { + return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) + } + result.Org = org + + vdcId := d.Get("vdc_id").(string) + vdc, err := org.GetVDCById(vdcId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) + } + result.VdcName = vdc.Vdc.Name + + vAppTemplateId := d.Get("ova_id").(string) + vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) + } + result.OvaName = vAppTemplate.VAppTemplate.Name + + tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vAppTemplate.VAppTemplate.Name) + if err != nil { + return nil, err + } + result.TkgVersion = tkgVersions + + catalogName, err := vAppTemplate.GetCatalogName() + if err != nil { + return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) + } + result.CatalogName = catalogName + + networkId := d.Get("network_id").(string) + network, err := vdc.GetOrgVdcNetworkById(networkId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) + } + result.NetworkName = network.OrgVDCNetwork.Name + + rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterRdeTypeVersion) + if err != nil { + return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterRdeTypeVersion, err) + } + result.RdeType = rdeType + + // Builds a map that relates storage profiles IDs (the schema uses them to build a healthy Terraform dependency graph) + // with their corresponding names (the cluster YAML and CSE in general uses names only). + // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, + // as there can be a lot of them. 
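+	// For example (illustrative IDs and names), after the lookups below the cache could hold entries such as
+	// "urn:vcloud:vdcstorageProfile:<uuid>" -> "*" or "urn:vcloud:vdcComputePolicy:<uuid>" -> "TKG small",
+	// so the YAML generation can resolve names without issuing extra API calls.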
+ if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { + storageProfileId := d.Get("default_storage_class.0.storage_profile_id").(string) + storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) + } + result.UrnToNamesCache[storageProfileId] = storageProfile.Name + } + controlPlaneStorageProfileId := d.Get("control_plane.0.storage_profile_id").(string) + if _, ok := result.UrnToNamesCache[controlPlaneStorageProfileId]; !ok { // Only query if not already present + storageProfile, err := vcdClient.GetStorageProfileById(controlPlaneStorageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[controlPlaneStorageProfileId] = storageProfile.Name + } + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + nodePoolStorageProfileId := nodePool["storage_profile_id"].(string) + if _, ok := result.UrnToNamesCache[nodePoolStorageProfileId]; !ok { // Only query if not already present + storageProfile, err := vcdClient.GetStorageProfileById(nodePoolStorageProfileId) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[nodePoolStorageProfileId] = storageProfile.Name + } + } + + // Builds a map that relates Compute Policies IDs (the schema uses them to build a healthy Terraform dependency graph) + // with their corresponding names (the cluster YAML and CSE in general uses names only). + // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, + // as there can be a lot of them. 
+ if controlPlaneSizingPolicyId, isSet := d.GetOk("control_plane.0.sizing_policy_id"); isSet { + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlaneSizingPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + if controlPlanePlacementPolicyId, isSet := d.GetOk("control_plane.0.placement_policy_id"); isSet { + if _, ok := result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlanePlacementPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + if nodePoolSizingPolicyId, isSet := nodePool["sizing_policy_id"]; isSet { + if _, ok := result.UrnToNamesCache[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolSizingPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if nodePoolPlacementPolicyId, isSet := nodePool["placement_policy_id"]; isSet { + if _, ok := result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolPlacementPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if nodePoolVGpuPolicyId, isSet := nodePool["vgpu_policy_id"]; isSet { + if _, ok := result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolVGpuPolicyId.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) + } + result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + } + + rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigRdeTypeVersion, "vcdKeConfig") + if err != nil { + return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigRdeTypeVersion, err) + } + if len(rdes) != 1 { + return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) + } + + // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have + // to deal with it again. 
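+	// The unmarshalling below assumes a VCDKEConfig entity shaped roughly as follows (values are illustrative):
+	//
+	//	{"profiles": [{"K8Config": {"mhc": {"maxUnhealthyNodes": 100, "nodeStartupTimeout": 900, ...}},
+	//	               "containerRegistryUrl": "projects.registry.vmware.com"}]}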
+ type vcdKeConfigType struct { + Profiles []struct { + K8Config struct { + Mhc struct { + MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` + NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` + NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` + NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` + } `json:"mhc:omitempty"` + } `json:"K8Config:omitempty"` + ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` + } `json:"profiles,omitempty"` + } + + var vcdKeConfig vcdKeConfigType + rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) + if err != nil { + return nil, err + } + + err = json.Unmarshal(rawData, &vcdKeConfig) + if err != nil { + return nil, err + } + + if len(vcdKeConfig.Profiles) != 1 { + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) + } + + result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) + result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) + result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) + result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) + result.VCDKEConfig.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) + + owner, ok := d.GetOk("owner") + if !ok { + sessionInfo, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) + } + owner = sessionInfo.User.Name + } + result.Owner = owner.(string) + + apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) + if err != nil { + return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) + } + result.ApiToken = apiToken.RefreshToken + + result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) + return result, nil +} + +// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded +// in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the createClusterDto to +// populate several Go templates and build a final YAML. 
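+//
+// Illustrative usage, as done from getCseKubernetesClusterEntityMap when building the RDE payload:
+//
+//	capiYaml, err := generateCapiYaml(d, clusterDetails)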
+func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { + // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine + sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(cseClusterYamlTemplate) + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(sanitizedTemplate)) + + nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) + if err != nil { + return "", err + } + + buf := &bytes.Buffer{} + args := map[string]string{ + "ClusterName": clusterDetails.Name, + "TargetNamespace": clusterDetails.Name + "-ns", + "TkrVersion": clusterDetails.TkgVersion.TkrVersion, + "TkgVersion": clusterDetails.TkgVersion.TkgVersion, + "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), + "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), + "PodCidr": d.Get("pods_cidr").(string), + "ServiceCidr": d.Get("services_cidr").(string), + "VcdSite": clusterDetails.VcdUrl, + "Org": clusterDetails.Org.AdminOrg.Name, + "OrgVdc": clusterDetails.VdcName, + "OrgVdcNetwork": clusterDetails.NetworkName, + "Catalog": clusterDetails.CatalogName, + "VAppTemplate": clusterDetails.OvaName, + "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], + "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], + "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], + "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), + "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), + "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, + "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, + "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, + "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, + "SshPublicKey": d.Get("ssh_public_key").(string), + } + + if _, ok := d.GetOk("control_plane.0.ip"); ok { + args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) + } + if _, ok := d.GetOk("virtual_ip_subnet"); ok { + args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) + } + + if d.Get("node_health_check").(bool) { + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly + args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix + args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix + args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix + } + + if err := capiYamlEmpty.Execute(buf, args); err != nil { + return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) + } + + prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) + + // This encoder is used instead of a standard json.Marshal as the YAML contains special + // characters that are not encoded properly, such as '<'. 
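+	// Note: a plain json.Marshal would escape characters like '<', '>' and '&' to \u003c, \u003e and \u0026,
+	// while the encoder below with SetEscapeHTML(false) keeps them verbatim in the resulting string.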
+ buf.Reset() + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err = enc.Encode(prettyYaml) + if err != nil { + return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) + } + + return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil +} diff --git a/vcd/cse_util_unit_test.go b/vcd/cse_util_unit_test.go new file mode 100644 index 000000000..40f9944df --- /dev/null +++ b/vcd/cse_util_unit_test.go @@ -0,0 +1,121 @@ +//go:build unit || ALL + +package vcd + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "reflect" + "testing" +) + +// Test_getTkgVersionBundleFromVAppTemplateName is a unit test that tests the getTkgVersionBundleFromVAppTemplateName function +func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { + tests := []struct { + name string + ovaName string + want tkgVersionBundle + wantErr string + }{ + { + name: "wrong ova name", + ovaName: "randomOVA", + want: tkgVersionBundle{}, + wantErr: "the vApp Template 'randomOVA' is not a Kubernetes template OVA", + }, + { + name: "not supported ova", + ovaName: "ubuntu-2004-kube-v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st", + want: tkgVersionBundle{}, + wantErr: "the Kubernetes OVA 'v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st' is not supported", + }, + { + name: "not supported photon ova", + ovaName: "photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46", + want: tkgVersionBundle{}, + wantErr: "the vApp Template 'photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46' uses Photon, and it is not supported", + }, + { + name: "supported ova", + ovaName: "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8", + want: tkgVersionBundle{ + EtcdVersion: "v3.5.6_vmware.20", + CoreDnsVersion: "v1.9.3_vmware.16", + TkgVersion: "v2.3.1", + TkrVersion: "v1.26.8---vmware.1-tkg.1", + KubernetesVersion: "v1.26.8+vmware.1", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getTkgVersionBundleFromVAppTemplateName(tt.ovaName) + if err != nil { + if tt.wantErr == "" { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got error = %v, but should have not failed", err) + } + if err.Error() != tt.wantErr { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() error = %v, wantErr = %v", err, tt.wantErr) + } + } + if !reflect.DeepEqual(got, tt.want) { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got = %v, want = %v", got, tt.want) + } + }) + } +} + +// Test_generateCapiYaml tests generateCapiYaml function +func Test_generateCapiYaml(t *testing.T) { + type args struct { + resourceData map[string]interface{} + clusterDetails *createClusterDto + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "foo", + args: args{ + resourceData: map[string]interface{}{}, + clusterDetails: &createClusterDto{ + Name: "", + VcdUrl: "", + Org: nil, + VdcName: "", + OvaName: "", + CatalogName: "", + NetworkName: "", + RdeType: nil, + UrnToNamesCache: nil, + VCDKEConfig: struct { + MaxUnhealthyNodesPercentage string + NodeStartupTimeout string + NodeNotReadyTimeout string + NodeUnknownTimeout string + ContainerRegistryUrl string + }{}, + TkgVersion: tkgVersionBundle{}, + Owner: "", + ApiToken: "", + }, + }, + want: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := schema.TestResourceDataRaw(t, resourceVcdCseKubernetesClusterSchema, tt.args.resourceData) + got, err := generateCapiYaml(d, 
tt.args.clusterDetails) + if (err != nil) != tt.wantErr { + t.Errorf("generateCapiYaml() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("generateCapiYaml() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 2a7afef5d..549619ece 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -4,7 +4,6 @@ import ( "bytes" "context" _ "embed" - "encoding/base64" "encoding/json" "fmt" "github.com/hashicorp/go-cty/cty" @@ -37,289 +36,286 @@ var cseVersions = map[string][]string{ "4.2": {"1.1.0", "1.2.0", "1.0.0"}, } -func resourceVcdCseKubernetesCluster() *schema.Resource { - return &schema.Resource{ - CreateContext: resourceVcdCseKubernetesClusterCreate, - ReadContext: resourceVcdCseKubernetesRead, - UpdateContext: resourceVcdCseKubernetesUpdate, - DeleteContext: resourceVcdCseKubernetesDelete, - Schema: map[string]*schema.Schema{ - "cse_version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(getKeys(cseVersions), false), - Description: "The CSE version to use", - }, - "runtime": { - Type: schema.TypeString, - Optional: true, - Default: "tkg", - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE - Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The name of the Kubernetes cluster", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "ova_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", - }, - "org": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + - "level. Useful when connected as sysadmin working across different organizations", - }, - "vdc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the VDC that hosts the Kubernetes cluster", - }, - "network_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the network that the Kubernetes cluster will use", - }, - "owner": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", - }, - "api_token_file": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. 
Be careful about this file, as it contains sensitive information", - }, - "ssh_public_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The SSH public key used to login into the cluster nodes", - }, - "control_plane": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "machine_count": { - Type: schema.TypeInt, - Optional: true, - Default: 3, // As suggested in UI - Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", - ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { - value, ok := v.(int) - if !ok { - return diag.Errorf("could not parse int value '%v' for control plane nodes", v) - } - if value < 1 || value%2 == 0 { - return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) - } - return nil - }, - }, - "disk_size_gi": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), - Description: "Disk size, in Gibibytes, for the control plane nodes. Must be at least 20", - }, - "sizing_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Sizing policy for the control plane nodes", - }, - "placement_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Placement policy for the control plane nodes", - }, - "storage_profile_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Storage profile for the control plane nodes", - }, - "ip": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "IP for the control plane", - ValidateFunc: checkEmptyOrSingleIP(), - }, +var resourceVcdCseKubernetesClusterSchema = map[string]*schema.Schema{ + "cse_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(getKeys(cseVersions), false), + Description: "The CSE version to use", + }, + "runtime": { + Type: schema.TypeString, + Optional: true, + Default: "tkg", + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE + Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the Kubernetes cluster", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "ova_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, + "org": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + + "level. 
Useful when connected as sysadmin working across different organizations", + }, + "vdc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the network that the Kubernetes cluster will use", + }, + "owner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", + }, + "api_token_file": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 3, // As suggested in UI + Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil }, }, - }, - "node_pool": { - Type: schema.TypeSet, - Required: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "The name of this node pool", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "machine_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, // As suggested in UI - Description: "The number of nodes that this node pool has. 
Must be higher than 0", - ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), - }, - "disk_size_gi": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - Description: "Disk size, in Gibibytes, for the control plane nodes", - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), - }, - "sizing_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Sizing policy for the control plane nodes", - }, - "placement_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Placement policy for the control plane nodes", - }, - "vgpu_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "vGPU policy for the control plane nodes", - }, - "storage_profile_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Storage profile for the control plane nodes", - }, - }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + Description: "Disk size, in Gibibytes, for the control plane nodes. Must be at least 20", }, - }, - "default_storage_class": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "storage_profile_id": { - Required: true, - Type: schema.TypeString, - Description: "ID of the storage profile to use for the storage class", - }, - "name": { - Required: true, - Type: schema.TypeString, - Description: "Name to give to this storage class", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "reclaim_policy": { - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), - Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 
'retain' does not, and the volume can be manually reclaimed", - }, - "filesystem": { - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), - Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", - }, - }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "IP for the control plane", + ValidateFunc: checkEmptyOrSingleIP(), }, }, - "pods_cidr": { - Type: schema.TypeString, - Optional: true, - Default: "100.96.0.0/11", // As suggested in UI - Description: "CIDR that the Kubernetes pods will use", - }, - "services_cidr": { - Type: schema.TypeString, - Optional: true, - Default: "100.64.0.0/13", // As suggested in UI - Description: "CIDR that the Kubernetes services will use", - }, - "virtual_ip_subnet": { - Type: schema.TypeString, - Optional: true, - Description: "Virtual IP subnet for the cluster", - }, - "auto_repair_on_errors": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", - }, - "node_health_check": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", - }, - "create_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), - }, - "delete_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 10, - Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: "The state of the cluster, can be 'provisioning', 'provisioned' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", - }, - "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", - }, - "latest_event": { - Type: schema.TypeString, - Computed: true, - Description: "The latest event that occurred in the lifetime of the cluster", + }, + }, + "node_pool": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of this node pool", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, // As suggested in UI + Description: "The number of nodes that this node pool has. Must be higher than 0", + ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), + }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + Description: "Disk size, in Gibibytes, for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "vgpu_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "vGPU policy for the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, }, - "raw_cluster_rde_json": { - Type: schema.TypeString, - Computed: true, - Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", + }, + }, + "default_storage_class": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile_id": { + Required: true, + Type: schema.TypeString, + Description: "ID of the storage profile to use for the storage class", + }, + "name": { + Required: true, + Type: schema.TypeString, + Description: "Name to give to this storage class", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "reclaim_policy": { + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), + Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 
'retain' does not, and the volume can be manually reclaimed", + }, + "filesystem": { + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + }, }, }, + }, + "pods_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.96.0.0/11", // As suggested in UI + Description: "CIDR that the Kubernetes pods will use", + }, + "services_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.64.0.0/13", // As suggested in UI + Description: "CIDR that the Kubernetes services will use", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Optional: true, + Description: "Virtual IP subnet for the cluster", + }, + "auto_repair_on_errors": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", + }, + "node_health_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", + }, + "create_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), + }, + "delete_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 10, + Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, + "raw_cluster_rde_json": { + Type: schema.TypeString, + Computed: true, + Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", + }, +} + +func resourceVcdCseKubernetesCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceVcdCseKubernetesClusterCreate, + ReadContext: resourceVcdCseKubernetesRead, + UpdateContext: resourceVcdCseKubernetesUpdate, + DeleteContext: resourceVcdCseKubernetesDelete, + Schema: resourceVcdCseKubernetesClusterSchema, } } @@ -333,7 +329,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour vcdClient := meta.(*VCDClient) vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion, _ := getCseRdeTypeVersions(d) - clusterDetails, err := createClusterInfoDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion) + clusterDetails, err := getClusterCreateDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion) if err != nil { return diag.Errorf("could not create Kubernetes cluster: %s", err) } @@ -357,11 +353,13 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. // We could use some other ways of filtering, but ID is the only accurate. + // Also, the RDE is created at this point, so Terraform should trigger an update/delete next. + // If the cluster can't be created due to errors, users should delete it and retry, like in UI. 
d.SetId(rde.DefinedEntity.ID) state, err := waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) if err != nil { - return diag.Errorf("Kubernetes cluster creation finished with errors: %s", err) + return diag.Errorf("Kubernetes cluster creation failed: %s", err) } if state != "provisioned" { return diag.Errorf("Kubernetes cluster creation failed, cluster is not in 'provisioned' state, but '%s'", state) @@ -462,7 +460,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met diags = append(diags, diag.Diagnostic{ Severity: diag.Warning, Summary: "Kubernetes cluster not in 'provisioned' state", - Detail: fmt.Sprintf("Kubernetes cluster with ID is in '%s' state, won't be able to read/refresh the Kubeconfig nor make updates", d.Id()), + Detail: fmt.Sprintf("Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), state), }) } @@ -475,17 +473,6 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met } dSet(d, "raw_cluster_rde_json", jsonEntity) - // This must be the last step, so it is really the last event - eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") - if err != nil { - return diag.Errorf("could not retrieve the event set of the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") - if err != nil { - return diag.Errorf("could not retrieve the latest event of the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - dSet(d, "latest_event", latestEvent) - d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability return nil } @@ -603,7 +590,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m // getCseKubernetesClusterEntityMap gets the payload for the RDE that manages the Kubernetes cluster, so it // can be created or updated. -func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *clusterInfoDto) (StringMap, error) { +func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *createClusterDto) (map[string]interface{}, error) { capiYaml, err := generateCapiYaml(d, clusterDetails) if err != nil { return nil, err @@ -647,82 +634,8 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cl return result.(map[string]interface{}), nil } -// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded -// in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the clusterInfoDto to -// populate several Go templates and build a final YAML. 
-func generateCapiYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { - // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine - sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(cseClusterYamlTemplate) - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(sanitizedTemplate)) - - nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) - if err != nil { - return "", err - } - - buf := &bytes.Buffer{} - args := map[string]string{ - "ClusterName": clusterDetails.Name, - "TargetNamespace": clusterDetails.Name + "-ns", - "TkrVersion": clusterDetails.TkgVersion.Tkr, - "TkgVersion": clusterDetails.TkgVersion.Tkg[0], - "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), - "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), - "PodCidr": d.Get("pods_cidr").(string), - "ServiceCidr": d.Get("services_cidr").(string), - "VcdSite": clusterDetails.VcdUrl, - "Org": clusterDetails.Org.AdminOrg.Name, - "OrgVdc": clusterDetails.VdcName, - "OrgVdcNetwork": clusterDetails.NetworkName, - "Catalog": clusterDetails.CatalogName, - "VAppTemplate": clusterDetails.OvaName, - "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], - "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], - "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], - "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), - "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), - "DnsVersion": clusterDetails.TkgVersion.CoreDns, - "EtcdVersion": clusterDetails.TkgVersion.Etcd, - "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, - "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, - "SshPublicKey": d.Get("ssh_public_key").(string), - } - - if _, ok := d.GetOk("control_plane.0.ip"); ok { - args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) - } - if _, ok := d.GetOk("virtual_ip_subnet"); ok { - args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) - } - - if d.Get("node_health_check").(bool) { - args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly - args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix - args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix - args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix - } - - if err := capiYamlEmpty.Execute(buf, args); err != nil { - return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) - } - - prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) - - // This encoder is used instead of a standard json.Marshal as the YAML contains special - // characters that are not encoded properly, such as '<'. 
- buf.Reset() - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err = enc.Encode(prettyYaml) - if err != nil { - return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) - } - - return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil -} - // generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. -func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto) (string, error) { +func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_NodePool").Parse(cseNodePoolTemplate)) resultYaml := "" buf := &bytes.Buffer{} @@ -763,275 +676,3 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *clusterInfoDto } return resultYaml, nil } - -// clusterInfoDto is a helper struct that contains all the required elements to successfully create and manage -// a Kubernetes cluster using CSE. -type clusterInfoDto struct { - Name string - VcdUrl string - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) - VCDKEConfig struct { - MaxUnhealthyNodesPercentage string - NodeStartupTimeout string - NodeNotReadyTimeout string - NodeUnknownTimeout string - ContainerRegistryUrl string - } - TkgVersion *tkgVersion - Owner string - ApiToken string -} - -// tkgVersion is an auxiliary structure used by the tkgMap variable to map -// a Kubernetes template OVA to some specific TKG components versions. -type tkgVersion struct { - Tkg []string - Tkr string - Etcd string - CoreDns string - KubernetesVersion string -} - -// tkgMap maps specific Kubernetes template OVAs to specific TKG components versions. -var tkgMap = map[string]tkgVersion{ - "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { - Tkg: []string{"v2.2.0"}, - Tkr: "v1.25.7---vmware.2-tkg.1", - Etcd: "v3.5.6_vmware.9", - CoreDns: "v1.9.3_vmware.8", - KubernetesVersion: "v1.25.7+vmware.2", - }, - "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { - Tkg: []string{"v2.4.0"}, - Tkr: "v1.27.5---vmware.1-tkg.1", - Etcd: "v3.5.7_vmware.6", - CoreDns: "v1.10.1_vmware.7", - KubernetesVersion: "v1.25.7+vmware.2", - }, - "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { - Tkg: []string{"v2.4.0"}, - Tkr: "v1.26.8---vmware.1-tkg.1", - Etcd: "v3.5.6_vmware.20", - CoreDns: "v1.10.1_vmware.7", - KubernetesVersion: "v1.25.7+vmware.2", - }, - "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { - Tkg: []string{"v2.3.1"}, - Tkr: "v1.26.8---vmware.1-tkg.2", - Etcd: "v3.5.6_vmware.20", - CoreDns: "v1.9.3_vmware.16", - KubernetesVersion: "v1.25.7+vmware.2", - }, -} - -// createClusterInfoDto creates and returns a clusterInfoDto object by obtaining all the required information -// from the Terraform resource data and the target VCD. 
-func createClusterInfoDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion string) (*clusterInfoDto, error) { - result := &clusterInfoDto{} - result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema - - name := d.Get("name").(string) - result.Name = name - - org, err := vcdClient.GetAdminOrgFromResource(d) - if err != nil { - return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) - } - result.Org = org - - vdcId := d.Get("vdc_id").(string) - vdc, err := org.GetVDCById(vdcId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) - } - result.VdcName = vdc.Vdc.Name - - vAppTemplateId := d.Get("ova_id").(string) - vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) - } - result.OvaName = vAppTemplate.VAppTemplate.Name - // TODO: Validate that it is a Kubernetes ova - - // Searches for the TKG components versions in the tkgMap with the OVA name details - - ovaCode := strings.ReplaceAll(vAppTemplate.VAppTemplate.Name, ".ova", "")[strings.LastIndex(vAppTemplate.VAppTemplate.Name, "kube-")+len("kube-"):] - tkgVersion, ok := tkgMap[ovaCode] - if !ok { - return nil, fmt.Errorf("could not retrieve the TKG version details from Kubernetes template '%s'. Please check whether the OVA '%s' is compatible", ovaCode, vAppTemplate.VAppTemplate.Name) - } - result.TkgVersion = &tkgVersion - - catalogName, err := vAppTemplate.GetCatalogName() - if err != nil { - return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) - } - result.CatalogName = catalogName - - networkId := d.Get("network_id").(string) - network, err := vdc.GetOrgVdcNetworkById(networkId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) - } - result.NetworkName = network.OrgVDCNetwork.Name - - rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterRdeTypeVersion) - if err != nil { - return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterRdeTypeVersion, err) - } - result.RdeType = rdeType - - // Builds a map that relates storage profiles IDs (the schema uses them to build a healthy Terraform dependency graph) - // with their corresponding names (the cluster YAML and CSE in general uses names only). - // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, - // as there can be a lot of them. 
- if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { - storageProfileId := d.Get("default_storage_class.0.storage_profile_id").(string) - storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) - } - result.UrnToNamesCache[storageProfileId] = storageProfile.Name - } - controlPlaneStorageProfileId := d.Get("control_plane.0.storage_profile_id").(string) - if _, ok := result.UrnToNamesCache[controlPlaneStorageProfileId]; !ok { // Only query if not already present - storageProfile, err := vcdClient.GetStorageProfileById(controlPlaneStorageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlaneStorageProfileId] = storageProfile.Name - } - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - nodePoolStorageProfileId := nodePool["storage_profile_id"].(string) - if _, ok := result.UrnToNamesCache[nodePoolStorageProfileId]; !ok { // Only query if not already present - storageProfile, err := vcdClient.GetStorageProfileById(nodePoolStorageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolStorageProfileId] = storageProfile.Name - } - } - - // Builds a map that relates Compute Policies IDs (the schema uses them to build a healthy Terraform dependency graph) - // with their corresponding names (the cluster YAML and CSE in general uses names only). - // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, - // as there can be a lot of them. 
- if controlPlaneSizingPolicyId, isSet := d.GetOk("control_plane.0.sizing_policy_id"); isSet { - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlaneSizingPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - if controlPlanePlacementPolicyId, isSet := d.GetOk("control_plane.0.placement_policy_id"); isSet { - if _, ok := result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlanePlacementPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - if nodePoolSizingPolicyId, isSet := nodePool["sizing_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolSizingPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - if nodePoolPlacementPolicyId, isSet := nodePool["placement_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolPlacementPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - if nodePoolVGpuPolicyId, isSet := nodePool["vgpu_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolVGpuPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - } - - rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigRdeTypeVersion, "vcdKeConfig") - if err != nil { - return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigRdeTypeVersion, err) - } - if len(rdes) != 1 { - return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) - } - - // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have - // to deal with it again. 
- type vcdKeConfigType struct { - Profiles []struct { - K8Config struct { - Mhc struct { - MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` - NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` - NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` - NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` - } `json:"mhc:omitempty"` - } `json:"K8Config:omitempty"` - ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` - } `json:"profiles,omitempty"` - } - - var vcdKeConfig vcdKeConfigType - rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) - if err != nil { - return nil, err - } - - err = json.Unmarshal(rawData, &vcdKeConfig) - if err != nil { - return nil, err - } - - if len(vcdKeConfig.Profiles) != 1 { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) - } - - result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) - result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) - result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) - result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) - result.VCDKEConfig.ContainerRegistryUrl = vcdKeConfig.Profiles[0].ContainerRegistryUrl - - owner, ok := d.GetOk("owner") - if !ok { - sessionInfo, err := vcdClient.Client.GetSessionInfo() - if err != nil { - return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) - } - owner = sessionInfo.User.Name - } - result.Owner = owner.(string) - - apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) - if err != nil { - return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - result.ApiToken = apiToken.RefreshToken - - result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) - return result, nil -} From de9803f6ae69713bd70ef5da5908d8c582711d80 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 17:27:13 +0100 Subject: [PATCH 030/156] Refactor Signed-off-by: abarreiro --- .../capiyaml_cluster.tmpl} | 0 .../capiyaml_nodepool.tmpl} | 0 vcd/cse/{ => 4.2}/rde.tmpl | 0 vcd/cse_util.go | 375 ------- vcd/resource_vcd_cse_kubernetes_cluster.go | 962 ++++++++++++------ ...e_vcd_cse_kubernetes_cluster_unit_test.go} | 57 -- 6 files changed, 661 insertions(+), 733 deletions(-) rename vcd/cse/{capi-yaml/cluster.tmpl => 4.2/capiyaml_cluster.tmpl} (100%) rename vcd/cse/{capi-yaml/node_pool.tmpl => 4.2/capiyaml_nodepool.tmpl} (100%) rename vcd/cse/{ => 4.2}/rde.tmpl (100%) delete mode 100644 vcd/cse_util.go rename vcd/{cse_util_unit_test.go => resource_vcd_cse_kubernetes_cluster_unit_test.go} (57%) diff --git a/vcd/cse/capi-yaml/cluster.tmpl b/vcd/cse/4.2/capiyaml_cluster.tmpl similarity index 100% rename from vcd/cse/capi-yaml/cluster.tmpl rename to vcd/cse/4.2/capiyaml_cluster.tmpl diff --git a/vcd/cse/capi-yaml/node_pool.tmpl b/vcd/cse/4.2/capiyaml_nodepool.tmpl similarity index 100% rename from vcd/cse/capi-yaml/node_pool.tmpl rename to vcd/cse/4.2/capiyaml_nodepool.tmpl diff --git a/vcd/cse/rde.tmpl b/vcd/cse/4.2/rde.tmpl similarity index 100% rename from vcd/cse/rde.tmpl rename to vcd/cse/4.2/rde.tmpl diff --git a/vcd/cse_util.go b/vcd/cse_util.go deleted file mode 100644 index 
d8760f463..000000000 --- a/vcd/cse_util.go +++ /dev/null @@ -1,375 +0,0 @@ -package vcd - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/vmware/go-vcloud-director/v2/govcd" - "strconv" - "strings" - "text/template" -) - -// tkgVersionBundle is a type that contains all the versions of the components of -// a Kubernetes cluster that can be obtained with the vApp Template name, downloaded -// from VMware Customer connect: -// https://customerconnect.vmware.com/downloads/details?downloadGroup=TKG-240&productId=1400 -type tkgVersionBundle struct { - EtcdVersion string - CoreDnsVersion string - TkgVersion string - TkrVersion string - KubernetesVersion string -} - -// getTkgVersionBundleFromVAppTemplateName returns a tkgVersionBundle with the details of -// all the Kubernetes cluster components versions given a valid vApp Template name, that should -// correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. -func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { - versionsMap := map[string]map[string]string{ - "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { - "tkg": "v2.2.0", - "etcd": "v3.5.6_vmware.9", - "coreDns": "v1.9.3_vmware.8", - }, - "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { - "tkg": "v2.4.0", - "etcd": "v3.5.7_vmware.6", - "coreDns": "v1.10.1_vmware.7", - }, - "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { - "tkg": "v2.4.0", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.10.1_vmware.7", - }, - "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { - "tkg": "v2.3.1", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.9.3_vmware.16", - }, - } - - result := tkgVersionBundle{} - - if strings.Contains(ovaName, "photon") { - return result, fmt.Errorf("the vApp Template '%s' uses Photon, and it is not supported", ovaName) - } - - cutPosition := strings.LastIndex(ovaName, "kube-") - if cutPosition < 0 { - return result, fmt.Errorf("the vApp Template '%s' is not a Kubernetes template OVA", ovaName) - } - parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] - if _, ok := versionsMap[parsedOvaName]; !ok { - return result, fmt.Errorf("the Kubernetes OVA '%s' is not supported", parsedOvaName) - } - - // The map checking above guarantees that all splits and replaces will work - result.KubernetesVersion = strings.Split(parsedOvaName, "-")[0] - result.TkrVersion = strings.ReplaceAll(strings.Split(parsedOvaName, "-")[0], "+", "---") + "-" + strings.Split(parsedOvaName, "-")[1] - result.TkgVersion = versionsMap[parsedOvaName]["tkg"] - result.EtcdVersion = versionsMap[parsedOvaName]["etcd"] - result.CoreDnsVersion = versionsMap[parsedOvaName]["coreDns"] - return result, nil -} - -// createClusterDto is a helper struct that contains all the required elements to successfully create a Kubernetes cluster using CSE. 
-type createClusterDto struct { - Name string - VcdUrl string - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) - VCDKEConfig struct { - MaxUnhealthyNodesPercentage string - NodeStartupTimeout string - NodeNotReadyTimeout string - NodeUnknownTimeout string - ContainerRegistryUrl string - } - TkgVersion tkgVersionBundle - Owner string - ApiToken string -} - -// getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information -// from the Terraform resource data and the target VCD. -func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion string) (*createClusterDto, error) { - result := &createClusterDto{} - result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema - - name := d.Get("name").(string) - result.Name = name - - org, err := vcdClient.GetAdminOrgFromResource(d) - if err != nil { - return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) - } - result.Org = org - - vdcId := d.Get("vdc_id").(string) - vdc, err := org.GetVDCById(vdcId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) - } - result.VdcName = vdc.Vdc.Name - - vAppTemplateId := d.Get("ova_id").(string) - vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) - } - result.OvaName = vAppTemplate.VAppTemplate.Name - - tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vAppTemplate.VAppTemplate.Name) - if err != nil { - return nil, err - } - result.TkgVersion = tkgVersions - - catalogName, err := vAppTemplate.GetCatalogName() - if err != nil { - return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) - } - result.CatalogName = catalogName - - networkId := d.Get("network_id").(string) - network, err := vdc.GetOrgVdcNetworkById(networkId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) - } - result.NetworkName = network.OrgVDCNetwork.Name - - rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", capvcdClusterRdeTypeVersion) - if err != nil { - return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", capvcdClusterRdeTypeVersion, err) - } - result.RdeType = rdeType - - // Builds a map that relates storage profiles IDs (the schema uses them to build a healthy Terraform dependency graph) - // with their corresponding names (the cluster YAML and CSE in general uses names only). - // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, - // as there can be a lot of them. 
- if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { - storageProfileId := d.Get("default_storage_class.0.storage_profile_id").(string) - storageProfile, err := vcdClient.GetStorageProfileById(storageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Storage Class: %s", storageProfileId, err) - } - result.UrnToNamesCache[storageProfileId] = storageProfile.Name - } - controlPlaneStorageProfileId := d.Get("control_plane.0.storage_profile_id").(string) - if _, ok := result.UrnToNamesCache[controlPlaneStorageProfileId]; !ok { // Only query if not already present - storageProfile, err := vcdClient.GetStorageProfileById(controlPlaneStorageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlaneStorageProfileId] = storageProfile.Name - } - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - nodePoolStorageProfileId := nodePool["storage_profile_id"].(string) - if _, ok := result.UrnToNamesCache[nodePoolStorageProfileId]; !ok { // Only query if not already present - storageProfile, err := vcdClient.GetStorageProfileById(nodePoolStorageProfileId) - if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolStorageProfileId] = storageProfile.Name - } - } - - // Builds a map that relates Compute Policies IDs (the schema uses them to build a healthy Terraform dependency graph) - // with their corresponding names (the cluster YAML and CSE in general uses names only). - // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, - // as there can be a lot of them. 
- if controlPlaneSizingPolicyId, isSet := d.GetOk("control_plane.0.sizing_policy_id"); isSet { - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlaneSizingPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlaneSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - if controlPlanePlacementPolicyId, isSet := d.GetOk("control_plane.0.placement_policy_id"); isSet { - if _, ok := result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(controlPlanePlacementPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Control Plane: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[controlPlanePlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - if nodePoolSizingPolicyId, isSet := nodePool["sizing_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolSizingPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolSizingPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolSizingPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - if nodePoolPlacementPolicyId, isSet := nodePool["placement_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolPlacementPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolPlacementPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - if nodePoolVGpuPolicyId, isSet := nodePool["vgpu_policy_id"]; isSet { - if _, ok := result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)]; !ok { // Only query if not already present - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(nodePoolVGpuPolicyId.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s' for the Node Pool: %s", controlPlaneStorageProfileId, err) - } - result.UrnToNamesCache[nodePoolVGpuPolicyId.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - } - - rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", vcdKeConfigRdeTypeVersion, "vcdKeConfig") - if err != nil { - return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", vcdKeConfigRdeTypeVersion, err) - } - if len(rdes) != 1 { - return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) - } - - // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have - // to deal with it again. 
- type vcdKeConfigType struct { - Profiles []struct { - K8Config struct { - Mhc struct { - MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` - NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` - NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` - NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` - } `json:"mhc:omitempty"` - } `json:"K8Config:omitempty"` - ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` - } `json:"profiles,omitempty"` - } - - var vcdKeConfig vcdKeConfigType - rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) - if err != nil { - return nil, err - } - - err = json.Unmarshal(rawData, &vcdKeConfig) - if err != nil { - return nil, err - } - - if len(vcdKeConfig.Profiles) != 1 { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) - } - - result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) - result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) - result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) - result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) - result.VCDKEConfig.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) - - owner, ok := d.GetOk("owner") - if !ok { - sessionInfo, err := vcdClient.Client.GetSessionInfo() - if err != nil { - return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) - } - owner = sessionInfo.User.Name - } - result.Owner = owner.(string) - - apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) - if err != nil { - return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - result.ApiToken = apiToken.RefreshToken - - result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) - return result, nil -} - -// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded -// in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the createClusterDto to -// populate several Go templates and build a final YAML. 
-func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { - // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine - sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(cseClusterYamlTemplate) - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "_CapiYaml").Parse(sanitizedTemplate)) - - nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) - if err != nil { - return "", err - } - - buf := &bytes.Buffer{} - args := map[string]string{ - "ClusterName": clusterDetails.Name, - "TargetNamespace": clusterDetails.Name + "-ns", - "TkrVersion": clusterDetails.TkgVersion.TkrVersion, - "TkgVersion": clusterDetails.TkgVersion.TkgVersion, - "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), - "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), - "PodCidr": d.Get("pods_cidr").(string), - "ServiceCidr": d.Get("services_cidr").(string), - "VcdSite": clusterDetails.VcdUrl, - "Org": clusterDetails.Org.AdminOrg.Name, - "OrgVdc": clusterDetails.VdcName, - "OrgVdcNetwork": clusterDetails.NetworkName, - "Catalog": clusterDetails.CatalogName, - "VAppTemplate": clusterDetails.OvaName, - "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], - "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], - "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], - "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), - "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), - "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, - "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, - "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, - "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, - "SshPublicKey": d.Get("ssh_public_key").(string), - } - - if _, ok := d.GetOk("control_plane.0.ip"); ok { - args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) - } - if _, ok := d.GetOk("virtual_ip_subnet"); ok { - args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) - } - - if d.Get("node_health_check").(bool) { - args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly - args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix - args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix - args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix - } - - if err := capiYamlEmpty.Execute(buf, args); err != nil { - return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) - } - - prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) - - // This encoder is used instead of a standard json.Marshal as the YAML contains special - // characters that are not encoded properly, such as '<'. 
- buf.Reset() - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err = enc.Encode(prettyYaml) - if err != nil { - return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) - } - - return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil -} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 549619ece..5c2d2c9f2 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -4,6 +4,7 @@ import ( "bytes" "context" _ "embed" + "encoding/base64" "encoding/json" "fmt" "github.com/hashicorp/go-cty/cty" @@ -14,322 +15,309 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" "github.com/vmware/go-vcloud-director/v2/util" "gopkg.in/yaml.v2" + "os" + "path/filepath" "strconv" "strings" "text/template" "time" ) -// TODO: Split per CSE version: 4.1, 4.2... -// -//go:embed cse/rde.tmpl -var cseRdeJsonTemplate string - -//go:embed cse/capi-yaml/cluster.tmpl -var cseClusterYamlTemplate string - -//go:embed cse/capi-yaml/node_pool.tmpl -var cseNodePoolTemplate string - -// Map of CSE version -> [VCDKEConfig RDE Type version, CAPVCD RDE Type version, CAPVCD Behavior version] -var cseVersions = map[string][]string{ - "4.2": {"1.1.0", "1.2.0", "1.0.0"}, +// supportedCseVersions is a map that contains only the supported CSE versions as keys, +// and its corresponding components versions as a slice of strings. The first string is the VCDKEConfig RDE Type version, +// then the CAPVCD RDE Type version and finally the CAPVCD Behavior version. +var supportedCseVersions = map[string][]string{ + "4.2": { + "1.1.0", // VCDKEConfig RDE Type version + "1.2.0", // CAPVCD RDE Type version + "1.0.0", // CAPVCD Behavior version + }, } -var resourceVcdCseKubernetesClusterSchema = map[string]*schema.Schema{ - "cse_version": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(getKeys(cseVersions), false), - Description: "The CSE version to use", - }, - "runtime": { - Type: schema.TypeString, - Optional: true, - Default: "tkg", - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE - Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", - }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The name of the Kubernetes cluster", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "ova_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", - }, - "org": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + - "level. 
Useful when connected as sysadmin working across different organizations", - }, - "vdc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the VDC that hosts the Kubernetes cluster", - }, - "network_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The ID of the network that the Kubernetes cluster will use", - }, - "owner": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", - }, - "api_token_file": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", - }, - "ssh_public_key": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "The SSH public key used to login into the cluster nodes", - }, - "control_plane": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "machine_count": { - Type: schema.TypeInt, - Optional: true, - Default: 3, // As suggested in UI - Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", - ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { - value, ok := v.(int) - if !ok { - return diag.Errorf("could not parse int value '%v' for control plane nodes", v) - } - if value < 1 || value%2 == 0 { - return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) - } - return nil +func resourceVcdCseKubernetesCluster() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceVcdCseKubernetesClusterCreate, + ReadContext: resourceVcdCseKubernetesRead, + UpdateContext: resourceVcdCseKubernetesUpdate, + DeleteContext: resourceVcdCseKubernetesDelete, + Schema: map[string]*schema.Schema{ + "cse_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(getKeys(supportedCseVersions), false), + Description: "The CSE version to use", + }, + "runtime": { + Type: schema.TypeString, + Optional: true, + Default: "tkg", + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"tkg"}, false), // May add others in future releases of CSE + Description: "The Kubernetes runtime for the cluster. Only 'tkg' (Tanzu Kubernetes Grid) is supported", + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the Kubernetes cluster", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "ova_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, + "org": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + + "level. 
Useful when connected as sysadmin working across different organizations", + }, + "vdc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The ID of the network that the Kubernetes cluster will use", + }, + "owner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", + }, + "api_token_file": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", + }, + "ssh_public_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 3, // As suggested in UI + Description: "The number of nodes that the control plane has. Must be an odd number and higher than 0", + ValidateDiagFunc: func(v interface{}, path cty.Path) diag.Diagnostics { + value, ok := v.(int) + if !ok { + return diag.Errorf("could not parse int value '%v' for control plane nodes", v) + } + if value < 1 || value%2 == 0 { + return diag.Errorf("number of control plane nodes must be odd and higher than 0, but it was '%d'", value) + } + return nil + }, + }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + Description: "Disk size, in Gibibytes, for the control plane nodes. Must be at least 20", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "IP for the control plane", + ValidateFunc: checkEmptyOrSingleIP(), + }, }, }, - "disk_size_gi": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), - Description: "Disk size, in Gibibytes, for the control plane nodes. 
Must be at least 20", - }, - "sizing_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Sizing policy for the control plane nodes", - }, - "placement_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Placement policy for the control plane nodes", - }, - "storage_profile_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Storage profile for the control plane nodes", - }, - "ip": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "IP for the control plane", - ValidateFunc: checkEmptyOrSingleIP(), - }, }, - }, - }, - "node_pool": { - Type: schema.TypeSet, - Required: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - Description: "The name of this node pool", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "machine_count": { - Type: schema.TypeInt, - Optional: true, - Default: 1, // As suggested in UI - Description: "The number of nodes that this node pool has. Must be higher than 0", - ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), - }, - "disk_size_gi": { - Type: schema.TypeInt, - Optional: true, - Default: 20, // As suggested in UI - ForceNew: true, - Description: "Disk size, in Gibibytes, for the control plane nodes", - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), - }, - "sizing_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Sizing policy for the control plane nodes", - }, - "placement_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "VM Placement policy for the control plane nodes", - }, - "vgpu_policy_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "vGPU policy for the control plane nodes", - }, - "storage_profile_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Description: "Storage profile for the control plane nodes", + "node_pool": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "The name of this node pool", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "machine_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, // As suggested in UI + Description: "The number of nodes that this node pool has. 
Must be higher than 0", + ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), + }, + "disk_size_gi": { + Type: schema.TypeInt, + Optional: true, + Default: 20, // As suggested in UI + ForceNew: true, + Description: "Disk size, in Gibibytes, for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + }, + "sizing_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Sizing policy for the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "VM Placement policy for the control plane nodes", + }, + "vgpu_policy_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "vGPU policy for the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Storage profile for the control plane nodes", + }, + }, }, }, - }, - }, - "default_storage_class": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "storage_profile_id": { - Required: true, - Type: schema.TypeString, - Description: "ID of the storage profile to use for the storage class", - }, - "name": { - Required: true, - Type: schema.TypeString, - Description: "Name to give to this storage class", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ - "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), - }, - "reclaim_policy": { - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), - Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", - }, - "filesystem": { - Required: true, - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), - Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + "default_storage_class": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile_id": { + Required: true, + Type: schema.TypeString, + Description: "ID of the storage profile to use for the storage class", + }, + "name": { + Required: true, + Type: schema.TypeString, + Description: "Name to give to this storage class", + ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), + }, + "reclaim_policy": { + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), + Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 
'retain' does not, and the volume can be manually reclaimed", + }, + "filesystem": { + Required: true, + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + }, + }, }, }, + "pods_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.96.0.0/11", // As suggested in UI + Description: "CIDR that the Kubernetes pods will use", + }, + "services_cidr": { + Type: schema.TypeString, + Optional: true, + Default: "100.64.0.0/13", // As suggested in UI + Description: "CIDR that the Kubernetes services will use", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Optional: true, + Description: "Virtual IP subnet for the cluster", + }, + "auto_repair_on_errors": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", + }, + "node_health_check": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", + }, + "create_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), + }, + "delete_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 10, + Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", + ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, + "raw_cluster_rde_json": { + Type: schema.TypeString, + Computed: true, + Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", + }, }, - }, - "pods_cidr": { - Type: schema.TypeString, - Optional: true, - Default: "100.96.0.0/11", // As suggested in UI - Description: "CIDR that the Kubernetes pods will use", - }, - "services_cidr": { - Type: schema.TypeString, - Optional: true, - Default: "100.64.0.0/13", // As suggested in UI - Description: "CIDR that the Kubernetes services will use", - }, - "virtual_ip_subnet": { - Type: schema.TypeString, - Optional: true, - Description: "Virtual IP subnet for the cluster", - }, - "auto_repair_on_errors": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", - }, - "node_health_check": { - Type: schema.TypeBool, - Optional: true, - Default: false, - Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", - }, - "create_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), - }, - "delete_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 10, - Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), - }, - "state": { - Type: schema.TypeString, - Computed: true, - Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. Useful to check whether the Kubernetes cluster is in a stable status", - }, - "kubeconfig": { - Type: schema.TypeString, - Computed: true, - Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", - }, - "raw_cluster_rde_json": { - Type: schema.TypeString, - Computed: true, - Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", - }, -} - -func resourceVcdCseKubernetesCluster() *schema.Resource { - return &schema.Resource{ - CreateContext: resourceVcdCseKubernetesClusterCreate, - ReadContext: resourceVcdCseKubernetesRead, - UpdateContext: resourceVcdCseKubernetesUpdate, - DeleteContext: resourceVcdCseKubernetesDelete, - Schema: resourceVcdCseKubernetesClusterSchema, } } -// getCseRdeTypeVersions gets the RDE Type versions. 
First returned parameter is VCDKEConfig, second is CAPVCDCluster, third is CAPVCD Behavior version -func getCseRdeTypeVersions(d *schema.ResourceData) (string, string, string) { - versions := cseVersions[d.Get("cse_version").(string)] - return versions[0], versions[1], versions[2] -} - func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion, _ := getCseRdeTypeVersions(d) - - clusterDetails, err := getClusterCreateDto(d, vcdClient, vcdKeConfigRdeTypeVersion, capvcdClusterRdeTypeVersion) + clusterDetails, err := getClusterCreateDto(d, vcdClient) if err != nil { return diag.Errorf("could not create Kubernetes cluster: %s", err) } @@ -427,7 +415,7 @@ func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) var diags diag.Diagnostics - _, _, capvcdBehaviorVersion := getCseRdeTypeVersions(d) + behaviorVersion := supportedCseVersions[d.Get("cse_version").(string)][2] // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be // many clusters with the same name and RDE Type. @@ -446,7 +434,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met if state == "provisioned" { // This can only be done if the cluster is in 'provisioned' state invocationResult := map[string]interface{}{} - err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", capvcdBehaviorVersion), types.BehaviorInvocation{}, invocationResult) + err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, invocationResult) if err != nil { return diag.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) } @@ -619,7 +607,12 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cr args["DefaultStorageClassFileSystem"] = d.Get("default_storage_class.0.filesystem").(string) } - capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(cseRdeJsonTemplate)) + rdeTmpl, err := getCseTemplateFile(d, "rde") + if err != nil { + return nil, err + } + + capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(rdeTmpl)) buf := &bytes.Buffer{} if err := capvcdEmpty.Execute(buf, args); err != nil { return nil, fmt.Errorf("could not render the Go template with the CAPVCD JSON: %s", err) @@ -636,7 +629,12 @@ func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *cr // generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. 
func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { - nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "_NodePool").Parse(cseNodePoolTemplate)) + nodePoolTmpl, err := getCseTemplateFile(d, "capiyaml_nodepool") + if err != nil { + return "", err + } + + nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "-node-pool").Parse(nodePoolTmpl)) resultYaml := "" buf := &bytes.Buffer{} @@ -652,7 +650,7 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD return "", fmt.Errorf("the node pool '%s' should have either a Placement Policy or a vGPU Policy, not both", name) } if vpguPolicyId != "" { - placementPolicyId = vpguPolicyId // For convenience, we just use one of them as both cannot be set at same time + placementPolicyId = vpguPolicyId // For convenience, we just use one of the variables as both cannot be set at same time } if err := nodePoolEmptyTmpl.Execute(buf, map[string]string{ @@ -662,7 +660,7 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD "Catalog": clusterDetails.CatalogName, "VAppTemplate": clusterDetails.OvaName, "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[nodePool["sizing_policy_id"].(string)], - "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], + "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], // Can be either Placement or vGPU "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[nodePool["storage_profile_id"].(string)], "NodePoolDiskSize": fmt.Sprintf("%dGi", nodePool["disk_size_gi"].(int)), "NodePoolEnableGpu": strconv.FormatBool(vpguPolicyId != ""), @@ -676,3 +674,365 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD } return resultYaml, nil } + +// tkgVersionBundle is a type that contains all the versions of the components of +// a Kubernetes cluster that can be obtained with the vApp Template name, downloaded +// from VMware Customer connect: +// https://customerconnect.vmware.com/downloads/details?downloadGroup=TKG-240&productId=1400 +type tkgVersionBundle struct { + EtcdVersion string + CoreDnsVersion string + TkgVersion string + TkrVersion string + KubernetesVersion string +} + +// getTkgVersionBundleFromVAppTemplateName returns a tkgVersionBundle with the details of +// all the Kubernetes cluster components versions given a valid vApp Template name, that should +// correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. 
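As a side note on the rendering pattern used by generateNodePoolYaml above: each node pool YAML block is produced with Go's text/template, parsing the template once with template.Must and executing it into a bytes.Buffer with a map of string arguments. A minimal, self-contained sketch of that pattern follows; the template body and the argument values are invented for illustration and are not taken from the real capiyaml_nodepool.tmpl.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// Made-up template that only illustrates the rendering pattern.
const nodePoolTmpl = `apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: {{.NodePoolName}}
  namespace: {{.TargetNamespace}}
spec:
  replicas: {{.NodePoolMachineCount}}`

func main() {
	tmpl := template.Must(template.New("node-pool").Parse(nodePoolTmpl))
	buf := &bytes.Buffer{}
	err := tmpl.Execute(buf, map[string]string{
		"NodePoolName":         "worker-pool-1",
		"TargetNamespace":      "my-cluster-ns",
		"NodePoolMachineCount": "3",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}

Running it prints the rendered MachineDeployment snippet, mirroring how each node pool block is appended to resultYaml in the function above.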
+func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { + versionsMap := map[string]map[string]string{ + "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { + "tkg": "v2.2.0", + "etcd": "v3.5.6_vmware.9", + "coreDns": "v1.9.3_vmware.8", + }, + "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { + "tkg": "v2.4.0", + "etcd": "v3.5.7_vmware.6", + "coreDns": "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { + "tkg": "v2.4.0", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.10.1_vmware.7", + }, + "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { + "tkg": "v2.3.1", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.9.3_vmware.16", + }, + } + + result := tkgVersionBundle{} + + if strings.Contains(ovaName, "photon") { + return result, fmt.Errorf("the vApp Template '%s' uses Photon, and it is not supported", ovaName) + } + + cutPosition := strings.LastIndex(ovaName, "kube-") + if cutPosition < 0 { + return result, fmt.Errorf("the vApp Template '%s' is not a Kubernetes template OVA", ovaName) + } + parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] + if _, ok := versionsMap[parsedOvaName]; !ok { + return result, fmt.Errorf("the Kubernetes OVA '%s' is not supported", parsedOvaName) + } + + // The map checking above guarantees that all splits and replaces will work + result.KubernetesVersion = strings.Split(parsedOvaName, "-")[0] + result.TkrVersion = strings.ReplaceAll(strings.Split(parsedOvaName, "-")[0], "+", "---") + "-" + strings.Split(parsedOvaName, "-")[1] + result.TkgVersion = versionsMap[parsedOvaName]["tkg"] + result.EtcdVersion = versionsMap[parsedOvaName]["etcd"] + result.CoreDnsVersion = versionsMap[parsedOvaName]["coreDns"] + return result, nil +} + +// createClusterDto is a helper struct that contains all the required elements to successfully create a Kubernetes cluster using CSE. +// This is useful to avoid querying VCD too much, as the Terraform configuration works mostly with IDs, but we require names, among +// other items that we eventually need to retrieve from VCD. +type createClusterDto struct { + Name string + VcdUrl string + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) + VCDKEConfig struct { + MaxUnhealthyNodesPercentage string + NodeStartupTimeout string + NodeNotReadyTimeout string + NodeUnknownTimeout string + ContainerRegistryUrl string + } + TkgVersion tkgVersionBundle + Owner string + ApiToken string +} + +// getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information +// from the Terraform resource data and the target VCD. 
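The string manipulation inside getTkgVersionBundleFromVAppTemplateName above can be exercised on its own. A small sketch, where the "ubuntu-2004-" prefix of the OVA name is invented but the version suffix matches one of the versionsMap entries; the map lookup itself is omitted:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Made-up OVA name following the documented shape; real names come from the vApp Template.
	ovaName := "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8.ova"

	cut := strings.LastIndex(ovaName, "kube-")
	parsed := strings.ReplaceAll(ovaName, ".ova", "")[cut+len("kube-"):]

	kubernetesVersion := strings.Split(parsed, "-")[0] // v1.26.8+vmware.1
	tkrVersion := strings.ReplaceAll(kubernetesVersion, "+", "---") + "-" + strings.Split(parsed, "-")[1]

	fmt.Println(kubernetesVersion) // v1.26.8+vmware.1
	fmt.Println(tkrVersion)        // v1.26.8---vmware.1-tkg.1
}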
+func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createClusterDto, error) { + result := &createClusterDto{} + result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema + + name := d.Get("name").(string) + result.Name = name + + org, err := vcdClient.GetAdminOrgFromResource(d) + if err != nil { + return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) + } + result.Org = org + + vdcId := d.Get("vdc_id").(string) + vdc, err := org.GetVDCById(vdcId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) + } + result.VdcName = vdc.Vdc.Name + + vAppTemplateId := d.Get("ova_id").(string) + vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) + } + result.OvaName = vAppTemplate.VAppTemplate.Name + + tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vAppTemplate.VAppTemplate.Name) + if err != nil { + return nil, err + } + result.TkgVersion = tkgVersions + + catalogName, err := vAppTemplate.GetCatalogName() + if err != nil { + return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) + } + result.CatalogName = catalogName + + networkId := d.Get("network_id").(string) + network, err := vdc.GetOrgVdcNetworkById(networkId, true) + if err != nil { + return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) + } + result.NetworkName = network.OrgVDCNetwork.Name + + currentCseVersion := supportedCseVersions[d.Get("cse_version").(string)] + rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", currentCseVersion[1]) + if err != nil { + return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", currentCseVersion[1], err) + } + result.RdeType = rdeType + + // Fills the cache map that relates Storage profiles IDs and Compute policies IDs (the schema uses them to build a + // healthy Terraform dependency graph) with their corresponding names (the cluster YAML and CSE in general uses names only). + // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, + // as there can be a lot of them. + for _, configBlockAttr := range []string{"default_storage_class", "control_plane", "node_pool"} { + if _, ok := d.GetOk(configBlockAttr); !ok { + continue // Some blocks are optional, this is managed by the schema constraints + } + + // The node_pool is a Set, but the others are already Lists + configBlockAsSet, isASet := d.Get(configBlockAttr).(*schema.Set) + var setOrListConfigBlock []interface{} + if isASet { + setOrListConfigBlock = configBlockAsSet.List() + } else { + setOrListConfigBlock = d.Get(configBlockAttr).([]interface{}) + } + + // For every block, we check the inner attributes to retrieve their corresponding object names, + // like Storage Profile names and Compute Policy names. If the ID is already registered, we skip it. 
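One detail of getClusterCreateDto above is that node_pool is a *schema.Set while control_plane and default_storage_class are lists, so both shapes are normalized to []interface{} before iterating. A rough sketch of that normalization, using placeholder string elements instead of the real nested blocks:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
	// Placeholder data: a Set (like node_pool) and a plain list (like control_plane).
	nodePools := schema.NewSet(schema.HashString, []interface{}{"pool-a", "pool-b"})
	controlPlane := []interface{}{"cp"}

	// Normalize either shape to a []interface{} that can be ranged over uniformly.
	normalize := func(v interface{}) []interface{} {
		if s, ok := v.(*schema.Set); ok {
			return s.List()
		}
		return v.([]interface{})
	}

	fmt.Println(normalize(nodePools), normalize(controlPlane))
}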
+ for _, configBlockRaw := range setOrListConfigBlock { + configBlock := configBlockRaw.(map[string]interface{}) + if id, ok := configBlock["storage_profile_id"]; ok { + if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { + storageProfile, err := vcdClient.GetStorageProfileById(id.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", id, err) + } + result.UrnToNamesCache[id.(string)] = storageProfile.Name + } + } + if id, ok := configBlock["sizing_policy_id"]; ok { + if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s': %s", id, err) + } + result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if id, ok := configBlock["vgpu_policy_id"]; ok { + if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a vGPU Policy with ID '%s': %s", id, err) + } + result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + if id, ok := configBlock["placement_policy_id"]; ok { + if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Placement Policy with ID '%s': %s", id, err) + } + result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + } + } + } + } + + rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", currentCseVersion[0], "vcdKeConfig") + if err != nil { + return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", currentCseVersion[0], err) + } + if len(rdes) != 1 { + return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) + } + + // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have + // to deal with it again. 
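The loop above is essentially a memoized ID-to-name lookup, so each Storage Profile or Compute Policy is fetched from VCD at most once even when many node pools reference the same policy. The idea in isolation, with a hypothetical lookup function standing in for calls like GetStorageProfileById or GetVdcComputePolicyV2ById:

package main

import "fmt"

// urnToName mimics UrnToNamesCache: the empty key covers IDs that are not set in the schema.
var urnToName = map[string]string{"": ""}

// resolveName returns the cached name for an ID, calling lookup only on a cache miss.
func resolveName(id string, lookup func(string) (string, error)) (string, error) {
	if name, ok := urnToName[id]; ok {
		return name, nil
	}
	name, err := lookup(id)
	if err != nil {
		return "", err
	}
	urnToName[id] = name
	return name, nil
}

func main() {
	// Hypothetical lookup; in the resource this would be a VCD API call.
	fakeLookup := func(id string) (string, error) { return "name-of-" + id, nil }
	name, _ := resolveName("urn:vcloud:vdcstorageProfile:aaaa", fakeLookup)
	fmt.Println(name)
}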
+ type vcdKeConfigType struct { + Profiles []struct { + K8Config struct { + Mhc struct { + MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` + NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` + NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` + NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` + } `json:"mhc:omitempty"` + } `json:"K8Config:omitempty"` + ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` + } `json:"profiles,omitempty"` + } + + var vcdKeConfig vcdKeConfigType + rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) + if err != nil { + return nil, err + } + + err = json.Unmarshal(rawData, &vcdKeConfig) + if err != nil { + return nil, err + } + + if len(vcdKeConfig.Profiles) != 1 { + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) + } + + result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) + result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) + result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) + result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) + result.VCDKEConfig.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) + + owner, ok := d.GetOk("owner") + if !ok { + sessionInfo, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) + } + owner = sessionInfo.User.Name + } + result.Owner = owner.(string) + + apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) + if err != nil { + return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) + } + result.ApiToken = apiToken.RefreshToken + + result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) + return result, nil +} + +// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded +// in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the createClusterDto to +// populate several Go templates and build a final YAML. 
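A few lines above, the API token file is parsed with `govcd.GetTokenFromFile` and only its refresh token is kept. A minimal sketch of that file format, matching the JSON written by the `vcd_api_token` resource when `allow_token_file = true`; the file name is illustrative and the struct is a simplified stand-in for the govcd type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// apiTokenFile holds the fields found in the token file referenced by 'api_token_file'.
type apiTokenFile struct {
	TokenType    string `json:"token_type"`
	RefreshToken string `json:"refresh_token"`
	UpdatedBy    string `json:"updated_by"`
	UpdatedOn    string `json:"updated_on"`
}

func main() {
	raw, err := os.ReadFile("token.json") // illustrative path
	if err != nil {
		panic(err)
	}
	var token apiTokenFile
	if err := json.Unmarshal(raw, &token); err != nil {
		panic(err)
	}
	fmt.Println("refresh token present:", token.RefreshToken != "")
}
```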
+func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { + clusterTmpl, err := getCseTemplateFile(d, "capiyaml_cluster") + if err != nil { + return "", err + } + + // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine + sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(clusterTmpl) + capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "-cluster").Parse(sanitizedTemplate)) + + nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) + if err != nil { + return "", err + } + + args := map[string]string{ + "ClusterName": clusterDetails.Name, + "TargetNamespace": clusterDetails.Name + "-ns", + "TkrVersion": clusterDetails.TkgVersion.TkrVersion, + "TkgVersion": clusterDetails.TkgVersion.TkgVersion, + "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), + "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), + "PodCidr": d.Get("pods_cidr").(string), + "ServiceCidr": d.Get("services_cidr").(string), + "VcdSite": clusterDetails.VcdUrl, + "Org": clusterDetails.Org.AdminOrg.Name, + "OrgVdc": clusterDetails.VdcName, + "OrgVdcNetwork": clusterDetails.NetworkName, + "Catalog": clusterDetails.CatalogName, + "VAppTemplate": clusterDetails.OvaName, + "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], + "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], + "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], + "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), + "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), + "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, + "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, + "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, + "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, + "SshPublicKey": d.Get("ssh_public_key").(string), + } + if _, ok := d.GetOk("control_plane.0.ip"); ok { + args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) + } + if _, ok := d.GetOk("virtual_ip_subnet"); ok { + args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) + } + if d.Get("node_health_check").(bool) { + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly + args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix + args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix + args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix + } + + buf := &bytes.Buffer{} + if err := capiYamlEmpty.Execute(buf, args); err != nil { + return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) + } + // The final "pretty" YAML. To embed it in the final payload it must be marshaled into a one-line JSON string + prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) + + // This encoder is used instead of a standard json.Marshal as the YAML contains special + // characters that are not encoded properly, such as '<'. 
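The encoder mentioned in the closing comment above avoids HTML escaping, which a plain `json.Marshal` applies to characters such as '<' that appear in the generated YAML. A small, self-contained comparison of both behaviours:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	yamlSnippet := "value: <no value>\n"

	escaped, err := json.Marshal(yamlSnippet) // '<' and '>' become \u003c and \u003e
	if err != nil {
		panic(err)
	}

	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false) // keeps the YAML readable inside the JSON payload
	if err := enc.Encode(yamlSnippet); err != nil {
		panic(err)
	}

	fmt.Println(string(escaped))
	fmt.Println(buf.String())
}
```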
+ buf.Reset() + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err = enc.Encode(prettyYaml) + if err != nil { + return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) + } + + // Removes trailing quotes from the final JSON string + return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil +} + +// getCseTemplateFile gets a Go template file corresponding to the CSE version set in the Terraform configuration +func getCseTemplateFile(d *schema.ResourceData, templateName string) (string, error) { + cseVersion := d.Get("cse_version").(string) + + // In the future, we can put here some logic for equivalent CSE versions, to avoid duplicating the same Go + // templates that didn't change among versions. + + t := fmt.Sprintf("cse/%s/%s.tmpl", cseVersion, templateName) + b, err := os.ReadFile(filepath.Clean(t)) + if err != nil { + return "", fmt.Errorf("error reading '%s': %s", t, err) + } + return string(b), nil +} diff --git a/vcd/cse_util_unit_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go similarity index 57% rename from vcd/cse_util_unit_test.go rename to vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go index 40f9944df..4e744e053 100644 --- a/vcd/cse_util_unit_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go @@ -3,7 +3,6 @@ package vcd import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "reflect" "testing" ) @@ -63,59 +62,3 @@ func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { }) } } - -// Test_generateCapiYaml tests generateCapiYaml function -func Test_generateCapiYaml(t *testing.T) { - type args struct { - resourceData map[string]interface{} - clusterDetails *createClusterDto - } - tests := []struct { - name string - args args - want string - wantErr bool - }{ - { - name: "foo", - args: args{ - resourceData: map[string]interface{}{}, - clusterDetails: &createClusterDto{ - Name: "", - VcdUrl: "", - Org: nil, - VdcName: "", - OvaName: "", - CatalogName: "", - NetworkName: "", - RdeType: nil, - UrnToNamesCache: nil, - VCDKEConfig: struct { - MaxUnhealthyNodesPercentage string - NodeStartupTimeout string - NodeNotReadyTimeout string - NodeUnknownTimeout string - ContainerRegistryUrl string - }{}, - TkgVersion: tkgVersionBundle{}, - Owner: "", - ApiToken: "", - }, - }, - want: "", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - d := schema.TestResourceDataRaw(t, resourceVcdCseKubernetesClusterSchema, tt.args.resourceData) - got, err := generateCapiYaml(d, tt.args.clusterDetails) - if (err != nil) != tt.wantErr { - t.Errorf("generateCapiYaml() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("generateCapiYaml() got = %v, want %v", got, tt.want) - } - }) - } -} From bb777a019bc3ef98a166a5272a5601c4e82e8514 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 17:32:47 +0100 Subject: [PATCH 031/156] Refactor Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 36 ++++++++-------------- 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 5c2d2c9f2..f15a5b25c 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -818,7 +818,7 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC } result.RdeType = rdeType - // Fills the cache map that relates Storage profiles IDs and Compute policies IDs (the schema uses them to 
build a + // Fills the cache map that relates IDs of Storage profiles and Compute policies (the schema uses them to build a // healthy Terraform dependency graph) with their corresponding names (the cluster YAML and CSE in general uses names only). // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, // as there can be a lot of them. @@ -849,32 +849,20 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC result.UrnToNamesCache[id.(string)] = storageProfile.Name } } - if id, ok := configBlock["sizing_policy_id"]; ok { - if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Sizing Policy with ID '%s': %s", id, err) - } - result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + // The other sub-attributes are just Compute policies, we treat them the same + for _, attribute := range []string{"sizing_policy_id", "vgpu_policy_id", "placement_policy_id"} { + id, ok := configBlock[attribute] + if !ok { + continue } - } - if id, ok := configBlock["vgpu_policy_id"]; ok { - if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a vGPU Policy with ID '%s': %s", id, err) - } - result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; alreadyPresent { + continue } - } - if id, ok := configBlock["placement_policy_id"]; ok { - if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) - if err != nil { - return nil, fmt.Errorf("could not get a Placement Policy with ID '%s': %s", id, err) - } - result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name + computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) + if err != nil { + return nil, fmt.Errorf("could not get a Compute Policy with ID '%s': %s", id, err) } + result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name } } } From 7fa70bce3ba0f88b7cfef55c5fa2d1fa66c460f6 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 17:36:28 +0100 Subject: [PATCH 032/156] Refactor Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index f15a5b25c..5743caf6f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -826,25 +826,23 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC if _, ok := d.GetOk(configBlockAttr); !ok { continue // Some blocks are optional, this is managed by the schema constraints } - // The node_pool is a Set, but the others are already Lists - configBlockAsSet, isASet := d.Get(configBlockAttr).(*schema.Set) - var setOrListConfigBlock []interface{} - if isASet { - setOrListConfigBlock = configBlockAsSet.List() + var configBlockAsList []interface{} + if _, isASet := d.Get(configBlockAttr).(*schema.Set); isASet { + configBlockAsList = d.Get(configBlockAttr).(*schema.Set).List() } else { - setOrListConfigBlock = 
d.Get(configBlockAttr).([]interface{}) + configBlockAsList = d.Get(configBlockAttr).([]interface{}) } - // For every block, we check the inner attributes to retrieve their corresponding object names, + // For every existing block/list, we check the inner attributes to retrieve their corresponding object names, // like Storage Profile names and Compute Policy names. If the ID is already registered, we skip it. - for _, configBlockRaw := range setOrListConfigBlock { + for _, configBlockRaw := range configBlockAsList { configBlock := configBlockRaw.(map[string]interface{}) if id, ok := configBlock["storage_profile_id"]; ok { if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { storageProfile, err := vcdClient.GetStorageProfileById(id.(string)) if err != nil { - return nil, fmt.Errorf("could not get a Storage Profile with ID '%s': %s", id, err) + return nil, fmt.Errorf("could not get Storage Profile with ID '%s': %s", id, err) } result.UrnToNamesCache[id.(string)] = storageProfile.Name } @@ -860,7 +858,7 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC } computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) if err != nil { - return nil, fmt.Errorf("could not get a Compute Policy with ID '%s': %s", id, err) + return nil, fmt.Errorf("could not get Compute Policy with ID '%s': %s", id, err) } result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name } From fd71defa8b95f163f102b9e3fbd8344bad8bd9ad Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 17 Jan 2024 18:08:48 +0100 Subject: [PATCH 033/156] Refactor Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 57 +++++++++---------- ...esource_vcd_cse_kubernetes_cluster_test.go | 8 --- vcdTestAccVcdCseKubernetesCluster.json | 6 ++ 3 files changed, 34 insertions(+), 37 deletions(-) create mode 100644 vcdTestAccVcdCseKubernetesCluster.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 5743caf6f..52b02497c 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -742,25 +742,23 @@ func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, // This is useful to avoid querying VCD too much, as the Terraform configuration works mostly with IDs, but we require names, among // other items that we eventually need to retrieve from VCD. 
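The refactor above distinguishes between blocks stored as a Set (node_pool) and as a List (control_plane, default_storage_class) before iterating them. A small helper in that spirit, where `listFromSetOrList` is a hypothetical name and the snippet assumes the Terraform Plugin SDK v2 schema package already used by the resource:

```go
package vcd

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// listFromSetOrList returns the elements of a block attribute regardless of whether the
// schema stores it as a TypeSet or as a TypeList.
func listFromSetOrList(d *schema.ResourceData, attribute string) []interface{} {
	if set, isSet := d.Get(attribute).(*schema.Set); isSet {
		return set.List()
	}
	return d.Get(attribute).([]interface{})
}
```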
type createClusterDto struct { - Name string - VcdUrl string - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) - VCDKEConfig struct { - MaxUnhealthyNodesPercentage string - NodeStartupTimeout string - NodeNotReadyTimeout string - NodeUnknownTimeout string - ContainerRegistryUrl string - } - TkgVersion tkgVersionBundle - Owner string - ApiToken string + Name string + VcdUrl string + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) + MaxUnhealthyNodesPercentage string + NodeStartupTimeout string + NodeNotReadyTimeout string + NodeUnknownTimeout string + ContainerRegistryUrl string + TkgVersion tkgVersionBundle + Owner string + ApiToken string } // getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information @@ -904,11 +902,12 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) } - result.VCDKEConfig.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) - result.VCDKEConfig.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) - result.VCDKEConfig.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) - result.VCDKEConfig.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) - result.VCDKEConfig.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) + result.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) + result.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) + result.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) + result.NodeUnknownTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) + // TODO: Check airgapped environments: https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1.1a/VMware-Cloud-Director-Container-Service-Extension-Install-provider-4.1.1/GUID-F00BE796-B5F2-48F2-A012-546E2E694400.html + result.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) owner, ok := d.GetOk("owner") if !ok { @@ -970,7 +969,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, - "ContainerRegistryUrl": clusterDetails.VCDKEConfig.ContainerRegistryUrl, + "ContainerRegistryUrl": clusterDetails.ContainerRegistryUrl, "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, "SshPublicKey": d.Get("ssh_public_key").(string), } @@ -981,10 +980,10 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) } if d.Get("node_health_check").(bool) { - 
args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.VCDKEConfig.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly - args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeStartupTimeout) // With the 'second' suffix - args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeUnknownTimeout) // With the 'second' suffix - args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.VCDKEConfig.NodeNotReadyTimeout) // With the 'second' suffix + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly + args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeStartupTimeout) // With the 'second' suffix + args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeUnknownTimeout) // With the 'second' suffix + args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeNotReadyTimeout) // With the 'second' suffix } buf := &bytes.Buffer{} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index a017f44ed..7f58af005 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -139,14 +139,6 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } - node_pool { - name = "node-pool-2" - machine_count = 1 - disk_size_gi = 20 - sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id - storage_profile_id = data.vcd_storage_profile.sp.id - } - default_storage_class { name = "sc-1" storage_profile_id = data.vcd_storage_profile.sp.id diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json new file mode 100644 index 000000000..a9a5e5b9d --- /dev/null +++ b/vcdTestAccVcdCseKubernetesCluster.json @@ -0,0 +1,6 @@ +{ + "token_type": "API Token", + "refresh_token": "bMw5VMERIo4MmsZFIY0ZNw7VHXsdIyIK", + "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", + "updated_on": "2024-01-17T17:41:24+01:00" + } \ No newline at end of file From b5000c4b4b1365c492ce78a43ec250b7624ff8ec Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 18 Jan 2024 11:18:51 +0100 Subject: [PATCH 034/156] Tests pass Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- ...esource_vcd_cse_kubernetes_cluster_test.go | 23 +++++++++++++++++-- vcdTestAccVcdCseKubernetesCluster.json | 6 ----- 3 files changed, 22 insertions(+), 9 deletions(-) delete mode 100644 vcdTestAccVcdCseKubernetesCluster.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 52b02497c..e20f1253e 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -434,7 +434,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met if state == "provisioned" { // This can only be done if the cluster is in 'provisioned' state invocationResult := map[string]interface{}{} - err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, invocationResult) + err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, &invocationResult) if err != nil 
{ return diag.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 7f58af005..241250fd3 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -4,6 +4,7 @@ package vcd import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" "strings" "testing" @@ -48,12 +49,30 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Skip(acceptanceTestsSkipped) return } + cacheId := testCachedFieldValue{} + clusterName := "vcd_cse_kubernetes_cluster.my_cluster" resource.Test(t, resource.TestCase{ ProviderFactories: testAccProviders, + CheckDestroy: func(state *terraform.State) error { + if cacheId.fieldValue == "" { + return fmt.Errorf("cached ID '%s' is empty", cacheId.fieldValue) + } + conn := testAccProvider.Meta().(*VCDClient) + _, err := conn.GetRdeById(cacheId.fieldValue) + if err == nil { + return fmt.Errorf("cluster with ID '%s' still exists", cacheId.fieldValue) + } + return nil + }, Steps: []resource.TestStep{ { Config: configText, - Check: resource.ComposeTestCheckFunc(), + Check: resource.ComposeTestCheckFunc( + cacheId.cacheTestResourceFieldValue(clusterName, "id"), + resource.TestCheckResourceAttrSet(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig")), }, }, }) @@ -109,7 +128,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}75" + name = "{{.Name}}77" file_name = "{{.TokenFile}}" allow_token_file = true } diff --git a/vcdTestAccVcdCseKubernetesCluster.json b/vcdTestAccVcdCseKubernetesCluster.json deleted file mode 100644 index a9a5e5b9d..000000000 --- a/vcdTestAccVcdCseKubernetesCluster.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "token_type": "API Token", - "refresh_token": "bMw5VMERIo4MmsZFIY0ZNw7VHXsdIyIK", - "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-01-17T17:41:24+01:00" - } \ No newline at end of file From 56877a7030d9f693d1404861a9e045a5ec536c66 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 18 Jan 2024 15:56:59 +0100 Subject: [PATCH 035/156] Half implementation of update Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 252 ++++++++++-------- ...esource_vcd_cse_kubernetes_cluster_test.go | 12 +- .../r/cse_kubernetes_cluster.html.markdown | 10 +- 3 files changed, 155 insertions(+), 119 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index e20f1253e..e82876717 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -356,62 +356,6 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return resourceVcdCseKubernetesRead(ctx, d, meta) } -// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "create_timeout_minutes=0") -// or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep -// waiting. 
-func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { - var elapsed time.Duration - timeout := d.Get("create_timeout_minutes").(int) - currentState := "" - - start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever - rde, err := vcdClient.GetRdeById(rdeId) - if err != nil { - return "", err - } - currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") - if err != nil { - util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) - // We ignore this error, as eventually the state should be populated - } else { - - // Add some traceability in the logs and Terraform output about the progress of the cluster provisioning - eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") - if err == nil { - latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") - if err != nil { - util.Logger.Printf("[DEBUG] waiting for cluster to be provisioned. Latest event: '%s'", latestEvent) - } - } - - switch currentState { - case "provisioned": - return currentState, nil - case "error": - // We just finish if auto-recovery is disabled, otherwise we just let CSE fixing things in background - if !d.Get("auto_repair_on_errors").(bool) { - // Try to give feedback about what went wrong, which is located in a set of events in the RDE payload - latestError := "could not parse error event" - errorSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.capvcd.errorSet") - if err == nil { - latestError, err = traverseMapAndGet[string](errorSet[len(errorSet)-1], "additionalDetails.error") - if err != nil { - latestError = "could not parse error event" - } - } - return "", fmt.Errorf("got an error and 'auto_repair_on_errors=false', aborting. Latest error: %s", latestError) - } - } - } - - elapsed = time.Since(start) - time.Sleep(50 * time.Second) - - } - return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) -} - func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) var diags diag.Diagnostics @@ -452,23 +396,22 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met }) } - jsonEntity, err := jsonToCompactString(rde.DefinedEntity.Entity) - if err != nil { - diags = append(diags, diag.Errorf("could not save the cluster '%s' raw RDE contents into 'raw_cluster_rde_json' attribute: %s", rde.DefinedEntity.ID, err)...) - } - if diags != nil && diags.HasError() { + d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability + if len(diags) > 0 { return diags } - dSet(d, "raw_cluster_rde_json", jsonEntity) - - d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability return nil } func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be + // Some arguments don't require changes in the backend + if !d.HasChangesExcept("create_timeout_minutes", "delete_timeout_minutes") { + return nil + } + + // The ID must be already set for the update to be successful. 
We can't rely on GetRdesByName as there can be // many clusters with the same name and RDE Type. rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { @@ -481,42 +424,89 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, if state != "provisioned" { return diag.Errorf("could not update the Kubernetes cluster with ID '%s': It is in '%s' state, but should be 'provisioned'", d.Id(), state) } - // Only OVA and pool sizes can be changed. This is guaranteed by all ForceNew flags, but it's worth it to - // double-check - if d.HasChangesExcept("ova_id", "control_plane.0.machine_count", "node_pool") { - return diag.Errorf("only the Kubernetes template or the control plane/node machine pools can be modified") - } // Gets and unmarshals the CAPI YAML to update it capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") if err != nil { return diag.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) } - capiMap := map[string]interface{}{} - err = yaml.Unmarshal([]byte(capiYaml), &capiMap) - if err != nil { - return diag.Errorf("could not unmarshal the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) + // TODO: Is there a simpler way? + dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) + var yamlDocs []map[string]interface{} + i := 0 + for { + yamlDocs[i] = map[string]interface{}{} + if dec.Decode(&yamlDocs[i]) != nil { + break + } + i++ } // TODO: Change YAML here if d.HasChange("ova_id") { newOva := d.Get("ova_id") - _, err := vcdClient.GetVAppTemplateById(newOva.(string)) + ova, err := vcdClient.GetVAppTemplateById(newOva.(string)) if err != nil { return diag.Errorf("could not retrieve the new Kubernetes OVA with ID '%s': %s", newOva, err) } // TODO: Check whether the update can be performed + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "VCDMachineTemplate" { + yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] = ova.VAppTemplate.Name + } + } } if d.HasChange("control_plane.0.machine_count") { - util.Logger.Printf("not done but make static complains :)") + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "KubeadmControlPlane" { + yamlDoc["spec"].(map[string]interface{})["replicas"] = d.Get("control_plane.0.machine_count") + } + } } + // The pools can only be resized + nodePools := map[string]int{} if d.HasChange("node_pool") { + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + nodePools[nodePool["name"].(string)] = nodePool["machine_count"].(int) + } + util.Logger.Printf("not done but make static complains :)") + } + + for nodePoolName, nodePoolSize := range nodePools { + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "KubeadmControlPlane" { + if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePoolName { + yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePoolSize + } + } + } + } + + if d.HasChange("node_health_check") { + oldNhc, newNhc := d.GetChange("node_health_check") + if oldNhc.(bool) && !newNhc.(bool) { + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "MachineHealthCheck" { + // TODO: TBD + fmt.Printf("a") + } + } + } else { + // Add the YAML block + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "MachineHealthCheck" { + // TODO: Error? 
Should not be there + fmt.Printf("b") + } + } + } util.Logger.Printf("not done but make static complains :)") } updatedYaml := capiYaml // FIXME rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml - + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) // FIXME: This must be done with retries due to ETag clash err = rde.Update(*rde.DefinedEntity) if err != nil { @@ -675,6 +665,62 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD return resultYaml, nil } +// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "create_timeout_minutes=0") +// or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep +// waiting. +func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { + var elapsed time.Duration + timeout := d.Get("create_timeout_minutes").(int) + currentState := "" + + start := time.Now() + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + rde, err := vcdClient.GetRdeById(rdeId) + if err != nil { + return "", err + } + currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + if err != nil { + util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) + // We ignore this error, as eventually the state should be populated + } else { + + // Add some traceability in the logs and Terraform output about the progress of the cluster provisioning + eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") + if err == nil { + latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") + if err != nil { + util.Logger.Printf("[DEBUG] waiting for cluster to be provisioned. Latest event: '%s'", latestEvent) + } + } + + switch currentState { + case "provisioned": + return currentState, nil + case "error": + // We just finish if auto-recovery is disabled, otherwise we just let CSE fixing things in background + if !d.Get("auto_repair_on_errors").(bool) { + // Try to give feedback about what went wrong, which is located in a set of events in the RDE payload + latestError := "could not parse error event" + errorSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.capvcd.errorSet") + if err == nil { + latestError, err = traverseMapAndGet[string](errorSet[len(errorSet)-1], "additionalDetails.error") + if err != nil { + latestError = "could not parse error event" + } + } + return "", fmt.Errorf("got an error and 'auto_repair_on_errors=false', aborting. Latest error: %s", latestError) + } + } + } + + elapsed = time.Since(start) + time.Sleep(50 * time.Second) + + } + return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) +} + // tkgVersionBundle is a type that contains all the versions of the components of // a Kubernetes cluster that can be obtained with the vApp Template name, downloaded // from VMware Customer connect: @@ -691,6 +737,7 @@ type tkgVersionBundle struct { // all the Kubernetes cluster components versions given a valid vApp Template name, that should // correspond to a Kubernetes template. 
If it is not a valid vApp Template, returns an error. func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { + // TODO: This should be probably a JSON file versionsMap := map[string]map[string]string{ "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { "tkg": "v2.2.0", @@ -751,7 +798,7 @@ type createClusterDto struct { NetworkName string RdeType *govcd.DefinedEntityType UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) - MaxUnhealthyNodesPercentage string + MaxUnhealthyNodesPercentage float64 NodeStartupTimeout string NodeNotReadyTimeout string NodeUnknownTimeout string @@ -871,44 +918,27 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) } - // Obtain some required elements from the CSE Server configuration (aka VCDKEConfig), so we don't have - // to deal with it again. - type vcdKeConfigType struct { - Profiles []struct { - K8Config struct { - Mhc struct { - MaxUnhealthyNodes int `json:"maxUnhealthyNodes:omitempty"` - NodeStartupTimeout int `json:"nodeStartupTimeout:omitempty"` - NodeNotReadyTimeout int `json:"nodeNotReadyTimeout:omitempty"` - NodeUnknownTimeout int `json:"nodeUnknownTimeout:omitempty"` - } `json:"mhc:omitempty"` - } `json:"K8Config:omitempty"` - ContainerRegistryUrl string `json:"containerRegistryUrl,omitempty"` - } `json:"profiles,omitempty"` - } - - var vcdKeConfig vcdKeConfigType - rawData, err := json.Marshal(rdes[0].DefinedEntity.Entity) + profiles, err := traverseMapAndGet[[]interface{}](rdes[0].DefinedEntity.Entity, "profiles") if err != nil { - return nil, err + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a 'profiles' element: %s", err) } - - err = json.Unmarshal(rawData, &vcdKeConfig) - if err != nil { - return nil, err + if len(profiles) != 1 { + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(profiles)) } + // TODO: Check airgapped environments: https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1.1a/VMware-Cloud-Director-Container-Service-Extension-Install-provider-4.1.1/GUID-F00BE796-B5F2-48F2-A012-546E2E694400.html + result.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", profiles[0].(map[string]interface{})["containerRegistryUrl"].(string)) - if len(vcdKeConfig.Profiles) != 1 { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(vcdKeConfig.Profiles)) + if _, ok := d.GetOk("node_health_check"); ok { + mhc, err := traverseMapAndGet[map[string]interface{}](profiles[0], "K8Config.mhc") + if err != nil { + return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a 'profiles[0].K8sConfig.mhc' element: %s", err) + } + result.MaxUnhealthyNodesPercentage = mhc["maxUnhealthyNodes"].(float64) + result.NodeStartupTimeout = mhc["nodeStartupTimeout"].(string) + result.NodeNotReadyTimeout = mhc["nodeUnknownTimeout"].(string) + result.NodeUnknownTimeout = mhc["nodeNotReadyTimeout"].(string) } - result.MaxUnhealthyNodesPercentage = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.MaxUnhealthyNodes) - result.NodeStartupTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeStartupTimeout) - result.NodeNotReadyTimeout = strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeNotReadyTimeout) - result.NodeUnknownTimeout = 
strconv.Itoa(vcdKeConfig.Profiles[0].K8Config.Mhc.NodeUnknownTimeout) - // TODO: Check airgapped environments: https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1.1a/VMware-Cloud-Director-Container-Service-Extension-Install-provider-4.1.1/GUID-F00BE796-B5F2-48F2-A012-546E2E694400.html - result.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", vcdKeConfig.Profiles[0].ContainerRegistryUrl) - owner, ok := d.GetOk("owner") if !ok { sessionInfo, err := vcdClient.Client.GetSessionInfo() @@ -980,7 +1010,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) } if d.Get("node_health_check").(bool) { - args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%s%%%%", clusterDetails.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix, it is doubled to render the template correctly + args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%.0f%%", clusterDetails.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeStartupTimeout) // With the 'second' suffix args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeUnknownTimeout) // With the 'second' suffix args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeNotReadyTimeout) // With the 'second' suffix diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 241250fd3..8a8fb5dfb 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -67,12 +67,14 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { Steps: []resource.TestStep{ { Config: configText, - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttrSet(clusterName, "id"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), - resource.TestCheckResourceAttrSet(clusterName, "kubeconfig")), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestCheckResourceAttrSet(clusterName, "raw_cluster_rde_json"), + ), }, }, }) @@ -128,7 +130,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}77" + name = "{{.Name}}83" file_name = "{{.TokenFile}}" allow_token_file = true } @@ -165,8 +167,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { filesystem = "ext4" } - auto_repair_on_errors = false - node_health_check = false + auto_repair_on_errors = true + node_health_check = true create_timeout_minutes = 0 delete_timeout_minutes = 0 diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 0f4718db0..dcd6b28d2 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -198,9 +198,13 @@ The following attributes are available for consumption as computed attributes: created and ready to use, or `error` when an error occurred. `provisioning` can only be obtained when a timeout happens during cluster creation. `error` can only be obtained either with a timeout or when `auto_repair_on_errors=false`. * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. 
Only available when `state=provisioned` -* `raw_cluster_rde_json` - The raw JSON representation of this Kubernetes cluster inside the [RDE](/providers/vmware/vcd/latest/docs/resources/rde) - that CSE uses to operate the cluster -* `latest_event` - The latest event that occurred in the lifetime of the cluster + +## Updating + +Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, +as stated [in their documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html). + + ## Importing From f9f92223699f6f063b6fdab7a0630687106e5611 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 11:14:49 +0100 Subject: [PATCH 036/156] Cluster update impl, but not tested Signed-off-by: abarreiro --- vcd/cse/4.2/capiyaml_cluster.tmpl | 25 --- vcd/cse/4.2/capiyaml_mhc.tmpl | 22 +++ vcd/resource_vcd_cse_kubernetes_cluster.go | 171 ++++++++++++------ ...esource_vcd_cse_kubernetes_cluster_test.go | 3 +- .../r/cse_kubernetes_cluster.html.markdown | 9 +- 5 files changed, 139 insertions(+), 91 deletions(-) create mode 100644 vcd/cse/4.2/capiyaml_mhc.tmpl diff --git a/vcd/cse/4.2/capiyaml_cluster.tmpl b/vcd/cse/4.2/capiyaml_cluster.tmpl index e6065202d..16a676ae1 100644 --- a/vcd/cse/4.2/capiyaml_cluster.tmpl +++ b/vcd/cse/4.2/capiyaml_cluster.tmpl @@ -29,31 +29,6 @@ spec: kind: VCDCluster name: "{{.ClusterName}}" namespace: "{{.TargetNamespace}}" -{{- if .MaxUnhealthyNodePercentage }} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" - labels: - clusterctl.cluster.x-k8s.io: "" - clusterctl.cluster.x-k8s.io/move: "" -spec: - clusterName: "{{.ClusterName}}" - maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" - nodeStartupTimeout: "{{.NodeStartupTimeout}}" - selector: - matchLabels: - cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" - unhealthyConditions: - - type: Ready - status: Unknown - timeout: "{{.NodeUnknownTimeout}}" - - type: Ready - status: "False" - timeout: "{{.NodeNotReadyTimeout}}" -{{- end }} --- apiVersion: v1 kind: Secret diff --git a/vcd/cse/4.2/capiyaml_mhc.tmpl b/vcd/cse/4.2/capiyaml_mhc.tmpl new file mode 100644 index 000000000..d31e4c3ec --- /dev/null +++ b/vcd/cse/4.2/capiyaml_mhc.tmpl @@ -0,0 +1,22 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: "{{.ClusterName}}" + namespace: "{{.TargetNamespace}}" + labels: + clusterctl.cluster.x-k8s.io: "" + clusterctl.cluster.x-k8s.io/move: "" +spec: + clusterName: "{{.ClusterName}}" + maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" + nodeStartupTimeout: "{{.NodeStartupTimeout}}" + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" + unhealthyConditions: + - type: Ready + status: Unknown + timeout: "{{.NodeUnknownTimeout}}" + - type: Ready + status: "False" + timeout: "{{.NodeNotReadyTimeout}}" \ No newline at end of file diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index e82876717..b712e1a32 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -282,18 +282,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Default: false, Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and 
remediation rules", }, - "create_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 60, - Description: "The time, in minutes, to wait for the cluster to be completely created, with a ready-to-use Kubeconfig. 0 means wait indefinitely", - ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), - }, - "delete_timeout_minutes": { - Type: schema.TypeInt, - Optional: true, - Default: 10, - Description: "The time, in minutes, to wait for the cluster to be deleted when it is marked for deletion. 0 means wait indefinitely", + "operations_timeout_minutes": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + Description: "The time, in minutes, to wait for the cluster operations to be successfully completed. For example, during cluster creation/update, it should be in `provisioned`" + + "state before the timeout is reached, otherwise the operation will return an error. For cluster deletion, this timeout" + + "specifies the time to wait until the cluster is completely deleted. Setting this argument to `0` means to wait indefinitely", ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), }, "state": { @@ -322,7 +317,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create Kubernetes cluster: %s", err) } - entityMap, err := getCseKubernetesClusterEntityMap(d, clusterDetails) + entityMap, err := getCseKubernetesClusterCreationPayload(d, clusterDetails) if err != nil { return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } @@ -407,7 +402,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, vcdClient := meta.(*VCDClient) // Some arguments don't require changes in the backend - if !d.HasChangesExcept("create_timeout_minutes", "delete_timeout_minutes") { + if !d.HasChangesExcept("operations_timeout_minutes") { return nil } @@ -442,7 +437,6 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, i++ } - // TODO: Change YAML here if d.HasChange("ova_id") { newOva := d.Get("ova_id") ova, err := vcdClient.GetVAppTemplateById(newOva.(string)) @@ -463,21 +457,15 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, } } } - // The pools can only be resized - nodePools := map[string]int{} + // The node pools can only be resized if d.HasChange("node_pool") { for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { nodePool := nodePoolRaw.(map[string]interface{}) - nodePools[nodePool["name"].(string)] = nodePool["machine_count"].(int) - } - util.Logger.Printf("not done but make static complains :)") - } - - for nodePoolName, nodePoolSize := range nodePools { - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "KubeadmControlPlane" { - if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePoolName { - yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePoolSize + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "KubeadmControlPlane" { + if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePool["name"].(string) { + yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePool["machine_count"].(int) + } } } } @@ -486,31 +474,67 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, if d.HasChange("node_health_check") { oldNhc, newNhc := d.GetChange("node_health_check") if oldNhc.(bool) && !newNhc.(bool) { - for _, yamlDoc := range yamlDocs { + toDelete := 
0 + for i, yamlDoc := range yamlDocs { if yamlDoc["kind"] == "MachineHealthCheck" { - // TODO: TBD - fmt.Printf("a") + toDelete = i } } + yamlDocs[toDelete] = yamlDocs[len(yamlDocs)-1] // We delete the MachineHealthCheck block by putting the last doc in its place + yamlDocs = yamlDocs[:len(yamlDocs)-1] // Then we remove the last doc } else { // Add the YAML block - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "MachineHealthCheck" { - // TODO: Error? Should not be there - fmt.Printf("b") - } + rawYaml, err := generateMemoryHealthCheckYaml(d, nil) + if err != nil { + return diag.FromErr(err) + } + yamlBlock := map[string]interface{}{} + err = yaml.Unmarshal([]byte(rawYaml), &yamlBlock) + if err != nil { + return diag.Errorf("error updating Memory Health Check: %s", err) } + yamlDocs = append(yamlDocs, yamlBlock) } util.Logger.Printf("not done but make static complains :)") } - updatedYaml := capiYaml // FIXME - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) - // FIXME: This must be done with retries due to ETag clash - err = rde.Update(*rde.DefinedEntity) + updatedYaml, err := yaml.Marshal(yamlDocs) if err != nil { - return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + return diag.Errorf("error updating cluster: %s", err) + } + + // This must be done with retries due to the possible clash on ETags + _, err = runWithRetry( + "update cluster", + "could not update cluster", + 1*time.Minute, + nil, + func() (any, error) { + rde, err := vcdClient.GetRdeById(d.Id()) + if err != nil { + return nil, fmt.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) + + err = rde.Update(*rde.DefinedEntity) + if err != nil { + return nil, err + } + return nil, nil + }, + ) + if err != nil { + return diag.FromErr(err) + } + + state, err = waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) + if err != nil { + return diag.Errorf("Kubernetes cluster update failed: %s", err) + } + if state != "provisioned" { + return diag.Errorf("Kubernetes cluster update failed, cluster is not in 'provisioned' state, but '%s'", state) } return resourceVcdCseKubernetesRead(ctx, d, meta) @@ -525,9 +549,9 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m vcdKe := map[string]interface{}{} var elapsed time.Duration - timeout := d.Get("delete_timeout_minutes").(int) + timeout := d.Get("operations_timeout_minutes").(int) start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies delete_timeout_minutes=0, we wait forever + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { if govcd.ContainsNotFound(err) { @@ -555,20 +579,21 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m } } - time.Sleep(30 * time.Second) + time.Sleep(10 * time.Second) elapsed = time.Since(start) } - // We give a hint to the user whenever possible + // We give a hint to the user about 
the deletion process result if len(vcdKe) >= 2 && vcdKe["markForDelete"].(bool) && vcdKe["forceDelete"].(bool) { return diag.Errorf("timeout of %d minutes reached, the cluster was successfully marked for deletion but was not removed in time", timeout) } return diag.Errorf("timeout of %d minutes reached, the cluster was not marked for deletion, please try again", timeout) } -// getCseKubernetesClusterEntityMap gets the payload for the RDE that manages the Kubernetes cluster, so it -// can be created or updated. -func getCseKubernetesClusterEntityMap(d *schema.ResourceData, clusterDetails *createClusterDto) (map[string]interface{}, error) { +// getCseKubernetesClusterCreationPayload gets the payload for the RDE that will trigger a Kubernetes cluster creation. +// It generates a valid YAML that is embedded inside the RDE JSON, then it is returned as an unmarshaled +// generic map, that allows to be sent to VCD as it is. +func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, clusterDetails *createClusterDto) (map[string]interface{}, error) { capiYaml, err := generateCapiYaml(d, clusterDetails) if err != nil { return nil, err @@ -665,16 +690,44 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD return resultYaml, nil } -// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "create_timeout_minutes=0") +// generateMemoryHealthCheckYaml generates a YAML block corresponding to the Kubernetes memory health check. +func generateMemoryHealthCheckYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { + if !d.Get("node_health_check").(bool) { + return "", nil + } + + mhcTmpl, err := getCseTemplateFile(d, "capiyaml_mhc") + if err != nil { + return "", err + } + + mhcEmptyTmpl := template.Must(template.New(clusterDetails.Name + "-mhc").Parse(mhcTmpl)) + buf := &bytes.Buffer{} + + if err := mhcEmptyTmpl.Execute(buf, map[string]string{ + "ClusterName": clusterDetails.Name, + "TargetNamespace": clusterDetails.Name + "-ns", + "MaxUnhealthyNodePercentage": fmt.Sprintf("%.0f%%", clusterDetails.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix + "NodeStartupTimeout": fmt.Sprintf("%ss", clusterDetails.NodeStartupTimeout), // With the 'second' suffix + "NodeUnknownTimeout": fmt.Sprintf("%ss", clusterDetails.NodeUnknownTimeout), // With the 'second' suffix + "NodeNotReadyTimeout": fmt.Sprintf("%ss", clusterDetails.NodeNotReadyTimeout), // With the 'second' suffix + }); err != nil { + return "", fmt.Errorf("could not generate a correct Memory Health Check YAML: %s", err) + } + return fmt.Sprintf("%s\n---\n", buf.String()), nil + +} + +// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "operations_timeout_minutes=0") // or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep // waiting. 
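The function that follows implements a timed polling loop over the cluster RDE. A condensed sketch of the same wait-with-timeout pattern, using a generic condition callback as a stand-in for reading 'status.vcdKe.state'; as in the resource, a timeout of 0 means wait indefinitely:

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls condition every interval until it returns true, an error, or the timeout expires.
func waitFor(timeout, interval time.Duration, condition func() (bool, error)) error {
	start := time.Now()
	for timeout == 0 || time.Since(start) <= timeout {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("timeout of %s reached", timeout)
}

func main() {
	attempts := 0
	err := waitFor(time.Minute, time.Second, func() (bool, error) {
		attempts++
		return attempts == 3, nil // placeholder for checking the 'provisioned' state
	})
	fmt.Println("finished waiting, error:", err)
}
```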
func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { var elapsed time.Duration - timeout := d.Get("create_timeout_minutes").(int) + timeout := d.Get("operations_timeout_minutes").(int) currentState := "" start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies create_timeout_minutes=0, we wait forever + for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever rde, err := vcdClient.GetRdeById(rdeId) if err != nil { return "", err @@ -715,8 +768,7 @@ func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, } elapsed = time.Since(start) - time.Sleep(50 * time.Second) - + time.Sleep(30 * time.Second) } return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) } @@ -977,6 +1029,11 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) return "", err } + memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, clusterDetails) + if err != nil { + return "", err + } + args := map[string]string{ "ClusterName": clusterDetails.Name, "TargetNamespace": clusterDetails.Name + "-ns", @@ -1009,19 +1066,13 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) if _, ok := d.GetOk("virtual_ip_subnet"); ok { args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) } - if d.Get("node_health_check").(bool) { - args["MaxUnhealthyNodePercentage"] = fmt.Sprintf("%.0f%%", clusterDetails.MaxUnhealthyNodesPercentage) // With the 'percentage' suffix - args["NodeStartupTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeStartupTimeout) // With the 'second' suffix - args["NodeUnknownTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeUnknownTimeout) // With the 'second' suffix - args["NodeNotReadyTimeout"] = fmt.Sprintf("%ss", clusterDetails.NodeNotReadyTimeout) // With the 'second' suffix - } buf := &bytes.Buffer{} if err := capiYamlEmpty.Execute(buf, args); err != nil { return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) } // The final "pretty" YAML. To embed it in the final payload it must be marshaled into a one-line JSON string - prettyYaml := fmt.Sprintf("%s\n%s", nodePoolYaml, buf.String()) + prettyYaml := fmt.Sprintf("%s\n%s\n%s", memoryHealthCheckYaml, nodePoolYaml, buf.String()) // This encoder is used instead of a standard json.Marshal as the YAML contains special // characters that are not encoded properly, such as '<'. diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 8a8fb5dfb..50328860d 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -170,7 +170,6 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { auto_repair_on_errors = true node_health_check = true - create_timeout_minutes = 0 - delete_timeout_minutes = 0 + operations_timeout_minutes = 0 } ` diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index dcd6b28d2..d7acdb855 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -143,10 +143,11 @@ The following arguments are supported: CSE Server will automatically attempt to repair the cluster. 
Defaults to `false` * `node_health_check` - (Optional) After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules. Defaults to `false` -* `create_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster to be completely created, with a - ready-to-use Kubeconfig. `0` means wait indefinitely (not recommended as it could hang Terraform). Defaults to `60` -* `delete_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster to be deleted when it is marked - for deletion. `0` means wait indefinitely (not recommended as it could hang Terraform). Defaults to `10` +* `operations_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster operations to be successfully completed. + For example, during cluster creation/update, it should be in `provisioned` state before the timeout is reached, otherwise the + operation will return an error. For cluster deletion, this timeout specifies the time to wait until the cluster is completely deleted. + Setting this argument to `0` means to wait indefinitely (not recommended as it could hang Terraform if the cluster can't be created or deleted + due to a configuration error). Defaults to `60` ### Control Plane From 2cc0e9cc88d0db262e56c9344319fdb15df8c9ed Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 11:38:37 +0100 Subject: [PATCH 037/156] Fixes in update, not tested Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 105 +++++++++++++-------- 1 file changed, 64 insertions(+), 41 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index b712e1a32..f112e6e65 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -484,7 +484,11 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, yamlDocs = yamlDocs[:len(yamlDocs)-1] // Then we remove the last doc } else { // Add the YAML block - rawYaml, err := generateMemoryHealthCheckYaml(d, nil) + vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) + if err != nil { + return diag.FromErr(err) + } + rawYaml, err := generateMemoryHealthCheckYaml(d, *vcdKeConfig, d.Get("name").(string)) if err != nil { return diag.FromErr(err) } @@ -691,7 +695,7 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD } // generateMemoryHealthCheckYaml generates a YAML block corresponding to the Kubernetes memory health check. 
-func generateMemoryHealthCheckYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { +func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdKeConfig vcdKeConfig, clusterName string) (string, error) { if !d.Get("node_health_check").(bool) { return "", nil } @@ -701,16 +705,16 @@ func generateMemoryHealthCheckYaml(d *schema.ResourceData, clusterDetails *creat return "", err } - mhcEmptyTmpl := template.Must(template.New(clusterDetails.Name + "-mhc").Parse(mhcTmpl)) + mhcEmptyTmpl := template.Must(template.New(clusterName + "-mhc").Parse(mhcTmpl)) buf := &bytes.Buffer{} if err := mhcEmptyTmpl.Execute(buf, map[string]string{ - "ClusterName": clusterDetails.Name, - "TargetNamespace": clusterDetails.Name + "-ns", - "MaxUnhealthyNodePercentage": fmt.Sprintf("%.0f%%", clusterDetails.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix - "NodeStartupTimeout": fmt.Sprintf("%ss", clusterDetails.NodeStartupTimeout), // With the 'second' suffix - "NodeUnknownTimeout": fmt.Sprintf("%ss", clusterDetails.NodeUnknownTimeout), // With the 'second' suffix - "NodeNotReadyTimeout": fmt.Sprintf("%ss", clusterDetails.NodeNotReadyTimeout), // With the 'second' suffix + "ClusterName": clusterName, + "TargetNamespace": clusterName + "-ns", + "MaxUnhealthyNodePercentage": fmt.Sprintf("%.0f%%", vcdKeConfig.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix + "NodeStartupTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeStartupTimeout), // With the 'second' suffix + "NodeUnknownTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeUnknownTimeout), // With the 'second' suffix + "NodeNotReadyTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeNotReadyTimeout), // With the 'second' suffix }); err != nil { return "", fmt.Errorf("could not generate a correct Memory Health Check YAML: %s", err) } @@ -841,23 +845,29 @@ func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, // This is useful to avoid querying VCD too much, as the Terraform configuration works mostly with IDs, but we require names, among // other items that we eventually need to retrieve from VCD. type createClusterDto struct { - Name string - VcdUrl string - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) + Name string + VcdUrl string + Org *govcd.AdminOrg + VdcName string + OvaName string + CatalogName string + NetworkName string + RdeType *govcd.DefinedEntityType + UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) + VcdKeConfig vcdKeConfig + TkgVersion tkgVersionBundle + Owner string + ApiToken string +} + +// vcdKeConfig is a type that contains only the required and relevant fields from the CSE installation configuration, +// such as the Machine Health Check settings or the container registry URL. 
+type vcdKeConfig struct { MaxUnhealthyNodesPercentage float64 NodeStartupTimeout string NodeNotReadyTimeout string NodeUnknownTimeout string ContainerRegistryUrl string - TkgVersion tkgVersionBundle - Owner string - ApiToken string } // getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information @@ -962,6 +972,37 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC } } + vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) + if err != nil { + return nil, err + } + result.VcdKeConfig = *vcdKeConfig + + owner, ok := d.GetOk("owner") + if !ok { + sessionInfo, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) + } + owner = sessionInfo.User.Name + } + result.Owner = owner.(string) + + apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) + if err != nil { + return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) + } + result.ApiToken = apiToken.RefreshToken + + result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) + return result, nil +} + +// getVcdKeConfiguration gets the required information from the CSE Server configuration RDE +func getVcdKeConfiguration(d *schema.ResourceData, vcdClient *VCDClient) (*vcdKeConfig, error) { + currentCseVersion := supportedCseVersions[d.Get("cse_version").(string)] + result := &vcdKeConfig{} + rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", currentCseVersion[0], "vcdKeConfig") if err != nil { return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", currentCseVersion[0], err) @@ -990,24 +1031,6 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC result.NodeNotReadyTimeout = mhc["nodeUnknownTimeout"].(string) result.NodeUnknownTimeout = mhc["nodeNotReadyTimeout"].(string) } - - owner, ok := d.GetOk("owner") - if !ok { - sessionInfo, err := vcdClient.Client.GetSessionInfo() - if err != nil { - return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) - } - owner = sessionInfo.User.Name - } - result.Owner = owner.(string) - - apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) - if err != nil { - return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - result.ApiToken = apiToken.RefreshToken - - result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) return result, nil } @@ -1029,7 +1052,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) return "", err } - memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, clusterDetails) + memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, clusterDetails.VcdKeConfig, clusterDetails.Name) if err != nil { return "", err } @@ -1056,7 +1079,7 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, - "ContainerRegistryUrl": clusterDetails.ContainerRegistryUrl, + "ContainerRegistryUrl": clusterDetails.VcdKeConfig.ContainerRegistryUrl, "KubernetesVersion": 
clusterDetails.TkgVersion.KubernetesVersion, "SshPublicKey": d.Get("ssh_public_key").(string), } From f43fb210ffd8fe8abd0f6ca441f839f79e8fca36 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 11:57:26 +0100 Subject: [PATCH 038/156] Refactor Signed-off-by: abarreiro --- vcd/cse/tkg_versions.json | 92 ++++++++++++++++++++++ vcd/resource_vcd_cse_kubernetes_cluster.go | 44 ++++------- 2 files changed, 108 insertions(+), 28 deletions(-) create mode 100644 vcd/cse/tkg_versions.json diff --git a/vcd/cse/tkg_versions.json b/vcd/cse/tkg_versions.json new file mode 100644 index 000000000..0566126b9 --- /dev/null +++ b/vcd/cse/tkg_versions.json @@ -0,0 +1,92 @@ +{ + "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { + "tkg": "v2.4.0", + "etcd": "v3.5.7_vmware.6", + "coreDns": "v1.10.1_vmware.7" + }, + "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { + "tkg": "v2.4.0", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.10.1_vmware.7" + }, + "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { + "tkg": "v2.3.1", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.9.3_vmware.16" + }, + "v1.25.13+vmware.1-tkg.1-0031669997707d1c644156b8fc31ebst": { + "tkg": "v2.4.0", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.10.1_vmware.7" + }, + "v1.25.13+vmware.1-tkg.1-6f7650434fd3787d751e8fb3c9e2153d": { + "tkg": "v2.3.1", + "etcd": "v3.5.6_vmware.20", + "coreDns": "v1.9.3_vmware.11" + }, + "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { + "tkg": "v2.2.0", + "etcd": "v3.5.6_vmware.9", + "coreDns": "v1.9.3_vmware.8" + }, + "v1.24.17+vmware.1-tkg.1-9f70d901a7d851fb115411e6790fdeae": { + "tkg": "v2.3.1", + "etcd": "v3.5.6_vmware.19", + "coreDns": "v1.8.6_vmware.26" + }, + "v1.24.11+vmware.1-tkg.1-2ccb2a001f8bd8f15f1bfbc811071830": { + "tkg": "v2.2.0", + "etcd": "v3.5.6_vmware.10", + "coreDns": "v1.8.6_vmware.18" + }, + "v1.24.10+vmware.1-tkg.1-765d418b72c247c2310384e640ee075e": { + "tkg": "v2.1.1", + "etcd": "v3.5.6_vmware.6", + "coreDns": "v1.8.6_vmware.17" + }, + "v1.23.17+vmware.1-tkg.1-ee4d95d5d08cd7f31da47d1480571754": { + "tkg": "v2.2.0", + "etcd": "v3.5.6_vmware.11", + "coreDns": "v1.8.6_vmware.19" + }, + "v1.23.16+vmware.1-tkg.1-eb0de9755338b944ea9652e6f758b3ce": { + "tkg": "v2.1.1", + "etcd": "v3.5.6_vmware.5", + "coreDns": "v1.8.6_vmware.16" + }, + "v1.22.17+vmware.1-tkg.1-df08b304658a6cf17f5e74dc0ab7543c": { + "tkg": "v2.1.1", + "etcd": "v3.5.6_vmware.1", + "coreDns": "v1.8.4_vmware.10" + }, + "v1.22.9+vmware.1-tkg.1-2182cbabee08edf480ee9bc5866d6933": { + "tkg": "v1.5.4", + "etcd": "v3.5.4_vmware.2", + "coreDns": "v1.8.4_vmware.9" + }, + "v1.21.11+vmware.1-tkg.2-d788dbbb335710c0a0d1a28670057896": { + "tkg": "v1.5.4", + "etcd": "v3.4.13_vmware.27", + "coreDns": "v1.8.0_vmware.13" + }, + "v1.21.8+vmware.1-tkg.2-ed3c93616a02968be452fe1934a1d37c": { + "tkg": "v1.4.3", + "etcd": "v3.4.13_vmware.25", + "coreDns": "v1.8.0_vmware.11" + }, + "v1.20.15+vmware.1-tkg.2-839faf7d1fa7fa356be22b72170ce1a8": { + "tkg": "v1.5.4", + "etcd": "v3.4.13_vmware.23", + "coreDns": "v1.7.0_vmware.15" + }, + "v1.20.14+vmware.1-tkg.2-5a5027ce2528a6229acb35b38ff8084e": { + "tkg": "v1.4.3", + "etcd": "v3.4.13_vmware.23", + "coreDns": "v1.7.0_vmware.15" + }, + "v1.19.16+vmware.1-tkg.2-fba68db15591c15fcd5f26b512663a42": { + "tkg": "v1.4.3", + "etcd": "v3.4.13_vmware.19", + "coreDns": "v1.7.0_vmware.15" + } +} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index f112e6e65..87cbe6291 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go 
+++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -793,30 +793,6 @@ type tkgVersionBundle struct { // all the Kubernetes cluster components versions given a valid vApp Template name, that should // correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { - // TODO: This should be probably a JSON file - versionsMap := map[string]map[string]string{ - "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { - "tkg": "v2.2.0", - "etcd": "v3.5.6_vmware.9", - "coreDns": "v1.9.3_vmware.8", - }, - "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { - "tkg": "v2.4.0", - "etcd": "v3.5.7_vmware.6", - "coreDns": "v1.10.1_vmware.7", - }, - "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { - "tkg": "v2.4.0", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.10.1_vmware.7", - }, - "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { - "tkg": "v2.3.1", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.9.3_vmware.16", - }, - } - result := tkgVersionBundle{} if strings.Contains(ovaName, "photon") { @@ -828,16 +804,28 @@ func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, return result, fmt.Errorf("the vApp Template '%s' is not a Kubernetes template OVA", ovaName) } parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] - if _, ok := versionsMap[parsedOvaName]; !ok { + + b, err := os.ReadFile(filepath.Clean("cse/tkg_versions.json")) + if err != nil { + return result, fmt.Errorf("error reading cse/tkg_versions.json: %s", err) + } + + versionsMap := map[string]interface{}{} + err = json.Unmarshal(b, &versionsMap) + if err != nil { + return result, err + } + versionMap, ok := versionsMap[parsedOvaName] + if !ok { return result, fmt.Errorf("the Kubernetes OVA '%s' is not supported", parsedOvaName) } // The map checking above guarantees that all splits and replaces will work result.KubernetesVersion = strings.Split(parsedOvaName, "-")[0] result.TkrVersion = strings.ReplaceAll(strings.Split(parsedOvaName, "-")[0], "+", "---") + "-" + strings.Split(parsedOvaName, "-")[1] - result.TkgVersion = versionsMap[parsedOvaName]["tkg"] - result.EtcdVersion = versionsMap[parsedOvaName]["etcd"] - result.CoreDnsVersion = versionsMap[parsedOvaName]["coreDns"] + result.TkgVersion = versionMap.(map[string]interface{})["tkg"].(string) + result.EtcdVersion = versionMap.(map[string]interface{})["etcd"].(string) + result.CoreDnsVersion = versionMap.(map[string]interface{})["coreDns"].(string) return result, nil } From 9189d7f78bed562ac794a3c691a3f0fc25480733 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 13:28:15 +0100 Subject: [PATCH 039/156] Change local get files to remote get files, other improvements. 
Create works, update not tested Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 64 +++++++++++-------- ...esource_vcd_cse_kubernetes_cluster_test.go | 64 ++++++++++++++++++- ...ce_vcd_cse_kubernetes_cluster_unit_test.go | 64 ------------------- 3 files changed, 101 insertions(+), 91 deletions(-) delete mode 100644 vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 87cbe6291..5f32a0c11 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -16,7 +16,6 @@ import ( "github.com/vmware/go-vcloud-director/v2/util" "gopkg.in/yaml.v2" "os" - "path/filepath" "strconv" "strings" "text/template" @@ -317,7 +316,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create Kubernetes cluster: %s", err) } - entityMap, err := getCseKubernetesClusterCreationPayload(d, clusterDetails) + entityMap, err := getCseKubernetesClusterCreationPayload(d, vcdClient, clusterDetails) if err != nil { return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) } @@ -488,7 +487,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, if err != nil { return diag.FromErr(err) } - rawYaml, err := generateMemoryHealthCheckYaml(d, *vcdKeConfig, d.Get("name").(string)) + rawYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, *vcdKeConfig, d.Get("name").(string)) if err != nil { return diag.FromErr(err) } @@ -552,6 +551,13 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m vcdClient := meta.(*VCDClient) vcdKe := map[string]interface{}{} + // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling + // the log with these big payloads. + _ = os.Setenv("GOVCD_LOG_SKIP_HTTP_RESP", "1") + defer func() { + _ = os.Unsetenv("GOVCD_LOG_SKIP_HTTP_RESP") + }() + var elapsed time.Duration timeout := d.Get("operations_timeout_minutes").(int) start := time.Now() @@ -597,8 +603,8 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m // getCseKubernetesClusterCreationPayload gets the payload for the RDE that will trigger a Kubernetes cluster creation. // It generates a valid YAML that is embedded inside the RDE JSON, then it is returned as an unmarshaled // generic map, that allows to be sent to VCD as it is. -func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, clusterDetails *createClusterDto) (map[string]interface{}, error) { - capiYaml, err := generateCapiYaml(d, clusterDetails) +func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (map[string]interface{}, error) { + capiYaml, err := generateCapiYaml(d, vcdClient, clusterDetails) if err != nil { return nil, err } @@ -626,7 +632,7 @@ func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, clusterDetai args["DefaultStorageClassFileSystem"] = d.Get("default_storage_class.0.filesystem").(string) } - rdeTmpl, err := getCseTemplateFile(d, "rde") + rdeTmpl, err := getCseTemplateFile(d, vcdClient, "rde") if err != nil { return nil, err } @@ -647,8 +653,8 @@ func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, clusterDetai } // generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. 
-func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { - nodePoolTmpl, err := getCseTemplateFile(d, "capiyaml_nodepool") +func generateNodePoolYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (string, error) { + nodePoolTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_nodepool") if err != nil { return "", err } @@ -695,12 +701,12 @@ func generateNodePoolYaml(d *schema.ResourceData, clusterDetails *createClusterD } // generateMemoryHealthCheckYaml generates a YAML block corresponding to the Kubernetes memory health check. -func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdKeConfig vcdKeConfig, clusterName string) (string, error) { +func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfig vcdKeConfig, clusterName string) (string, error) { if !d.Get("node_health_check").(bool) { return "", nil } - mhcTmpl, err := getCseTemplateFile(d, "capiyaml_mhc") + mhcTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_mhc") if err != nil { return "", err } @@ -728,6 +734,13 @@ func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdKeConfig vcdKeConf func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { var elapsed time.Duration timeout := d.Get("operations_timeout_minutes").(int) + + // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling + // the log with these big payloads. + _ = os.Setenv("GOVCD_LOG_SKIP_HTTP_RESP", "1") + defer func() { + _ = os.Unsetenv("GOVCD_LOG_SKIP_HTTP_RESP") + }() currentState := "" start := time.Now() @@ -792,7 +805,7 @@ type tkgVersionBundle struct { // getTkgVersionBundleFromVAppTemplateName returns a tkgVersionBundle with the details of // all the Kubernetes cluster components versions given a valid vApp Template name, that should // correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. -func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, error) { +func getTkgVersionBundleFromVAppTemplateName(vcdClient *VCDClient, ovaName string) (tkgVersionBundle, error) { result := tkgVersionBundle{} if strings.Contains(ovaName, "photon") { @@ -805,13 +818,14 @@ func getTkgVersionBundleFromVAppTemplateName(ovaName string) (tkgVersionBundle, } parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] - b, err := os.ReadFile(filepath.Clean("cse/tkg_versions.json")) + // FIXME: This points to my fork, but should point to final version!! 
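+	// Note: tkg_versions.json maps each supported Kubernetes template OVA identifier to the TKG, etcd and CoreDNS versions it ships with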
+ file, err := fileFromUrlToString(vcdClient, "https://raw.githubusercontent.com/adambarreiro/terraform-provider-vcd/add-cse-cluster-resource/vcd/cse/tkg_versions.json", "json") if err != nil { - return result, fmt.Errorf("error reading cse/tkg_versions.json: %s", err) + return result, fmt.Errorf("error reading tkg_versions.json: %s", err) } versionsMap := map[string]interface{}{} - err = json.Unmarshal(b, &versionsMap) + err = json.Unmarshal([]byte(file), &versionsMap) if err != nil { return result, err } @@ -887,7 +901,7 @@ func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createC } result.OvaName = vAppTemplate.VAppTemplate.Name - tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vAppTemplate.VAppTemplate.Name) + tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vcdClient, vAppTemplate.VAppTemplate.Name) if err != nil { return nil, err } @@ -1025,8 +1039,8 @@ func getVcdKeConfiguration(d *schema.ResourceData, vcdClient *VCDClient) (*vcdKe // generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded // in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the createClusterDto to // populate several Go templates and build a final YAML. -func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) (string, error) { - clusterTmpl, err := getCseTemplateFile(d, "capiyaml_cluster") +func generateCapiYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (string, error) { + clusterTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_cluster") if err != nil { return "", err } @@ -1035,12 +1049,12 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(clusterTmpl) capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "-cluster").Parse(sanitizedTemplate)) - nodePoolYaml, err := generateNodePoolYaml(d, clusterDetails) + nodePoolYaml, err := generateNodePoolYaml(d, vcdClient, clusterDetails) if err != nil { return "", err } - memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, clusterDetails.VcdKeConfig, clusterDetails.Name) + memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, clusterDetails.VcdKeConfig, clusterDetails.Name) if err != nil { return "", err } @@ -1100,16 +1114,14 @@ func generateCapiYaml(d *schema.ResourceData, clusterDetails *createClusterDto) } // getCseTemplateFile gets a Go template file corresponding to the CSE version set in the Terraform configuration -func getCseTemplateFile(d *schema.ResourceData, templateName string) (string, error) { +func getCseTemplateFile(d *schema.ResourceData, vcdClient *VCDClient, templateName string) (string, error) { cseVersion := d.Get("cse_version").(string) // In the future, we can put here some logic for equivalent CSE versions, to avoid duplicating the same Go // templates that didn't change among versions. - t := fmt.Sprintf("cse/%s/%s.tmpl", cseVersion, templateName) - b, err := os.ReadFile(filepath.Clean(t)) - if err != nil { - return "", fmt.Errorf("error reading '%s': %s", t, err) - } - return string(b), nil + // FIXME: This points to my fork, but should point to the final URL!! 
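+	// For example, cse_version "4.2" and templateName "capiyaml_cluster" resolve to '.../vcd/cse/4.2/capiyaml_cluster.tmpl'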
+ t := fmt.Sprintf("https://raw.githubusercontent.com/adambarreiro/terraform-provider-vcd/add-cse-cluster-resource/vcd/cse/%s/%s.tmpl", cseVersion, templateName) + + return fileFromUrlToString(vcdClient, t, "tmpl") } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 50328860d..ce32686ae 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -6,8 +6,10 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" + "reflect" "strings" "testing" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -30,6 +32,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } }() + now := time.Now() var params = StringMap{ "Name": strings.ToLower(t.Name()), "OvaCatalog": testConfig.Cse.OvaCatalog, @@ -39,6 +42,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "Vdc": testConfig.Cse.Vdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, + "TokenName": fmt.Sprintf("%s%d%d%d", strings.ToLower(t.Name()), now.Day(), now.Hour(), now.Minute()), "TokenFile": tokenFilename, } testParamsNotEmpty(t, params) @@ -130,7 +134,7 @@ data "vcd_storage_profile" "sp" { } resource "vcd_api_token" "token" { - name = "{{.Name}}83" + name = "{{.TokenName}}" file_name = "{{.TokenFile}}" allow_token_file = true } @@ -173,3 +177,61 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { operations_timeout_minutes = 0 } ` + +// Test_getTkgVersionBundleFromVAppTemplateName requires connectivity with GitHub, as it fetches the 'tkg_versions.json' file. +// This tests asserts that getTkgVersionBundleFromVAppTemplateName works correctly, retrieving the correct TKG versions from that file. 
+func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { + vcdClient := createSystemTemporaryVCDConnection() + tests := []struct { + name string + ovaName string + want tkgVersionBundle + wantErr string + }{ + { + name: "wrong ova name", + ovaName: "randomOVA", + want: tkgVersionBundle{}, + wantErr: "the vApp Template 'randomOVA' is not a Kubernetes template OVA", + }, + { + name: "not supported ova", + ovaName: "ubuntu-2004-kube-v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st", + want: tkgVersionBundle{}, + wantErr: "the Kubernetes OVA 'v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st' is not supported", + }, + { + name: "not supported photon ova", + ovaName: "photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46", + want: tkgVersionBundle{}, + wantErr: "the vApp Template 'photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46' uses Photon, and it is not supported", + }, + { + name: "supported ova", + ovaName: "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8", + want: tkgVersionBundle{ + EtcdVersion: "v3.5.6_vmware.20", + CoreDnsVersion: "v1.9.3_vmware.16", + TkgVersion: "v2.3.1", + TkrVersion: "v1.26.8---vmware.1-tkg.1", + KubernetesVersion: "v1.26.8+vmware.1", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getTkgVersionBundleFromVAppTemplateName(vcdClient, tt.ovaName) + if err != nil { + if tt.wantErr == "" { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got error = %v, but should have not failed", err) + } + if err.Error() != tt.wantErr { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() error = %v, wantErr = %v", err, tt.wantErr) + } + } + if !reflect.DeepEqual(got, tt.want) { + t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got = %v, want = %v", got, tt.want) + } + }) + } +} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go deleted file mode 100644 index 4e744e053..000000000 --- a/vcd/resource_vcd_cse_kubernetes_cluster_unit_test.go +++ /dev/null @@ -1,64 +0,0 @@ -//go:build unit || ALL - -package vcd - -import ( - "reflect" - "testing" -) - -// Test_getTkgVersionBundleFromVAppTemplateName is a unit test that tests the getTkgVersionBundleFromVAppTemplateName function -func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { - tests := []struct { - name string - ovaName string - want tkgVersionBundle - wantErr string - }{ - { - name: "wrong ova name", - ovaName: "randomOVA", - want: tkgVersionBundle{}, - wantErr: "the vApp Template 'randomOVA' is not a Kubernetes template OVA", - }, - { - name: "not supported ova", - ovaName: "ubuntu-2004-kube-v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st", - want: tkgVersionBundle{}, - wantErr: "the Kubernetes OVA 'v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st' is not supported", - }, - { - name: "not supported photon ova", - ovaName: "photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46", - want: tkgVersionBundle{}, - wantErr: "the vApp Template 'photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46' uses Photon, and it is not supported", - }, - { - name: "supported ova", - ovaName: "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8", - want: tkgVersionBundle{ - EtcdVersion: "v3.5.6_vmware.20", - CoreDnsVersion: "v1.9.3_vmware.16", - TkgVersion: "v2.3.1", - TkrVersion: "v1.26.8---vmware.1-tkg.1", - KubernetesVersion: "v1.26.8+vmware.1", - }, - }, - } - 
for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getTkgVersionBundleFromVAppTemplateName(tt.ovaName) - if err != nil { - if tt.wantErr == "" { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got error = %v, but should have not failed", err) - } - if err.Error() != tt.wantErr { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() error = %v, wantErr = %v", err, tt.wantErr) - } - } - if !reflect.DeepEqual(got, tt.want) { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got = %v, want = %v", got, tt.want) - } - }) - } -} From b0432047a84218b629e74cc2b8d10bdc60a2e07c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 13:28:52 +0100 Subject: [PATCH 040/156] Change local get files to remote get files, other improvements. Create works, update not tested Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index ce32686ae..af13c639c 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -178,7 +178,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { } ` -// Test_getTkgVersionBundleFromVAppTemplateName requires connectivity with GitHub, as it fetches the 'tkg_versions.json' file. +// Test_getTkgVersionBundleFromVAppTemplateName requires connectivity with GitHub (that's why it's not labeled as a unit test), +// as it fetches the 'tkg_versions.json' file. // This tests asserts that getTkgVersionBundleFromVAppTemplateName works correctly, retrieving the correct TKG versions from that file. func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { vcdClient := createSystemTemporaryVCDConnection() From 748aab5f552a4cb96d3a6d22be6f2f8c7890f8ff Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:01:11 +0100 Subject: [PATCH 041/156] Fix missing ForceNews Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 5f32a0c11..16f91a6de 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -173,6 +173,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "name": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: "The name of this node pool", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), @@ -227,11 +228,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "storage_profile_id": { Required: true, + ForceNew: true, Type: schema.TypeString, Description: "ID of the storage profile to use for the storage class", }, "name": { Required: true, + ForceNew: true, Type: schema.TypeString, Description: "Name to give to this storage class", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ @@ -239,12 +242,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "reclaim_policy": { Required: true, + ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), 
Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", }, "filesystem": { Required: true, + ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", From 14148cacfd657e898aea1bae6c48e2e156b20bc4 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:13:28 +0100 Subject: [PATCH 042/156] Fix few things Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 8 ++++---- .../docs/r/cse_kubernetes_cluster.html.markdown | 17 +++++++++++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 16f91a6de..7ee3cd78a 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -133,8 +133,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Optional: true, Default: 20, // As suggested in UI ForceNew: true, - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), - Description: "Disk size, in Gibibytes, for the control plane nodes. Must be at least 20", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes (Gi) must be at least 20"), + Description: "Disk size, in Gibibytes (Gi), for the control plane nodes. Must be at least 20", }, "sizing_policy_id": { Type: schema.TypeString, @@ -190,8 +190,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Optional: true, Default: 20, // As suggested in UI ForceNew: true, - Description: "Disk size, in Gibibytes, for the control plane nodes", - ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes must be at least 20"), + Description: "Disk size, in Gibibytes (Gi), for the control plane nodes", + ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes (Gi) must be at least 20"), }, "sizing_policy_id": { Type: schema.TypeString, diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index d7acdb855..1cd6c6748 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -157,7 +157,7 @@ in every resource. This block asks for the following arguments: * `machine_count` - (Optional) The number of nodes that the control plane has. Must be an odd number and higher than `0`. Defaults to `3` -* `disk_size_gi` - (Optional) Disk size, in **Gibibytes**, for the control plane VMs. Must be at least `20`. Defaults to `20` +* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the control plane VMs. Must be at least `20`. Defaults to `20` * `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the ones made available during CSE installation * `placement_policy_id` - (Optional) VM Placement policy for the control plane VMs * `storage_profile_id` - (Optional) Storage profile for the control plane VMs @@ -172,7 +172,7 @@ Each block asks for the following arguments: * `name` - (Required) The name of the node pool. It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters * `machine_count` - (Optional) The number of VMs that the node pool has. Must be higher than `0`. 
Defaults to `1` -* `disk_size_gi` - (Optional) Disk size, in **Gibibytes**, for the node pool VMs. Must be at least `20`. Defaults to `20` +* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the node pool VMs. Must be at least `20`. Defaults to `20` * `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the ones made available during CSE installation * `placement_policy_id` - (Optional) VM Placement policy for the node pool VMs. If this one is set, `vgpu_policy_id` must be empty * `vgpu_policy_id` - (Optional) vGPU policy for the node pool VMs. If this one is set, `placement_policy_id` must be empty @@ -202,10 +202,19 @@ The following attributes are available for consumption as computed attributes: ## Updating -Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, -as stated [in their documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html). +Only the following arguments can be updated: + +* `ova_id`: The cluster must allow upgrading to the new TKG version +* `machine_count` of the `control_plane`: Supports scaling up and down +* `machine_count` of any `node_pool`: Supports scaling up and down +* `auto_repair_on_errors` +* `node_health_check` +* `operations_timeout_minutes`: Does not require modifying the existing cluster +Updating any other argument will delete the existing cluster and create a new one, if the Terraform plan is applied. +Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, +as stated [in the official documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html). 
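+
+As an example of an in-place update, scaling a node pool only requires changing its `machine_count` and applying the plan again.
+The following sketch assumes a cluster that was created with a node pool named `node-pool-1` of a single node, and that the rest
+of the arguments are kept exactly as they were at creation time:
+
+```hcl
+resource "vcd_cse_kubernetes_cluster" "my_cluster" {
+  # ... all the arguments used during cluster creation remain unchanged ...
+
+  node_pool {
+    name          = "node-pool-1" # The name of an existing node pool cannot be changed
+    machine_count = 3             # Scaled up from 1; applied in place without re-creating the cluster
+  }
+}
+```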
## Importing From e4d01c50f8ace070064930f1d5dcef91c57a7395 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:16:15 +0100 Subject: [PATCH 043/156] Remove unused attr Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 5 ----- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 1 - 2 files changed, 6 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 7ee3cd78a..b3d08c208 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -305,11 +305,6 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, - "raw_cluster_rde_json": { - Type: schema.TypeString, - Computed: true, - Description: "The raw JSON that describes the cluster configuration inside the Runtime Defined Entity", - }, }, } } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index af13c639c..343787a0b 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -77,7 +77,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), - resource.TestCheckResourceAttrSet(clusterName, "raw_cluster_rde_json"), ), }, }, From 677328df6579862329da73a65197a28d793c40cf Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:33:06 +0100 Subject: [PATCH 044/156] Complete computed attributes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 113 ++++++++++++++++-- .../r/cse_kubernetes_cluster.html.markdown | 18 ++- 2 files changed, 120 insertions(+), 11 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index b3d08c208..625b1c202 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -107,9 +107,10 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Description: "The SSH public key used to login into the cluster nodes", }, "control_plane": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: "Defines the control plane for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "machine_count": { @@ -157,17 +158,19 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "ip": { Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, - Description: "IP for the control plane", + Description: "IP for the control plane. 
It will be automatically assigned during cluster creation if left empty", ValidateFunc: checkEmptyOrSingleIP(), }, }, }, }, "node_pool": { - Type: schema.TypeSet, - Required: true, - MinItems: 1, + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Description: "Defines a node pool for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -221,9 +224,10 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "default_storage_class": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Defines the default storage class for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "storage_profile_id": { @@ -295,6 +299,37 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "specifies the time to wait until the cluster is completely deleted. Setting this argument to `0` means to wait indefinitely", ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), }, + "kubernetes_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of Kubernetes installed in this cluster", + }, + "tkg_product_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of TKG installed in this cluster", + }, + "capvcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of CAPVCD used by this cluster", + }, + "cluster_resource_set_bindings": { + Type: schema.TypeSet, + Computed: true, + Description: "The cluster resource set bindings of this cluster", + Elem: schema.TypeString, + }, + "cpi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Cloud Provider Interface used by this cluster", + }, + "csi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Container Storage Interface used by this cluster", + }, "state": { Type: schema.TypeString, Computed: true, @@ -305,6 +340,55 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, + "persistent_volumes": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of persistent volumes that are present in the cluster, only available when a 'default_storage_class' was provided during cluster creation", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Computed: true, + Type: schema.TypeString, + Description: "The name of the persistent volume", + }, + "status": { + Computed: true, + Type: schema.TypeString, + Description: "The status of the persistent volume", + }, + "shared": { + Computed: true, + Type: schema.TypeString, + Description: "Whether the persistent volume is shared or not", + }, + "attached_node_count": { + Computed: true, + Type: schema.TypeInt, + Description: "How many nodes are consuming the persistent volume", + }, + "iops": { + Computed: true, + Type: schema.TypeInt, + Description: "I/O operations per second for the persistent volume", + }, + "size": { + Computed: true, + Type: schema.TypeInt, + Description: "Size of the persistent volume", + }, + "storage_profile": { + Computed: true, + Type: schema.TypeString, + Description: "Storage profile name of the persistent volume", + }, + "owner": { + Computed: true, + Type: schema.TypeString, + Description: "Owner of the persistent volume", + }, + }, + }, + }, }, } } @@ -390,6 +474,15 @@ func 
resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met }) } + // TODO: Set + // kubernetes_version + // tkg_product_version + // capvcd_version + // cluster_resource_set_bindings + // cpi_version + // csi_version + // persistent_volumes + d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability if len(diags) > 0 { return diags diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 1cd6c6748..4e3fc5b9f 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -161,7 +161,7 @@ This block asks for the following arguments: * `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the ones made available during CSE installation * `placement_policy_id` - (Optional) VM Placement policy for the control plane VMs * `storage_profile_id` - (Optional) Storage profile for the control plane VMs -* `ip` - (Optional) A custom IP to use for the control plane +* `ip` - (Optional) IP for the control plane. It will be automatically assigned during cluster creation if left empty ### Node Pools @@ -195,10 +195,26 @@ If defined, the block asks for the following arguments: The following attributes are available for consumption as computed attributes: +* `kubernetes_version` - The version of Kubernetes installed in this cluster +* `tkg_product_version` - The version of TKG installed in this cluster +* `capvcd_version` - The version of CAPVCD used by this cluster +* `cluster_resource_set_bindings` - The cluster resource set bindings of this cluster +* `cpi_version` - The version of the Cloud Provider Interface used by this cluster +* `csi_version` - The version of the Container Storage Interface used by this cluster * `state` - The Kubernetes cluster status, can be `provisioning` when it is being created, `provisioned` when it was successfully created and ready to use, or `error` when an error occurred. `provisioning` can only be obtained when a timeout happens during cluster creation. `error` can only be obtained either with a timeout or when `auto_repair_on_errors=false`. * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. 
Only available when `state=provisioned` +* `persistent_volumes` - A set of persistent volumes that are present in the cluster, only available when a `default_storage_class` was provided during + cluster creation: + * `name` - The name of the persistent volume + * `status` - The status of the persistent volume + * `shared` - Whether the persistent volume is shared or not + * `attached_node_count` - How many nodes are consuming the persistent volume + * `iops` - I/O operations per second for the persistent volume + * `size` - Size of the persistent volume + * `storage_profile` - Storage profile name of the persistent volume + * `owner` - Owner of the persistent volume ## Updating From d3448ceb195e1cb75a3b7d096efd7dd524fedd13 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:33:29 +0100 Subject: [PATCH 045/156] Complete computed attributes Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 4e3fc5b9f..9fcf5fb9e 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -193,7 +193,7 @@ If defined, the block asks for the following arguments: ## Attribute Reference -The following attributes are available for consumption as computed attributes: +The following attributes are available for consumption as read-only attributes: * `kubernetes_version` - The version of Kubernetes installed in this cluster * `tkg_product_version` - The version of TKG installed in this cluster From 77122f1bb09fe98e2dd9fea46905804cb310a0e1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:33:50 +0100 Subject: [PATCH 046/156] Complete computed attributes Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 9fcf5fb9e..eb9ab89af 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -193,7 +193,7 @@ If defined, the block asks for the following arguments: ## Attribute Reference -The following attributes are available for consumption as read-only attributes: +The following attributes are available for consumption as read-only attributes after a successful cluster creation: * `kubernetes_version` - The version of Kubernetes installed in this cluster * `tkg_product_version` - The version of TKG installed in this cluster From 766b990793d866ecc98bc15239934bdfb71c4a0a Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 14:41:59 +0100 Subject: [PATCH 047/156] Docs Signed-off-by: abarreiro --- .../r/cse_kubernetes_cluster.html.markdown | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index eb9ab89af..83dce6c56 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -112,6 +112,9 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { node_health_check = false } +output "kubeconfig" { + value = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig +} ``` ## Argument Reference @@ -232,6 +235,24 @@ Updating any other argument will delete the existing cluster 
and create a new on Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, as stated [in the official documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html). +## Accessing the Kubernetes cluster + +To retrieve the Kubeconfig of a created cluster, you may set it as an output: + +```hcl +output "kubeconfig" { + value = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig +} +``` + +Then, creating a file turns out to be trivial: + +```shell +terraform output kubeconfig > $HOME/kubeconfig +``` + +The Kubeconfig can now be used with `kubectl` and the Kubernetes cluster can be used. + ## Importing !!!!!!!!!!! TODO: NOT IMPLEMENTED. HOW TO DEAL WITH REQUIRED IDS? From 5d33a6939764f281e7dc0970cfa3b72b54dc09f4 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 15:02:01 +0100 Subject: [PATCH 048/156] Move Required to Optionals and create validator Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 69 +++++++++++++++---- .../r/cse_kubernetes_cluster.html.markdown | 17 +---- 2 files changed, 58 insertions(+), 28 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 625b1c202..09d532db8 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -42,7 +42,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "cse_version": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, ValidateFunc: validation.StringInSlice(getKeys(supportedCseVersions), false), Description: "The CSE version to use", @@ -57,7 +57,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "name": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "The name of the Kubernetes cluster", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ @@ -65,7 +65,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "ova_id": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", }, @@ -78,13 +78,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "vdc_id": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "The ID of the VDC that hosts the Kubernetes cluster", }, "network_id": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "The ID of the network that the Kubernetes cluster will use", }, @@ -96,7 +96,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "api_token_file": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. 
Be careful about this file, as it contains sensitive information", }, @@ -109,7 +109,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "control_plane": { Type: schema.TypeList, MaxItems: 1, - Required: true, + Optional: true, // Required, but validated at runtime Description: "Defines the control plane for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -168,14 +168,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "node_pool": { Type: schema.TypeSet, - Required: true, - MinItems: 1, + Optional: true, // Required, but validated at runtime Description: "Defines a node pool for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Description: "The name of this node pool", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ @@ -231,13 +230,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "storage_profile_id": { - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Type: schema.TypeString, Description: "ID of the storage profile to use for the storage class", }, "name": { - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Type: schema.TypeString, Description: "Name to give to this storage class", @@ -245,14 +244,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "reclaim_policy": { - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", }, "filesystem": { - Required: true, + Optional: true, // Required, but validated at runtime ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), @@ -393,7 +392,44 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { } } +// validateCseKubernetesClusterSchema validates all the required arguments at runtime. All the required arguments +// are marked as Optional in the schema to facilitate the Import operation, but some of them are actually mandatory. +func validateCseKubernetesClusterSchema(d *schema.ResourceData) diag.Diagnostics { + var diags diag.Diagnostics + for _, arg := range []string{"cse_version", "name", "ova_id", "vdc_id", "network_id", "api_token_file", "control_plane", "node_pool"} { + if _, ok := d.GetOk(arg); !ok { + diags = append(diags, diag.Errorf("the argument '%s' is required, but no definition was found", arg)...) + } + } + nodePoolsRaw := d.Get("node_pool").(*schema.Set).List() + for _, nodePoolRaw := range nodePoolsRaw { + nodePool := nodePoolRaw.(map[string]interface{}) + for _, arg := range []string{"name"} { + if _, ok := nodePool[arg]; !ok { + diags = append(diags, diag.Errorf("the argument 'node_pool.%s' is required, but no definition was found", arg)...) 
+ } + } + } + if _, ok := d.GetOk("default_storage_class"); ok { + for _, arg := range []string{"storage_profile_id", "name", "reclaim_policy", "filesystem"} { + if _, ok := d.GetOk("default_storage_class.0." + arg); !ok { + diags = append(diags, diag.Errorf("the argument 'default_storage_class.%s' is required, but no definition was found", arg)...) + } + } + } + + if len(diags) > 0 { + return diags + } + return nil +} + func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + diags := validateCseKubernetesClusterSchema(d) + if diags.HasError() && len(diags) > 0 { + return diags + } + vcdClient := meta.(*VCDClient) clusterDetails, err := getClusterCreateDto(d, vcdClient) if err != nil { @@ -491,6 +527,11 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met } func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + diags := validateCseKubernetesClusterSchema(d) + if diags.HasError() && len(diags) > 0 { + return diags + } + vcdClient := meta.(*VCDClient) // Some arguments don't require changes in the backend diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 83dce6c56..0c9b7fb85 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -267,21 +267,10 @@ An example is below: # This is just a snippet of code that will host the imported cluster from VCD. # This must not be created with Terraform beforehand resource "vcd_cse_kubernetes_cluster" "imported_cluster" { - # Only the required arguments are needed - cse_version = "4.2" - name = "my-cluster" - ova_id = data.vcd_catalog_vapp_template.tkg_ova.id - org = "tenant_org" - vdc_id = data.vcd_org_vdc.vdc.id - network_id = data.vcd_network_routed_v2.routed.id - api_token_file = vcd_api_token.token.file_name - - control_plane { - - } - + # There is no need to provide any Required argument here, the Import operation + # will set them node_pool { - name = "node-pool-1" + } } ``` From 413a58f9436dc6775811d18e8047dbb8b965fef7 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 15:05:10 +0100 Subject: [PATCH 049/156] Move Required to Optionals and create validator Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 09d532db8..b014f977f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -426,7 +426,7 @@ func validateCseKubernetesClusterSchema(d *schema.ResourceData) diag.Diagnostics func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diags := validateCseKubernetesClusterSchema(d) - if diags.HasError() && len(diags) > 0 { + if diags != nil { return diags } @@ -528,7 +528,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diags := validateCseKubernetesClusterSchema(d) - if diags.HasError() && len(diags) > 0 { + if diags != nil { return diags } From 4a50b8d8a72b7da1609a03c6255228ffcaa0207d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 19 Jan 2024 15:32:11 +0100 Subject: [PATCH 
050/156] Fixes in import Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 67 ++++++++++++++++++- .../r/cse_kubernetes_cluster.html.markdown | 9 +-- 2 files changed, 68 insertions(+), 8 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index b014f977f..41600b978 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -39,6 +39,9 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ReadContext: resourceVcdCseKubernetesRead, UpdateContext: resourceVcdCseKubernetesUpdate, DeleteContext: resourceVcdCseKubernetesDelete, + Importer: &schema.ResourceImporter{ + StateContext: resourceVcdCseKubernetesImport, + }, Schema: map[string]*schema.Schema{ "cse_version": { Type: schema.TypeString, @@ -317,7 +320,9 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeSet, Computed: true, Description: "The cluster resource set bindings of this cluster", - Elem: schema.TypeString, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, }, "cpi_version": { Type: schema.TypeString, @@ -734,6 +739,66 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m return diag.Errorf("timeout of %d minutes reached, the cluster was not marked for deletion, please try again", timeout) } +func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + vcdClient := meta.(*VCDClient) + + resourceURI := strings.Split(d.Id(), ImportSeparator) + var rdeId, cseVersion string + switch len(resourceURI) { + case 2: // ImportSeparator != '.' + cseVersion = resourceURI[0] + rdeId = resourceURI[1] + case 3: // ImportSeparator == '.' + cseVersion = fmt.Sprintf("%s.%s", resourceURI[0], resourceURI[1]) + rdeId = resourceURI[2] + default: + return nil, fmt.Errorf("resource name must be specified as cse_version.cluster_id") + } + + rde, err := vcdClient.GetRdeById(rdeId) + if err != nil { + return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + // TODO: Set the Optional fields that are Required at runtime + dSet(d, "cse_version", cseVersion) + dSet(d, "name", rde.DefinedEntity.Name) + dSet(d, "ova_id", rde.DefinedEntity.Name) + dSet(d, "vdc_id", rde.DefinedEntity.Name) + dSet(d, "network_id", rde.DefinedEntity.Name) + dSet(d, "api_token_file", "******") + /*err = d.Set("control_plane", []map[string]interface{}{ + { + "name": "a", + }, + }) + if err != nil { + return nil, fmt.Errorf("error importing the control plane of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + err = d.Set("node_pool", []map[string]interface{}{ + { + "name": "node-pool-1", + }, + { + "name": "node-pool-2", + }, + }) + if err != nil { + return nil, fmt.Errorf("error importing node pools of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + err = d.Set("default_storage_class", []map[string]interface{}{ + { + "name": "sc-1", + }, + }) + if err != nil { + return nil, fmt.Errorf("error importing node pools of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + }*/ + + d.SetId(rde.DefinedEntity.ID) + return []*schema.ResourceData{d}, nil +} + // getCseKubernetesClusterCreationPayload gets the payload for the RDE that will trigger a Kubernetes cluster creation. // It generates a valid YAML that is embedded inside the RDE JSON, then it is returned as an unmarshaled // generic map, that allows to be sent to VCD as it is. 
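+// The resulting map is stored as the RDE contents, where the CAPI YAML ends up as a single JSON-escaped
+// string under 'spec.capiYaml'.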
diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 0c9b7fb85..b63019665 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -260,23 +260,18 @@ The Kubeconfig can now be used with `kubectl` and the Kubernetes cluster can be ~> The current implementation of Terraform import can only import resources into the state. It does not generate configuration. [More information.](https://www.terraform.io/docs/import/) -An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the Cluster (RDE) ID for it. +An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the CSE Version installed in VCD and the Cluster (RDE) ID for it. An example is below: ```hcl # This is just a snippet of code that will host the imported cluster from VCD. # This must not be created with Terraform beforehand resource "vcd_cse_kubernetes_cluster" "imported_cluster" { - # There is no need to provide any Required argument here, the Import operation - # will set them - node_pool { - - } } ``` ```sh -terraform import vcd_cse_kubernetes_cluster.imported_cluster urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 +terraform import vcd_cse_kubernetes_cluster.imported_cluster 4.2.urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 ``` -> The ID is required as it is the only way to unequivocally identify a Kubernetes cluster inside VCD. To obtain the ID From c6f397f6e0a9662574a821196593281cdb55c49a Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 22 Jan 2024 15:37:29 +0100 Subject: [PATCH 051/156] Start testing Update op Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 55 ++++++++++++++----- 1 file changed, 41 insertions(+), 14 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 343787a0b..35d59ef6d 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -34,20 +34,31 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { now := time.Now() var params = StringMap{ - "Name": strings.ToLower(t.Name()), - "OvaCatalog": testConfig.Cse.OvaCatalog, - "OvaName": testConfig.Cse.OvaName, - "SolutionsOrg": testConfig.Cse.SolutionsOrg, - "TenantOrg": testConfig.Cse.TenantOrg, - "Vdc": testConfig.Cse.Vdc, - "EdgeGateway": testConfig.Cse.EdgeGateway, - "Network": testConfig.Cse.RoutedNetwork, - "TokenName": fmt.Sprintf("%s%d%d%d", strings.ToLower(t.Name()), now.Day(), now.Hour(), now.Minute()), - "TokenFile": tokenFilename, + "Name": strings.ToLower(t.Name()), + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.Vdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenName": fmt.Sprintf("%s%d%d%d", strings.ToLower(t.Name()), now.Day(), now.Hour(), now.Minute()), + "TokenFile": tokenFilename, + "ControlPlaneCount": 1, + "NodePoolCount": 1, } testParamsNotEmpty(t, params) - configText := templateFill(testAccVcdCseKubernetesCluster, params) + step1 := templateFill(testAccVcdCseKubernetesCluster, params) + + params["FuncName"] = t.Name() + "Step2" + params["ControlPlaneCount"] = 2 + step2 := templateFill(testAccVcdCseKubernetesCluster, 
params) + + params["FuncName"] = t.Name() + "Step3" + params["ControlPlaneCount"] = 1 + params["NodePoolCount"] = 2 + step3 := templateFill(testAccVcdCseKubernetesCluster, params) if vcdShortTest { t.Skip(acceptanceTestsSkipped) @@ -70,7 +81,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { }, Steps: []resource.TestStep{ { - Config: configText, + Config: step1, Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttrSet(clusterName, "id"), @@ -79,6 +90,22 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), ), }, + { + Config: step2, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + ), + }, + { + Config: step3, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + ), + }, }, }) postTestChecks(t) @@ -149,7 +176,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { api_token_file = vcd_api_token.token.file_name control_plane { - machine_count = 1 + machine_count = {{.ControlPlaneCount}} disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id storage_profile_id = data.vcd_storage_profile.sp.id @@ -157,7 +184,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { node_pool { name = "node-pool-1" - machine_count = 1 + machine_count = {{.NodePoolCount}} disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id storage_profile_id = data.vcd_storage_profile.sp.id From fe3b333a4af57601f6442fe60b19a3fff12f6f41 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 23 Jan 2024 13:29:13 +0100 Subject: [PATCH 052/156] Fixes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 119 ++---------------- .../r/cse_kubernetes_cluster.html.markdown | 5 +- 2 files changed, 15 insertions(+), 109 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 41600b978..9c14d1705 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -14,7 +14,6 @@ import ( "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" "github.com/vmware/go-vcloud-director/v2/util" - "gopkg.in/yaml.v2" "os" "strconv" "strings" @@ -188,7 +187,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Optional: true, Default: 1, // As suggested in UI Description: "The number of nodes that this node pool has. Must be higher than 0", - ValidateDiagFunc: minimumValue(1, "number of nodes must be higher than 0"), + ValidateDiagFunc: minimumValue(0, "number of nodes must be higher than or equal to 0"), }, "disk_size_gi": { Type: schema.TypeInt, @@ -537,112 +536,20 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return diags } - vcdClient := meta.(*VCDClient) - // Some arguments don't require changes in the backend if !d.HasChangesExcept("operations_timeout_minutes") { return nil } - // The ID must be already set for the update to be successful. We can't rely on GetRdesByName as there can be - // many clusters with the same name and RDE Type. 
- rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") - if err != nil { - return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - if state != "provisioned" { - return diag.Errorf("could not update the Kubernetes cluster with ID '%s': It is in '%s' state, but should be 'provisioned'", d.Id(), state) - } - - // Gets and unmarshals the CAPI YAML to update it - capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") + vcdClient := meta.(*VCDClient) + clusterDetails, err := getClusterCreateDto(d, vcdClient) if err != nil { - return diag.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - // TODO: Is there a simpler way? - dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) - var yamlDocs []map[string]interface{} - i := 0 - for { - yamlDocs[i] = map[string]interface{}{} - if dec.Decode(&yamlDocs[i]) != nil { - break - } - i++ - } - - if d.HasChange("ova_id") { - newOva := d.Get("ova_id") - ova, err := vcdClient.GetVAppTemplateById(newOva.(string)) - if err != nil { - return diag.Errorf("could not retrieve the new Kubernetes OVA with ID '%s': %s", newOva, err) - } - // TODO: Check whether the update can be performed - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "VCDMachineTemplate" { - yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] = ova.VAppTemplate.Name - } - } - } - if d.HasChange("control_plane.0.machine_count") { - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "KubeadmControlPlane" { - yamlDoc["spec"].(map[string]interface{})["replicas"] = d.Get("control_plane.0.machine_count") - } - } - } - // The node pools can only be resized - if d.HasChange("node_pool") { - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "KubeadmControlPlane" { - if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePool["name"].(string) { - yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePool["machine_count"].(int) - } - } - } - } - } - - if d.HasChange("node_health_check") { - oldNhc, newNhc := d.GetChange("node_health_check") - if oldNhc.(bool) && !newNhc.(bool) { - toDelete := 0 - for i, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "MachineHealthCheck" { - toDelete = i - } - } - yamlDocs[toDelete] = yamlDocs[len(yamlDocs)-1] // We delete the MachineHealthCheck block by putting the last doc in its place - yamlDocs = yamlDocs[:len(yamlDocs)-1] // Then we remove the last doc - } else { - // Add the YAML block - vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) - if err != nil { - return diag.FromErr(err) - } - rawYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, *vcdKeConfig, d.Get("name").(string)) - if err != nil { - return diag.FromErr(err) - } - yamlBlock := map[string]interface{}{} - err = yaml.Unmarshal([]byte(rawYaml), &yamlBlock) - if err != nil { - return diag.Errorf("error updating Memory Health Check: %s", err) - } - yamlDocs = append(yamlDocs, yamlBlock) - } - util.Logger.Printf("not done but make static complains :)") + return diag.Errorf("could not create Kubernetes cluster: %s", err) } - updatedYaml, err := 
yaml.Marshal(yamlDocs) + capiYaml, err := generateCapiYaml(d, vcdClient, clusterDetails) if err != nil { - return diag.Errorf("error updating cluster: %s", err) + return diag.FromErr(err) // TODO } // This must be done with retries due to the possible clash on ETags @@ -657,7 +564,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return nil, fmt.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) } - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = capiYaml rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) err = rde.Update(*rde.DefinedEntity) @@ -671,14 +578,6 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } - state, err = waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) - if err != nil { - return diag.Errorf("Kubernetes cluster update failed: %s", err) - } - if state != "provisioned" { - return diag.Errorf("Kubernetes cluster update failed, cluster is not in 'provisioned' state, but '%s'", state) - } - return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -867,6 +766,10 @@ func generateNodePoolYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterD nodePool := nodePoolRaw.(map[string]interface{}) name := nodePool["name"].(string) + if nodePool["machine_count"] == 0 { + return "", fmt.Errorf("the node pool '%s' should have at least 1 node", name) + } + // Check the correctness of the compute policies in the node pool block placementPolicyId := nodePool["placement_policy_id"] vpguPolicyId := nodePool["vgpu_policy_id"] diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index b63019665..739c6b78d 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -225,11 +225,14 @@ Only the following arguments can be updated: * `ova_id`: The cluster must allow upgrading to the new TKG version * `machine_count` of the `control_plane`: Supports scaling up and down -* `machine_count` of any `node_pool`: Supports scaling up and down +* `machine_count` of any `node_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. + The cluster must always have at least 1 running node, or else the cluster will enter an unrecoverable error state. * `auto_repair_on_errors` * `node_health_check` * `operations_timeout_minutes`: Does not require modifying the existing cluster +You can also add more `node_pool` blocks to add more node pools to the cluster. + Updating any other argument will delete the existing cluster and create a new one, if the Terraform plan is applied. 
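+As an illustration of the update operations described above, scaling an existing node pool up and adding a second
+one could look like the following snippet. This is only a sketch: the sizing policy and storage profile data sources
+are placeholders that must exist in your own configuration.
+
+```hcl
+node_pool {
+  name               = "node-pool-1"
+  machine_count      = 3 # Scaled up from 1
+  disk_size_gi       = 20
+  sizing_policy_id   = data.vcd_vm_sizing_policy.tkg_small.id
+  storage_profile_id = data.vcd_storage_profile.sp.id
+}
+
+node_pool {
+  name               = "node-pool-2" # New node pool added on update
+  machine_count      = 1
+  disk_size_gi       = 20
+  sizing_policy_id   = data.vcd_vm_sizing_policy.tkg_small.id
+  storage_profile_id = data.vcd_storage_profile.sp.id
+}
+```
+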
Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, From c36ad68e44793a471256d7b17b213cdcfa5f6dc2 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 24 Jan 2024 16:23:09 +0100 Subject: [PATCH 053/156] Progress with read Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 1 - vcd/resource_vcd_cse_kubernetes_cluster.go | 400 ++++++++++++++++----- 3 files changed, 310 insertions(+), 93 deletions(-) diff --git a/go.mod b/go.mod index 3424c0b2a..7252b7ff4 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/kr/pretty v0.2.1 github.com/vmware/go-vcloud-director/v2 v2.22.0 - gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 ) require ( diff --git a/go.sum b/go.sum index 62c13bbfa..ec1f30035 100644 --- a/go.sum +++ b/go.sum @@ -209,6 +209,5 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 9c14d1705..c38c9e158 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -14,7 +14,7 @@ import ( "github.com/vmware/go-vcloud-director/v2/govcd" "github.com/vmware/go-vcloud-director/v2/types/v56" "github.com/vmware/go-vcloud-director/v2/util" - "os" + "gopkg.in/yaml.v3" "strconv" "strings" "text/template" @@ -24,6 +24,7 @@ import ( // supportedCseVersions is a map that contains only the supported CSE versions as keys, // and its corresponding components versions as a slice of strings. The first string is the VCDKEConfig RDE Type version, // then the CAPVCD RDE Type version and finally the CAPVCD Behavior version. +// TODO: Is this really necessary? What happens in UI if I have a 1.1.0-1.2.0-1.0.0 (4.2) cluster and then CSE is updated to 4.3? var supportedCseVersions = map[string][]string{ "4.2": { "1.1.0", // VCDKEConfig RDE Type version @@ -169,7 +170,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "node_pool": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, // Required, but validated at runtime Description: "Defines a node pool for the cluster", Elem: &schema.Resource{ @@ -405,7 +406,7 @@ func validateCseKubernetesClusterSchema(d *schema.ResourceData) diag.Diagnostics diags = append(diags, diag.Errorf("the argument '%s' is required, but no definition was found", arg)...) 
} } - nodePoolsRaw := d.Get("node_pool").(*schema.Set).List() + nodePoolsRaw := d.Get("node_pool").([]interface{}) for _, nodePoolRaw := range nodePoolsRaw { nodePool := nodePoolRaw.(map[string]interface{}) for _, arg := range []string{"name"} { @@ -465,10 +466,10 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour state, err := waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) if err != nil { - return diag.Errorf("Kubernetes cluster creation failed: %s", err) + return diag.Errorf("Kubernetes cluster creation finished, but it has errors: %s", err) } if state != "provisioned" { - return diag.Errorf("Kubernetes cluster creation failed, cluster is not in 'provisioned' state, but '%s'", state) + return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: '%s'", state) } return resourceVcdCseKubernetesRead(ctx, d, meta) @@ -477,7 +478,6 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) var diags diag.Diagnostics - behaviorVersion := supportedCseVersions[d.Get("cse_version").(string)][2] // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be // many clusters with the same name and RDE Type. @@ -487,49 +487,25 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + warns, err := saveClusterDataToState(d, vcdClient, rde, d.Get("cse_version").(string)) if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) + return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } - dSet(d, "state", state) - - if state == "provisioned" { - // This can only be done if the cluster is in 'provisioned' state - invocationResult := map[string]interface{}{} - err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, &invocationResult) - if err != nil { - return diag.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") - if err != nil { - return diag.Errorf("could not retrieve Kubeconfig for Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - dSet(d, "kubeconfig", kubeconfig) - } else { + for _, warning := range warns { diags = append(diags, diag.Diagnostic{ Severity: diag.Warning, - Summary: "Kubernetes cluster not in 'provisioned' state", - Detail: fmt.Sprintf("Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), state), + Summary: warning.Error(), }) } - // TODO: Set - // kubernetes_version - // tkg_product_version - // capvcd_version - // cluster_resource_set_bindings - // cpi_version - // csi_version - // persistent_volumes - - d.SetId(rde.DefinedEntity.ID) // ID is already there, but just for completeness/readability if len(diags) > 0 { return diags } return nil } +// resourceVcdCseKubernetesUpdate updates the Kubernetes clusters. 
Note that re-creating the CAPI YAML and sending it +// back will break everything, so we must patch the YAML piece by piece. func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { diags := validateCseKubernetesClusterSchema(d) if diags != nil { @@ -542,14 +518,113 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, } vcdClient := meta.(*VCDClient) - clusterDetails, err := getClusterCreateDto(d, vcdClient) + + // The ID must be already set for the update to be successful. We can't rely on GetRdesByName as there can be + // many clusters with the same name and RDE Type. + rde, err := vcdClient.GetRdeById(d.Id()) if err != nil { - return diag.Errorf("could not create Kubernetes cluster: %s", err) + return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + if err != nil { + return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + if state != "provisioned" { + return diag.Errorf("could not update the Kubernetes cluster with ID '%s': It is in '%s' state, but should be 'provisioned'", d.Id(), state) } - capiYaml, err := generateCapiYaml(d, vcdClient, clusterDetails) + // Gets and unmarshals the CAPI YAML to update it + capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") if err != nil { - return diag.FromErr(err) // TODO + return diag.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + // TODO: Is there a simpler way? + dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) + var yamlDocs []map[string]interface{} + i := 0 + for { + yamlDocs = append(yamlDocs, map[string]interface{}{}) + if dec.Decode(&yamlDocs[i]) != nil { + break + } + i++ + } + + if d.HasChange("ova_id") { + newOva := d.Get("ova_id") + ova, err := vcdClient.GetVAppTemplateById(newOva.(string)) + if err != nil { + return diag.Errorf("could not retrieve the new Kubernetes OVA with ID '%s': %s", newOva, err) + } + // TODO: Check whether the update can be performed + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "VCDMachineTemplate" { + yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] = ova.VAppTemplate.Name + } + } + } + if d.HasChange("control_plane.0.machine_count") { + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "KubeadmControlPlane" { + yamlDoc["spec"].(map[string]interface{})["replicas"] = d.Get("control_plane.0.machine_count") + } + } + } + // The node pools can only be created and resized + var newNodePools []map[string]interface{} + if d.HasChange("node_pool") { + for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + nodePool := nodePoolRaw.(map[string]interface{}) + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "MachineDeployment" { + if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePool["name"].(string) { + yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePool["machine_count"].(int) + } else { + // TODO: Create node pool + newNodePools = append(newNodePools, map[string]interface{}{}) + } + } + } + } + } + if len(newNodePools) > 0 { + yamlDocs = append(yamlDocs, newNodePools...) 
+ } + + if d.HasChange("node_health_check") { + oldNhc, newNhc := d.GetChange("node_health_check") + if oldNhc.(bool) && !newNhc.(bool) { + toDelete := 0 + for i, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "MachineHealthCheck" { + toDelete = i + } + } + yamlDocs[toDelete] = yamlDocs[len(yamlDocs)-1] // We delete the MachineHealthCheck block by putting the last doc in its place + yamlDocs = yamlDocs[:len(yamlDocs)-1] // Then we remove the last doc + } else { + // Add the YAML block + vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) + if err != nil { + return diag.FromErr(err) + } + rawYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, *vcdKeConfig, d.Get("name").(string)) + if err != nil { + return diag.FromErr(err) + } + yamlBlock := map[string]interface{}{} + err = yaml.Unmarshal([]byte(rawYaml), &yamlBlock) + if err != nil { + return diag.Errorf("error updating Memory Health Check: %s", err) + } + yamlDocs = append(yamlDocs, yamlBlock) + } + util.Logger.Printf("not done but make static complains :)") + } + + updatedYaml, err := yaml.Marshal(yamlDocs) + if err != nil { + return diag.Errorf("error updating cluster: %s", err) } // This must be done with retries due to the possible clash on ETags @@ -564,10 +639,11 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return nil, fmt.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) } - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = capiYaml + rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) - err = rde.Update(*rde.DefinedEntity) + // err = rde.Update(*rde.DefinedEntity) + util.Logger.Printf("ADAM: PERFORM UPDATE: %v", rde.DefinedEntity.Entity) if err != nil { return nil, err } @@ -578,6 +654,14 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } + state, err = waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) + if err != nil { + return diag.Errorf("Kubernetes cluster update failed: %s", err) + } + if state != "provisioned" { + return diag.Errorf("Kubernetes cluster update failed, cluster is not in 'provisioned' state, but '%s'", state) + } + return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -588,19 +672,21 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) vcdKe := map[string]interface{}{} + logHttpResponse := util.LogHttpResponse // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling - // the log with these big payloads. - _ = os.Setenv("GOVCD_LOG_SKIP_HTTP_RESP", "1") + // the log with these big payloads. We use defer to be sure that we restore the initial logging state. 
defer func() { - _ = os.Unsetenv("GOVCD_LOG_SKIP_HTTP_RESP") + util.LogHttpResponse = logHttpResponse }() var elapsed time.Duration timeout := d.Get("operations_timeout_minutes").(int) start := time.Now() for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever + util.LogHttpResponse = false rde, err := vcdClient.GetRdeById(d.Id()) + util.LogHttpResponse = logHttpResponse if err != nil { if govcd.ContainsNotFound(err) { return nil // The RDE is gone, so the process is completed and there's nothing more to do @@ -627,6 +713,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m } } + util.Logger.Printf("[DEBUG] Cluster '%s' is still not deleted, will check again in 10 seconds", d.Id()) time.Sleep(10 * time.Second) elapsed = time.Since(start) } @@ -653,49 +740,186 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m default: return nil, fmt.Errorf("resource name must be specified as cse_version.cluster_id") } + dSet(d, "cse_version", cseVersion) rde, err := vcdClient.GetRdeById(rdeId) if err != nil { return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) } - // TODO: Set the Optional fields that are Required at runtime - dSet(d, "cse_version", cseVersion) + warns, err := saveClusterDataToState(d, vcdClient, rde, cseVersion) + if err != nil { + return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", rdeId, err) + } + for _, warn := range warns { + // We can't do much here as Import does not support Diagnostics + logForScreen(rdeId, fmt.Sprintf("got a warning during import: %s", warn)) + } + + return []*schema.ResourceData{d}, nil +} + +// saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. +// Returns a slice of warnings first and an error second. 
+func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *govcd.DefinedEntity, cseVersion string) ([]error, error) { + var warnings []error + + d.SetId(rde.DefinedEntity.ID) dSet(d, "name", rde.DefinedEntity.Name) - dSet(d, "ova_id", rde.DefinedEntity.Name) - dSet(d, "vdc_id", rde.DefinedEntity.Name) - dSet(d, "network_id", rde.DefinedEntity.Name) - dSet(d, "api_token_file", "******") - /*err = d.Set("control_plane", []map[string]interface{}{ - { - "name": "a", - }, - }) + dSet(d, "cse_version", cseVersion) + dSet(d, "runtime", "tkg") // Only one supported + + if rde.DefinedEntity.Org == nil { + return nil, fmt.Errorf("could not retrieve Organization information from RDE") + } + adminOrg, err := vcdClient.GetAdminOrgById(rde.DefinedEntity.Org.ID) if err != nil { - return nil, fmt.Errorf("error importing the control plane of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + return nil, fmt.Errorf("could not retrieve Organization with ID '%s': %s", rde.DefinedEntity.Org.ID, err) } - err = d.Set("node_pool", []map[string]interface{}{ - { - "name": "node-pool-1", - }, - { - "name": "node-pool-2", - }, - }) + if _, ok := d.GetOk("org"); ok { + // This field is optional, as it can take the value from the VCD client + dSet(d, "org", adminOrg.AdminOrg.Name) + } + + vdcName, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "metadata.virtualDataCenterName") if err != nil { - return nil, fmt.Errorf("error importing node pools of the Kubernetes cluster with ID '%s': %s", d.Id(), err) + return nil, fmt.Errorf("could not get VDC name from 'metadata.virtualDataCenterName': %s", err) } - err = d.Set("default_storage_class", []map[string]interface{}{ - { - "name": "sc-1", - }, - }) + vdc, err := adminOrg.GetVDCByName(vdcName, false) if err != nil { - return nil, fmt.Errorf("error importing node pools of the Kubernetes cluster with ID '%s': %s", d.Id(), err) - }*/ + return nil, fmt.Errorf("could not get VDC with name %s: %s", vdcName, err) + } + dSet(d, "vdc_id", vdc.Vdc.ID) - d.SetId(rde.DefinedEntity.ID) - return []*schema.ResourceData{d}, nil + if _, ok := d.GetOk("owner"); ok { + // This field is optional, as it can take the value from the VCD client + if rde.DefinedEntity.Owner == nil { + return nil, fmt.Errorf("could not retrieve Owner information from RDE") + } + dSet(d, "owner", rde.DefinedEntity.Owner.Name) + } + + if _, ok := d.GetOk("api_token_file"); !ok { + // During imports, this field is impossible to get, so we set an artificial value, as this argument + // is required at runtime + dSet(d, "api_token_file", "******") + } + + //ip, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.capvcd.clusterApiStatus.apiEndpoints.host") + //if err != nil { + // return fmt.Errorf("could not get Control Plane IP from 'status.capvcd.clusterApiStatus.apiEndpoints.host': %s", err), nil + //} + + defaultStorageClassOptions, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe.defaultStorageClassOptions") + var defaultStorageClass []map[string]interface{} + if err != nil { + if !strings.Contains(err.Error(), "does not exist in input map") { + return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) + } + // The object does not exist, hence the cluster does not use a default storage class + } else { + reclaimPolicy := "retain" + if defaultStorageClassOptions["useDeleteReclaimPolicy"].(bool) { + reclaimPolicy = "delete" + } + + // TODO: Get Storage profile by 
Name + // defaultStorageClassOptions["vcdStorageProfileName"] + + defaultStorageClass = append(defaultStorageClass, map[string]interface{}{ + // "storage_profile_id": "", + "name": defaultStorageClassOptions["k8sStorageClassName"], + "reclaim_policy": reclaimPolicy, + "filesystem": defaultStorageClassOptions["filesystem"], + }) + + } + err = d.Set("default_storage_class", defaultStorageClass) + if err != nil { + return nil, fmt.Errorf("could not save 'default_storage_class': %s", err) + } + + // TODO: USE JSON!!!! + // Gets and unmarshals the CAPI YAML to update it + capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") + if err != nil { + return nil, fmt.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + // TODO: Is there a simpler way? + dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) + var yamlDocs []map[string]interface{} + i := 0 + for { + yamlDocs = append(yamlDocs, map[string]interface{}{}) + if dec.Decode(&yamlDocs[i]) != nil { + break + } + i++ + } + + for _, yamlDoc := range yamlDocs { + if yamlDoc["kind"] == "VCDMachineTemplate" { + catalogName := yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["catalog"] + catalog, err := adminOrg.GetCatalogByName(catalogName.(string), false) + if err != nil { + return nil, fmt.Errorf("could not retrieve OVA Catalog with name %s: %s", catalogName, err) + } + templateName := yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] + tmpl, err := catalog.GetVAppTemplateByName(templateName.(string)) + if err != nil { + return nil, fmt.Errorf("could not retrieve Kubernetes template with name %s: %s", templateName, err) + } + dSet(d, "ova_id", tmpl.VAppTemplate.ID) + + // If metadata.name contains "control-plane" + // - Sizing policy, placement, storage policy, count, IP + // Else: Node pools + // Respect order in the list, easy with name field + // - Name, Sizing policy, placement, vgpu policy, storage policy, count, disk_size_gi, + } else if yamlDoc["kind"] == "VCDCluster" { + networkName := yamlDoc["spec"].(map[string]interface{})["ovdcNetwork"] + network, err := vdc.GetOrgVdcNetworkByName(networkName.(string), false) + if err != nil { + return nil, fmt.Errorf("could not retrieve Network with name %s: %s", networkName, err) + } + dSet(d, "network_id", network.OrgVDCNetwork.ID) + } + } + + state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + if err != nil { + return nil, fmt.Errorf("could not read 'status.vcdKe.state' from Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + dSet(d, "state", state) + + if state == "provisioned" { + behaviorVersion := supportedCseVersions[cseVersion][2] + + // This can only be done if the cluster is in 'provisioned' state + invocationResult := map[string]interface{}{} + err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, &invocationResult) + if err != nil { + return nil, fmt.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + + kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") + if err != nil { + return nil, fmt.Errorf("could not retrieve Kubeconfig for Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + dSet(d, 
"kubeconfig", kubeconfig) + } else { + warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), state)) + } + + // TODO: Set + // kubernetes_version + // tkg_product_version + // capvcd_version + // cluster_resource_set_bindings + // cpi_version + // csi_version + // persistent_volumes + return warnings, nil } // getCseKubernetesClusterCreationPayload gets the payload for the RDE that will trigger a Kubernetes cluster creation. @@ -762,7 +986,7 @@ func generateNodePoolYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterD buf := &bytes.Buffer{} // We can have many node pool blocks, we build a YAML object for each one of them. - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { + for _, nodePoolRaw := range d.Get("node_pool").([]interface{}) { nodePool := nodePoolRaw.(map[string]interface{}) name := nodePool["name"].(string) @@ -836,36 +1060,29 @@ func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdClient *VCDClient, func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { var elapsed time.Duration timeout := d.Get("operations_timeout_minutes").(int) + logHttpResponse := util.LogHttpResponse + sleepTime := 30 // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling - // the log with these big payloads. - _ = os.Setenv("GOVCD_LOG_SKIP_HTTP_RESP", "1") + // the log with these big payloads. We use defer to be sure that we restore the initial logging state. defer func() { - _ = os.Unsetenv("GOVCD_LOG_SKIP_HTTP_RESP") + util.LogHttpResponse = logHttpResponse }() currentState := "" start := time.Now() for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever + util.LogHttpResponse = false rde, err := vcdClient.GetRdeById(rdeId) + util.LogHttpResponse = logHttpResponse if err != nil { return "", err } currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") if err != nil { - util.Logger.Printf("[DEBUG] Failed getting cluster state: %s", err) + util.Logger.Printf("[DEBUG] Failed getting cluster state, but will try again in %d seconds: %s", sleepTime, err) // We ignore this error, as eventually the state should be populated } else { - - // Add some traceability in the logs and Terraform output about the progress of the cluster provisioning - eventSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.vcdKe.eventSet") - if err == nil { - latestEvent, err := traverseMapAndGet[string](eventSet[len(eventSet)-1], "additionalDetails.Detailed Event") - if err != nil { - util.Logger.Printf("[DEBUG] waiting for cluster to be provisioned. 
Latest event: '%s'", latestEvent) - } - } - switch currentState { case "provisioned": return currentState, nil @@ -886,8 +1103,9 @@ func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, } } + util.Logger.Printf("[DEBUG] Cluster '%s' is in '%s' state, will check again in %d seconds", d.Id(), currentState, sleepTime) elapsed = time.Since(start) - time.Sleep(30 * time.Second) + time.Sleep(time.Duration(sleepTime) * time.Second) } return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) } From 3dec966d0dcf0120bb6d58fef03af95bbc390d62 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 25 Jan 2024 13:23:48 +0100 Subject: [PATCH 054/156] Progress with read/import Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 247 +++++++++++++++------ 1 file changed, 176 insertions(+), 71 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index c38c9e158..649dc8944 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -15,6 +15,7 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" "github.com/vmware/go-vcloud-director/v2/util" "gopkg.in/yaml.v3" + "net/url" "strconv" "strings" "text/template" @@ -759,6 +760,60 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m return []*schema.ResourceData{d}, nil } +// cseKubernetesClusterStatus is an auxiliary structure that helps to read a Kubernetes cluster information retrieved +// from CSE RDEs +type cseKubernetesClusterStatus struct { + Cpi struct { + Version string `json:"version,omitempty"` + } `json:"cpi,omitempty"` + Csi struct { + Version string `json:"version,omitempty"` + } `json:"csi,omitempty"` + Capvcd struct { + Upgrade struct { + Current struct { + TkgVersion string `json:"tkgVersion,omitempty"` + KubernetesVersion string `json:"kubernetesVersion,omitempty"` + } `json:"current,omitempty"` + } `json:"upgrade,omitempty"` + NodePool []struct { + Name string `json:"name,omitempty"` + DiskSizeMb int `json:"diskSizeMb,omitempty"` + SizingPolicy string `json:"sizingPolicy,omitempty"` + PlacementPolicy string `json:"placementPolicy,omitempty"` + NvidiaGpuEnabled bool `json:"nvidiaGpuEnabled,omitempty"` + StorageProfile string `json:"storageProfile,omitempty"` + DesiredReplicas int `json:"desiredReplicas,omitempty"` + } `json:"nodePool,omitempty"` + K8sNetwork struct { + Pods struct { + CidrBlocks []string `json:"cidrBlocks,omitempty"` + } `json:"pods,omitempty"` + Services struct { + CidrBlocks []string `json:"cidrBlocks,omitempty"` + } `json:"services,omitempty"` + } `json:"k8sNetwork,omitempty"` + CapvcdVersion string `json:"capvcdVersion,omitempty"` + VcdProperties struct { + OrgVdcs []struct { + Name string `json:"name,omitempty"` + OvdcNetworkName string `json:"ovdcNetworkName,omitempty"` + } `json:"orgVdcs,omitempty"` + Organizations []struct { + Name string `json:"name,omitempty"` + } `json:"organizations,omitempty"` + } `json:"vcdProperties,omitempty"` + ClusterResourceSetBindings []struct { + Name string `json:"name"` + } `json:"clusterResourceSetBindings,omitempty"` + ClusterApiStatus struct { + ApiEndpoints []struct { + Host string `json:"host,omitempty"` + } `json:"apiEndpoints,omitempty"` + } `json:"clusterApiStatus,omitempty"` + } `json:"capvcd,omitempty"` +} + // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. 
// Returns a slice of warnings first and an error second. func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *govcd.DefinedEntity, cseVersion string) ([]error, error) { @@ -769,27 +824,51 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g dSet(d, "cse_version", cseVersion) dSet(d, "runtime", "tkg") // Only one supported - if rde.DefinedEntity.Org == nil { - return nil, fmt.Errorf("could not retrieve Organization information from RDE") + if _, ok := rde.DefinedEntity.Entity["status"]; !ok { + return nil, fmt.Errorf("could not retrieve 'status' object from Kubernetes cluster with ID '%s'", d.Id()) } - adminOrg, err := vcdClient.GetAdminOrgById(rde.DefinedEntity.Org.ID) + + var status cseKubernetesClusterStatus + b, err := json.Marshal(rde.DefinedEntity.Entity["status"]) if err != nil { - return nil, fmt.Errorf("could not retrieve Organization with ID '%s': %s", rde.DefinedEntity.Org.ID, err) + return nil, fmt.Errorf("could not marshal RDE '%s': %s", d.Id(), err) } - if _, ok := d.GetOk("org"); ok { - // This field is optional, as it can take the value from the VCD client - dSet(d, "org", adminOrg.AdminOrg.Name) + err = json.Unmarshal(b, &status) + if err != nil { + return nil, fmt.Errorf("could not unmarshal RDE '%s': %s", d.Id(), err) + } + + util.Logger.Printf("ADAM %v", status) + + // TODO CSE: Why is this a slice??? + if len(status.Capvcd.VcdProperties.Organizations) == 0 { + return nil, fmt.Errorf("expected at least one Organization in cluster '%s'", d.Id()) } - vdcName, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "metadata.virtualDataCenterName") + // This field is optional, as it can take the value from the VCD client + if _, ok := d.GetOk("org"); ok { + dSet(d, "org", status.Capvcd.VcdProperties.Organizations[0].Name) + } + adminOrg, err := vcdClient.GetAdminOrgByName(status.Capvcd.VcdProperties.Organizations[0].Name) if err != nil { - return nil, fmt.Errorf("could not get VDC name from 'metadata.virtualDataCenterName': %s", err) + return nil, fmt.Errorf("could not get Organization with name %s: %s", status.Capvcd.VcdProperties.Organizations[0].Name, err) + } + + // TODO CSE: Why is this a slice??? 
+	if len(status.Capvcd.VcdProperties.OrgVdcs) == 0 {
+		return nil, fmt.Errorf("expected at least one VDC in cluster '%s'", d.Id())
+	}
+
+	vdc, err := adminOrg.GetVDCByName(status.Capvcd.VcdProperties.OrgVdcs[0].Name, false)
+	if err != nil {
+		return nil, fmt.Errorf("could not get VDC with name %s: %s", status.Capvcd.VcdProperties.OrgVdcs[0].Name, err)
+	}
+	dSet(d, "vdc_id", vdc.Vdc.ID)
+	network, err := vdc.GetOrgVdcNetworkByName(status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, false)
+	if err != nil {
+		return nil, fmt.Errorf("could not get Org VDC Network with name %s: %s", status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, err)
+	}
+	dSet(d, "network_id", network.OrgVDCNetwork.ID)
 
 	if _, ok := d.GetOk("owner"); ok {
 		// This field is optional, as it can take the value from the VCD client
@@ -805,10 +884,89 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g
 		dSet(d, "api_token_file", "******")
 	}
 
-	//ip, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.capvcd.clusterApiStatus.apiEndpoints.host")
-	//if err != nil {
-	//	return fmt.Errorf("could not get Control Plane IP from 'status.capvcd.clusterApiStatus.apiEndpoints.host': %s", err), nil
-	//}
+	bindings := make([]string, len(status.Capvcd.ClusterResourceSetBindings))
+	for i, binding := range status.Capvcd.ClusterResourceSetBindings {
+		bindings[i] = binding.Name
+	}
+	err = d.Set("cluster_resource_set_bindings", bindings)
+	if err != nil {
+		return nil, fmt.Errorf("could not set 'cluster_resource_set_bindings': %s", err)
+	}
+
+	dSet(d, "cpi_version", status.Cpi.Version)
+	dSet(d, "csi_version", status.Csi.Version)
+	dSet(d, "capvcd_version", status.Capvcd.CapvcdVersion)
+	dSet(d, "kubernetes_version", status.Capvcd.Upgrade.Current.KubernetesVersion)
+	dSet(d, "tkg_product_version", status.Capvcd.Upgrade.Current.TkgVersion)
+	if len(status.Capvcd.K8sNetwork.Pods.CidrBlocks) == 0 {
+		return nil, fmt.Errorf("expected at least one Pod CIDR block in cluster '%s'", d.Id())
+	}
+	dSet(d, "pods_cidr", status.Capvcd.K8sNetwork.Pods.CidrBlocks[0])
+	if len(status.Capvcd.K8sNetwork.Services.CidrBlocks) == 0 {
+		return nil, fmt.Errorf("expected at least one Services CIDR block in cluster '%s'", d.Id())
+	}
+	dSet(d, "services_cidr", status.Capvcd.K8sNetwork.Services.CidrBlocks[0])
+
+	nodePoolBlocks := make([]map[string]interface{}, len(status.Capvcd.NodePool)-1)
+	controlPlaneBlocks := make([]map[string]interface{}, 1)
+	nameToIds := map[string]string{"": ""} // Initialize with empty value
+	for i, nodePool := range status.Capvcd.NodePool {
+		block := map[string]interface{}{}
+		block["machine_count"] = nodePool.DesiredReplicas
+		// TODO: This needs a refactoring
+		if nodePool.PlacementPolicy != "" {
+			policies, err := vcdClient.GetAllVdcComputePoliciesV2(url.Values{
+				"filter": []string{fmt.Sprintf("name==%s", nodePool.PlacementPolicy)},
+			})
+			if err != nil {
+				return nil, err // TODO
+			}
+			nameToIds[nodePool.PlacementPolicy] = policies[0].VdcComputePolicyV2.ID
+		}
+		if nodePool.SizingPolicy != "" {
+			policies, err := vcdClient.GetAllVdcComputePoliciesV2(url.Values{
+				"filter": []string{fmt.Sprintf("name==%s", nodePool.SizingPolicy)},
+			})
+			if err != nil {
+				return nil, err // TODO
+			}
+			nameToIds[nodePool.SizingPolicy] = policies[0].VdcComputePolicyV2.ID
+		}
+		if nodePool.StorageProfile != "" {
+			// TODO: govcd needs Getting 
Storage Profiles by Name or Get all with params + fmt.Print("foo") // Otherwise make static complains + } + block["sizing_policy_id"] = nameToIds[nodePool.SizingPolicy] + if !nodePool.NvidiaGpuEnabled { + block["vgpu_policy_id"] = nameToIds[nodePool.PlacementPolicy] // It's a placement policy here + } else { + block["placement_policy_id"] = nameToIds[nodePool.PlacementPolicy] + } + block["storage_profile_id"] = nameToIds[nodePool.StorageProfile] + block["disk_size_gi"] = nodePool.DiskSizeMb / 1024 + + if strings.HasSuffix(nodePool.Name, "-control-plane-node-pool") { + // Control Plane + if len(status.Capvcd.ClusterApiStatus.ApiEndpoints) == 0 { + return nil, fmt.Errorf("could not retrieve Cluster IP") + } + block["ip"] = status.Capvcd.ClusterApiStatus.ApiEndpoints[0].Host + controlPlaneBlocks[0] = block + } else { + // Worker node + block["name"] = nodePool.Name + + nodePoolBlocks[i] = block + } + } + err = d.Set("node_pool", nodePoolBlocks) + if err != nil { + return nil, fmt.Errorf("could not set 'node_pool' pools: %s", err) + } + err = d.Set("control_plane", controlPlaneBlocks) + if err != nil { + return nil, fmt.Errorf("could not set 'control_plane': %s", err) + } defaultStorageClassOptions, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe.defaultStorageClassOptions") var defaultStorageClass []map[string]interface{} @@ -839,53 +997,6 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g return nil, fmt.Errorf("could not save 'default_storage_class': %s", err) } - // TODO: USE JSON!!!! - // Gets and unmarshals the CAPI YAML to update it - capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") - if err != nil { - return nil, fmt.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - // TODO: Is there a simpler way? 
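Note: the decoder loop being removed below follows the usual gopkg.in/yaml.v3 pattern for multi-document streams: keep calling Decode until the end of input. A condensed, stand-alone variant of that pattern which also distinguishes io.EOF from real decode errors; the sample YAML is illustrative, not the real CAPI YAML:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"gopkg.in/yaml.v3"
)

func main() {
	// Two YAML documents separated by "---", standing in for the CAPI YAML stored in the RDE.
	capiYaml := "kind: VCDCluster\n---\nkind: VCDMachineTemplate\n"

	dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml)))
	var docs []map[string]interface{}
	for {
		doc := map[string]interface{}{}
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break // no more documents in the stream
		}
		if err != nil {
			panic(err) // a real implementation would return the error instead
		}
		docs = append(docs, doc)
	}
	fmt.Println(len(docs), docs[0]["kind"]) // prints "2 VCDCluster"
}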
- dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) - var yamlDocs []map[string]interface{} - i := 0 - for { - yamlDocs = append(yamlDocs, map[string]interface{}{}) - if dec.Decode(&yamlDocs[i]) != nil { - break - } - i++ - } - - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "VCDMachineTemplate" { - catalogName := yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["catalog"] - catalog, err := adminOrg.GetCatalogByName(catalogName.(string), false) - if err != nil { - return nil, fmt.Errorf("could not retrieve OVA Catalog with name %s: %s", catalogName, err) - } - templateName := yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] - tmpl, err := catalog.GetVAppTemplateByName(templateName.(string)) - if err != nil { - return nil, fmt.Errorf("could not retrieve Kubernetes template with name %s: %s", templateName, err) - } - dSet(d, "ova_id", tmpl.VAppTemplate.ID) - - // If metadata.name contains "control-plane" - // - Sizing policy, placement, storage policy, count, IP - // Else: Node pools - // Respect order in the list, easy with name field - // - Name, Sizing policy, placement, vgpu policy, storage policy, count, disk_size_gi, - } else if yamlDoc["kind"] == "VCDCluster" { - networkName := yamlDoc["spec"].(map[string]interface{})["ovdcNetwork"] - network, err := vdc.GetOrgVdcNetworkByName(networkName.(string), false) - if err != nil { - return nil, fmt.Errorf("could not retrieve Network with name %s: %s", networkName, err) - } - dSet(d, "network_id", network.OrgVDCNetwork.ID) - } - } - state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") if err != nil { return nil, fmt.Errorf("could not read 'status.vcdKe.state' from Kubernetes cluster with ID '%s': %s", d.Id(), err) @@ -911,14 +1022,8 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), state)) } - // TODO: Set - // kubernetes_version - // tkg_product_version - // capvcd_version - // cluster_resource_set_bindings - // cpi_version - // csi_version - // persistent_volumes + // TODO: Missing ova_id, ssh_public_key, virtual_ip_subnet, auto_repair_on_errors, node_health_check, persistent_volumes + return warnings, nil } @@ -1419,7 +1524,7 @@ func generateCapiYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterDetai // The final "pretty" YAML. To embed it in the final payload it must be marshaled into a one-line JSON string prettyYaml := fmt.Sprintf("%s\n%s\n%s", memoryHealthCheckYaml, nodePoolYaml, buf.String()) - // This encoder is used instead of a standard json.Marshal as the YAML contains special + // We don't use a standard json.Marshal() as the YAML contains special // characters that are not encoded properly, such as '<'. 
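Note: the distinction the comment draws is between json.Marshal, which escapes HTML-relevant characters such as '<', '>' and '&' into \u003c-style sequences by default, and a json.Encoder with SetEscapeHTML(false), which leaves them literal. A small stand-alone comparison, using an illustrative one-line snippet in place of the real CAPI YAML:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	yamlSnippet := "value: <no value>"

	// Default behaviour: '<' and '>' are escaped.
	escaped, _ := json.Marshal(yamlSnippet)
	fmt.Println(string(escaped)) // "value: \u003cno value\u003e"

	// Encoder with SetEscapeHTML(false): the characters stay as they are.
	buf := &bytes.Buffer{}
	enc := json.NewEncoder(buf)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(yamlSnippet); err != nil {
		panic(err)
	}
	// Encode appends a trailing newline; the provider additionally trims the surrounding quotes.
	fmt.Println(strings.TrimSpace(buf.String())) // "value: <no value>"
}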
buf.Reset() enc := json.NewEncoder(buf) From 524ee2f8cfd49c759a699b9ab122856fb0a671a7 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 30 Jan 2024 09:30:42 +0100 Subject: [PATCH 055/156] checkpoint Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 2 - vcd/resource_vcd_cse_kubernetes_cluster.go | 817 +++------------------ vcd/validate_funcs.go | 2 +- 4 files changed, 106 insertions(+), 717 deletions(-) diff --git a/go.mod b/go.mod index 7252b7ff4..9718aa8e8 100644 --- a/go.mod +++ b/go.mod @@ -67,4 +67,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240116081424-dfd4388c2814 +replace github.com/vmware/go-vcloud-director/v2 => /Users/abarreiro/Documents/Development/go-vcloud-director diff --git a/go.sum b/go.sum index ec1f30035..f369f9ca4 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,6 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240116081424-dfd4388c2814 h1:LrRAvFuDqvDwo2Qx7uMLIQS9IpQdpbaLYUswpCRiBTY= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240116081424-dfd4388c2814/go.mod h1:QPxGFgrUcSyzy9IlpwDE4UNT3tsOy2047tJOPEJ4nlw= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 649dc8944..769186d9d 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -4,8 +4,6 @@ import ( "bytes" "context" _ "embed" - "encoding/base64" - "encoding/json" "fmt" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -16,9 +14,7 @@ import ( "github.com/vmware/go-vcloud-director/v2/util" "gopkg.in/yaml.v3" "net/url" - "strconv" "strings" - "text/template" "time" ) @@ -437,41 +433,67 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } vcdClient := meta.(*VCDClient) - clusterDetails, err := getClusterCreateDto(d, vcdClient) - if err != nil { - return diag.Errorf("could not create Kubernetes cluster: %s", err) + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) + } + + creationData := govcd.CseClusterCreationInput{ + Name: d.Get("name").(string), + OrganizationId: org.Org.ID, + VdcId: d.Get("vdc_id").(string), + NetworkId: d.Get("network_id").(string), + KubernetesTemplateOvaId: d.Get("ova_id").(string), + CseVersion: d.Get("cse_version").(string), + ControlPlane: govcd.ControlPlaneInput{ + MachineCount: d.Get("control_plane.0.machine_count").(int), + DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), + SizingPolicyId: d.Get("control_plane.0.sizing_policy_id").(string), + PlacementPolicyId: d.Get("control_plane.0.placement_policy_id").(string), + StorageProfileId: 
d.Get("control_plane.0.storage_profile_id").(string), + Ip: d.Get("control_plane.0.ip").(string), + }, } - entityMap, err := getCseKubernetesClusterCreationPayload(d, vcdClient, clusterDetails) - if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) + workerPoolsAttr := d.Get("worker_pool").(*schema.Set).List() + workerPools := make([]govcd.WorkerPoolInput, len(workerPoolsAttr)) + for i, workerPoolRaw := range workerPoolsAttr { + workerPool := workerPoolRaw.(map[string]interface{}) + workerPools[i] = govcd.WorkerPoolInput{ + Name: workerPool["name"].(string), + MachineCount: workerPool["machine_count"].(int), + DiskSizeGi: workerPool["disk_size_gi"].(int), + SizingPolicyId: workerPool["sizing_policy_id"].(string), + PlacementPolicyId: workerPool["placement_policy_id"].(string), + VGpuPolicyId: workerPool["vgpu_policy_id"].(string), + StorageProfileId: workerPool["storage_profile_id"].(string), + } } + creationData.WorkerPools = workerPools - rde, err := clusterDetails.RdeType.CreateRde(types.DefinedEntity{ - EntityType: clusterDetails.RdeType.DefinedEntityType.ID, - Name: clusterDetails.Name, - Entity: entityMap, - }, &govcd.TenantContext{ - OrgId: clusterDetails.Org.AdminOrg.ID, - OrgName: clusterDetails.Org.AdminOrg.Name, - }) - if err != nil { - return diag.Errorf("could not create Kubernetes cluster with name '%s': %s", clusterDetails.Name, err) + if _, ok := d.GetOk("default_storage_class"); ok { + creationData.DefaultStorageClass = &govcd.DefaultStorageClassInput{ + StorageProfileId: d.Get("default_storage_class.0.storage_profile_id").(string), + Name: d.Get("default_storage_class.0.name").(string), + ReclaimPolicy: d.Get("default_storage_class.0.reclaim_policy").(string), + Filesystem: d.Get("default_storage_class.0.filesystem").(string), + } } + cluster, err := vcdClient.CseCreateKubernetesCluster(creationData, time.Duration(d.Get("operations_timeout_minutes").(int))*time.Minute) + if err != nil { + if cluster != nil { + if cluster.Capvcd.Status.VcdKe.State != "provisioned" { + return diag.Errorf("Kubernetes cluster creation finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.Capvcd.Status.VcdKe.State, err) + } + } + return diag.Errorf("Kubernetes cluster creation failed: %s", err) + } // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. // We could use some other ways of filtering, but ID is the only accurate. // Also, the RDE is created at this point, so Terraform should trigger an update/delete next. // If the cluster can't be created due to errors, users should delete it and retry, like in UI. - d.SetId(rde.DefinedEntity.ID) - - state, err := waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) - if err != nil { - return diag.Errorf("Kubernetes cluster creation finished, but it has errors: %s", err) - } - if state != "provisioned" { - return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: '%s'", state) - } + d.SetId(cluster.ID) return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -483,12 +505,12 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be // many clusters with the same name and RDE Type. 
var err error - rde, err := vcdClient.GetRdeById(d.Id()) + cluster, err := vcdClient.GetKubernetesClusterById(d.Id()) if err != nil { return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, vcdClient, rde, d.Get("cse_version").(string)) + warns, err := saveClusterDataToState(d, vcdClient, cluster, d.Get("cse_version").(string)) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -672,58 +694,15 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, // to be gone. func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - vcdKe := map[string]interface{}{} - logHttpResponse := util.LogHttpResponse - - // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling - // the log with these big payloads. We use defer to be sure that we restore the initial logging state. - defer func() { - util.LogHttpResponse = logHttpResponse - }() - - var elapsed time.Duration - timeout := d.Get("operations_timeout_minutes").(int) - start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever - util.LogHttpResponse = false - rde, err := vcdClient.GetRdeById(d.Id()) - util.LogHttpResponse = logHttpResponse - if err != nil { - if govcd.ContainsNotFound(err) { - return nil // The RDE is gone, so the process is completed and there's nothing more to do - } - return diag.Errorf("could not retrieve the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - vcdKe, err = traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe") - if err != nil { - return diag.Errorf("JSON object 'spec.vcdKe' is not correct in the RDE '%s': %s", d.Id(), err) - } - if !vcdKe["markForDelete"].(bool) || !vcdKe["forceDelete"].(bool) { - // Mark the cluster for deletion - vcdKe["markForDelete"] = true - vcdKe["forceDelete"] = true - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"] = vcdKe - err = rde.Update(*rde.DefinedEntity) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "etag") { - continue // We ignore any ETag error. 
This just means a clash between CSE Server and Terraform, we just try again - } - return diag.Errorf("could not mark the Kubernetes cluster with ID '%s' to be deleted: %s", d.Id(), err) - } - } - - util.Logger.Printf("[DEBUG] Cluster '%s' is still not deleted, will check again in 10 seconds", d.Id()) - time.Sleep(10 * time.Second) - elapsed = time.Since(start) + cluster, err := vcdClient.GetKubernetesClusterById(d.Id()) + if err != nil { + return diag.FromErr(err) } - - // We give a hint to the user about the deletion process result - if len(vcdKe) >= 2 && vcdKe["markForDelete"].(bool) && vcdKe["forceDelete"].(bool) { - return diag.Errorf("timeout of %d minutes reached, the cluster was successfully marked for deletion but was not removed in time", timeout) + err = cluster.Delete(time.Duration(d.Get("operations_timeout_minutes").(int))) + if err != nil { + return diag.FromErr(err) } - return diag.Errorf("timeout of %d minutes reached, the cluster was not marked for deletion, please try again", timeout) } func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { @@ -760,122 +739,52 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m return []*schema.ResourceData{d}, nil } -// cseKubernetesClusterStatus is an auxiliary structure that helps to read a Kubernetes cluster information retrieved -// from CSE RDEs -type cseKubernetesClusterStatus struct { - Cpi struct { - Version string `json:"version,omitempty"` - } `json:"cpi,omitempty"` - Csi struct { - Version string `json:"version,omitempty"` - } `json:"csi,omitempty"` - Capvcd struct { - Upgrade struct { - Current struct { - TkgVersion string `json:"tkgVersion,omitempty"` - KubernetesVersion string `json:"kubernetesVersion,omitempty"` - } `json:"current,omitempty"` - } `json:"upgrade,omitempty"` - NodePool []struct { - Name string `json:"name,omitempty"` - DiskSizeMb int `json:"diskSizeMb,omitempty"` - SizingPolicy string `json:"sizingPolicy,omitempty"` - PlacementPolicy string `json:"placementPolicy,omitempty"` - NvidiaGpuEnabled bool `json:"nvidiaGpuEnabled,omitempty"` - StorageProfile string `json:"storageProfile,omitempty"` - DesiredReplicas int `json:"desiredReplicas,omitempty"` - } `json:"nodePool,omitempty"` - K8sNetwork struct { - Pods struct { - CidrBlocks []string `json:"cidrBlocks,omitempty"` - } `json:"pods,omitempty"` - Services struct { - CidrBlocks []string `json:"cidrBlocks,omitempty"` - } `json:"services,omitempty"` - } `json:"k8sNetwork,omitempty"` - CapvcdVersion string `json:"capvcdVersion,omitempty"` - VcdProperties struct { - OrgVdcs []struct { - Name string `json:"name,omitempty"` - OvdcNetworkName string `json:"ovdcNetworkName,omitempty"` - } `json:"orgVdcs,omitempty"` - Organizations []struct { - Name string `json:"name,omitempty"` - } `json:"organizations,omitempty"` - } `json:"vcdProperties,omitempty"` - ClusterResourceSetBindings []struct { - Name string `json:"name"` - } `json:"clusterResourceSetBindings,omitempty"` - ClusterApiStatus struct { - ApiEndpoints []struct { - Host string `json:"host,omitempty"` - } `json:"apiEndpoints,omitempty"` - } `json:"clusterApiStatus,omitempty"` - } `json:"capvcd,omitempty"` -} - // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. // Returns a slice of warnings first and an error second. 
-func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *govcd.DefinedEntity, cseVersion string) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseClusterApiProviderCluster, cseVersion string) ([]error, error) { var warnings []error - d.SetId(rde.DefinedEntity.ID) - dSet(d, "name", rde.DefinedEntity.Name) + d.SetId(cluster.ID) + dSet(d, "name", cluster.Capvcd.Name) dSet(d, "cse_version", cseVersion) dSet(d, "runtime", "tkg") // Only one supported - if _, ok := rde.DefinedEntity.Entity["status"]; !ok { - return nil, fmt.Errorf("could not retrieve 'status' object from Kubernetes cluster with ID '%s'", d.Id()) - } - - var status cseKubernetesClusterStatus - b, err := json.Marshal(rde.DefinedEntity.Entity["status"]) - if err != nil { - return nil, fmt.Errorf("could not marshal RDE '%s': %s", d.Id(), err) - } - err = json.Unmarshal(b, &status) - if err != nil { - return nil, fmt.Errorf("could not unmarshal RDE '%s': %s", d.Id(), err) - } - - util.Logger.Printf("ADAM %v", status) - // TODO CSE: Why is this a slice??? - if len(status.Capvcd.VcdProperties.Organizations) == 0 { + if len(cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations) == 0 { return nil, fmt.Errorf("expected at least one Organization in cluster '%s'", d.Id()) } // This field is optional, as it can take the value from the VCD client if _, ok := d.GetOk("org"); ok { - dSet(d, "org", status.Capvcd.VcdProperties.Organizations[0].Name) + dSet(d, "org", cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name) } - adminOrg, err := vcdClient.GetAdminOrgByName(status.Capvcd.VcdProperties.Organizations[0].Name) + adminOrg, err := vcdClient.GetAdminOrgByName(cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name) if err != nil { - return nil, fmt.Errorf("could not get Organization with name %s: %s", status.Capvcd.VcdProperties.Organizations[0].Name, err) + return nil, fmt.Errorf("could not get Organization with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name, err) } // TODO CSE: Why is this a slice??? 
- if len(status.Capvcd.VcdProperties.OrgVdcs) == 0 { + if len(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs) == 0 { return nil, fmt.Errorf("expected at least one VDC in cluster '%s': %s", d.Id(), err) } - vdc, err := adminOrg.GetVDCByName(status.Capvcd.VcdProperties.OrgVdcs[0].Name, false) + vdc, err := adminOrg.GetVDCByName(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].Name, false) if err != nil { - return nil, fmt.Errorf("could not get VDC with name %s: %s", status.Capvcd.VcdProperties.OrgVdcs[0].Name, err) + return nil, fmt.Errorf("could not get VDC with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].Name, err) } dSet(d, "vdc_id", vdc.Vdc.ID) - network, err := vdc.GetOrgVdcNetworkByName(status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, false) + network, err := vdc.GetOrgVdcNetworkByName(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, false) if err != nil { - return nil, fmt.Errorf("could not get Org VDC Network with name %s: %s", status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, err) + return nil, fmt.Errorf("could not get Org VDC Network with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, err) } dSet(d, "network_id", network.OrgVDCNetwork.ID) if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client - if rde.DefinedEntity.Owner == nil { - return nil, fmt.Errorf("could not retrieve Owner information from RDE") + if cluster.Owner == "" { + return nil, fmt.Errorf("could not retrieve Owner information from Cluster") } - dSet(d, "owner", rde.DefinedEntity.Owner.Name) + dSet(d, "owner", cluster.Owner) } if _, ok := d.GetOk("api_token_file"); !ok { @@ -884,8 +793,8 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g dSet(d, "api_token_file", "******") } - bindings := make([]string, len(status.Capvcd.ClusterResourceSetBindings)) - for i, binding := range status.Capvcd.ClusterResourceSetBindings { + bindings := make([]string, len(cluster.Capvcd.Status.Capvcd.ClusterResourceSetBindings)) + for i, binding := range cluster.Capvcd.Status.Capvcd.ClusterResourceSetBindings { bindings[i] = binding.Name } err = d.Set("cluster_resource_set_bindings", bindings) @@ -893,24 +802,24 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g return nil, fmt.Errorf("could not set 'cluster_resource_set_bindings': %s", err) } - dSet(d, "cpi_version", status.Cpi.Version) - dSet(d, "csi_version", status.Csi.Version) - dSet(d, "capvcd_version", status.Capvcd.CapvcdVersion) - dSet(d, "kubernetes_version", status.Capvcd.Upgrade.Current.KubernetesVersion) - dSet(d, "tkg_product_version", status.Capvcd.Upgrade.Current.TkgVersion) - if len(status.Capvcd.K8sNetwork.Pods.CidrBlocks) == 0 { + dSet(d, "cpi_version", cluster.Capvcd.Status.Cpi.Version) + dSet(d, "csi_version", cluster.Capvcd.Status.Csi.Version) + dSet(d, "capvcd_version", cluster.Capvcd.Status.Capvcd.CapvcdVersion) + dSet(d, "kubernetes_version", cluster.Capvcd.Status.Capvcd.Upgrade.Current.KubernetesVersion) + dSet(d, "tkg_product_version", cluster.Capvcd.Status.Capvcd.Upgrade.Current.TkgVersion) + if len(cluster.Capvcd.Status.Capvcd.K8sNetwork.Pods.CidrBlocks) == 0 { return nil, fmt.Errorf("expected at least one Pod CIDR block in cluster '%s': %s", d.Id(), err) } - dSet(d, "pods_cidr", status.Capvcd.K8sNetwork.Pods.CidrBlocks[0]) - if len(status.Capvcd.K8sNetwork.Services.CidrBlocks) == 0 { + dSet(d, "pods_cidr", 
cluster.Capvcd.Status.Capvcd.K8sNetwork.Pods.CidrBlocks[0]) + if len(cluster.Capvcd.Status.Capvcd.K8sNetwork.Services.CidrBlocks) == 0 { return nil, fmt.Errorf("expected at least one Services CIDR block in cluster '%s': %s", d.Id(), err) } - dSet(d, "services_cidr", status.Capvcd.K8sNetwork.Services.CidrBlocks[0]) + dSet(d, "services_cidr", cluster.Capvcd.Status.Capvcd.K8sNetwork.Services.CidrBlocks[0]) - nodePoolBlocks := make([]map[string]interface{}, len(status.Capvcd.NodePool)-1) + nodePoolBlocks := make([]map[string]interface{}, len(cluster.Capvcd.Status.Capvcd.NodePool)-1) controlPlaneBlocks := make([]map[string]interface{}, 1) nameToIds := map[string]string{"": ""} // Initialize with empty value - for i, nodePool := range status.Capvcd.NodePool { + for i, nodePool := range cluster.Capvcd.Status.Capvcd.NodePool { block := map[string]interface{}{} block["machine_count"] = nodePool.DesiredReplicas // TODO: This needs a refactoring @@ -930,14 +839,17 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g if err != nil { return nil, err // TODO } - nameToIds[nodePool.PlacementPolicy] = policies[0].VdcComputePolicyV2.ID + nameToIds[nodePool.SizingPolicy] = policies[0].VdcComputePolicyV2.ID } if nodePool.StorageProfile != "" { - // TODO: govcd needs Getting Storage Profiles by Name or Get all with params - fmt.Print("foo") // Otherwise make static complains + ref, err := vdc.FindStorageProfileReference(nodePool.StorageProfile) + if err != nil { + return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) // TODO + } + nameToIds[nodePool.StorageProfile] = ref.ID } block["sizing_policy_id"] = nameToIds[nodePool.SizingPolicy] - if !nodePool.NvidiaGpuEnabled { + if nodePool.NvidiaGpuEnabled { // TODO: Be sure this is a worker node pool and not control plane (doesnt have this attr) block["vgpu_policy_id"] = nameToIds[nodePool.PlacementPolicy] // It's a placement policy here } else { block["placement_policy_id"] = nameToIds[nodePool.PlacementPolicy] @@ -947,10 +859,10 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g if strings.HasSuffix(nodePool.Name, "-control-plane-node-pool") { // Control Plane - if len(status.Capvcd.ClusterApiStatus.ApiEndpoints) == 0 { + if len(cluster.Capvcd.Status.Capvcd.ClusterApiStatus.ApiEndpoints) == 0 { return nil, fmt.Errorf("could not retrieve Cluster IP") } - block["ip"] = status.Capvcd.ClusterApiStatus.ApiEndpoints[0].Host + block["ip"] = cluster.Capvcd.Status.Capvcd.ClusterApiStatus.ApiEndpoints[0].Host controlPlaneBlocks[0] = block } else { // Worker node @@ -981,14 +893,17 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g reclaimPolicy = "delete" } - // TODO: Get Storage profile by Name + ref, err := vdc.FindStorageProfileReference(defaultStorageClassOptions["vcdStorageProfileName"].(string)) + if err != nil { + return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) // TODO + } // defaultStorageClassOptions["vcdStorageProfileName"] defaultStorageClass = append(defaultStorageClass, map[string]interface{}{ - // "storage_profile_id": "", - "name": defaultStorageClassOptions["k8sStorageClassName"], - "reclaim_policy": reclaimPolicy, - "filesystem": defaultStorageClassOptions["filesystem"], + "storage_profile_id": ref.ID, + "name": defaultStorageClassOptions["k8sStorageClassName"], + "reclaim_policy": reclaimPolicy, + "filesystem": 
defaultStorageClassOptions["filesystem"], }) } @@ -1026,527 +941,3 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, rde *g return warnings, nil } - -// getCseKubernetesClusterCreationPayload gets the payload for the RDE that will trigger a Kubernetes cluster creation. -// It generates a valid YAML that is embedded inside the RDE JSON, then it is returned as an unmarshaled -// generic map, that allows to be sent to VCD as it is. -func getCseKubernetesClusterCreationPayload(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (map[string]interface{}, error) { - capiYaml, err := generateCapiYaml(d, vcdClient, clusterDetails) - if err != nil { - return nil, err - } - - args := map[string]string{ - "Name": clusterDetails.Name, - "Org": clusterDetails.Org.AdminOrg.Name, - "VcdUrl": clusterDetails.VcdUrl, - "Vdc": clusterDetails.VdcName, - "Delete": "false", - "ForceDelete": "false", - "AutoRepairOnErrors": strconv.FormatBool(d.Get("auto_repair_on_errors").(bool)), - "ApiToken": clusterDetails.ApiToken, - "CapiYaml": capiYaml, - } - - if _, isStorageClassSet := d.GetOk("default_storage_class"); isStorageClassSet { - args["DefaultStorageClassStorageProfile"] = clusterDetails.UrnToNamesCache[d.Get("default_storage_class.0.storage_profile_id").(string)] - args["DefaultStorageClassName"] = d.Get("default_storage_class.0.name").(string) - if d.Get("default_storage_class.0.reclaim_policy").(string) == "delete" { - args["DefaultStorageClassUseDeleteReclaimPolicy"] = "true" - } else { - args["DefaultStorageClassUseDeleteReclaimPolicy"] = "false" - } - args["DefaultStorageClassFileSystem"] = d.Get("default_storage_class.0.filesystem").(string) - } - - rdeTmpl, err := getCseTemplateFile(d, vcdClient, "rde") - if err != nil { - return nil, err - } - - capvcdEmpty := template.Must(template.New(clusterDetails.Name).Parse(rdeTmpl)) - buf := &bytes.Buffer{} - if err := capvcdEmpty.Execute(buf, args); err != nil { - return nil, fmt.Errorf("could not render the Go template with the CAPVCD JSON: %s", err) - } - - var result interface{} - err = json.Unmarshal(buf.Bytes(), &result) - if err != nil { - return nil, fmt.Errorf("could not generate a correct CAPVCD JSON: %s", err) - } - - return result.(map[string]interface{}), nil -} - -// generateNodePoolYaml generates YAML blocks corresponding to the Kubernetes node pools. -func generateNodePoolYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (string, error) { - nodePoolTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_nodepool") - if err != nil { - return "", err - } - - nodePoolEmptyTmpl := template.Must(template.New(clusterDetails.Name + "-node-pool").Parse(nodePoolTmpl)) - resultYaml := "" - buf := &bytes.Buffer{} - - // We can have many node pool blocks, we build a YAML object for each one of them. 
- for _, nodePoolRaw := range d.Get("node_pool").([]interface{}) { - nodePool := nodePoolRaw.(map[string]interface{}) - name := nodePool["name"].(string) - - if nodePool["machine_count"] == 0 { - return "", fmt.Errorf("the node pool '%s' should have at least 1 node", name) - } - - // Check the correctness of the compute policies in the node pool block - placementPolicyId := nodePool["placement_policy_id"] - vpguPolicyId := nodePool["vgpu_policy_id"] - if placementPolicyId != "" && vpguPolicyId != "" { - return "", fmt.Errorf("the node pool '%s' should have either a Placement Policy or a vGPU Policy, not both", name) - } - if vpguPolicyId != "" { - placementPolicyId = vpguPolicyId // For convenience, we just use one of the variables as both cannot be set at same time - } - - if err := nodePoolEmptyTmpl.Execute(buf, map[string]string{ - "ClusterName": clusterDetails.Name, - "NodePoolName": name, - "TargetNamespace": clusterDetails.Name + "-ns", - "Catalog": clusterDetails.CatalogName, - "VAppTemplate": clusterDetails.OvaName, - "NodePoolSizingPolicy": clusterDetails.UrnToNamesCache[nodePool["sizing_policy_id"].(string)], - "NodePoolPlacementPolicy": clusterDetails.UrnToNamesCache[placementPolicyId.(string)], // Can be either Placement or vGPU - "NodePoolStorageProfile": clusterDetails.UrnToNamesCache[nodePool["storage_profile_id"].(string)], - "NodePoolDiskSize": fmt.Sprintf("%dGi", nodePool["disk_size_gi"].(int)), - "NodePoolEnableGpu": strconv.FormatBool(vpguPolicyId != ""), - "NodePoolMachineCount": strconv.Itoa(nodePool["machine_count"].(int)), - "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, - }); err != nil { - return "", fmt.Errorf("could not generate a correct Node Pool YAML: %s", err) - } - resultYaml += fmt.Sprintf("%s\n---\n", buf.String()) - buf.Reset() - } - return resultYaml, nil -} - -// generateMemoryHealthCheckYaml generates a YAML block corresponding to the Kubernetes memory health check. -func generateMemoryHealthCheckYaml(d *schema.ResourceData, vcdClient *VCDClient, vcdKeConfig vcdKeConfig, clusterName string) (string, error) { - if !d.Get("node_health_check").(bool) { - return "", nil - } - - mhcTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_mhc") - if err != nil { - return "", err - } - - mhcEmptyTmpl := template.Must(template.New(clusterName + "-mhc").Parse(mhcTmpl)) - buf := &bytes.Buffer{} - - if err := mhcEmptyTmpl.Execute(buf, map[string]string{ - "ClusterName": clusterName, - "TargetNamespace": clusterName + "-ns", - "MaxUnhealthyNodePercentage": fmt.Sprintf("%.0f%%", vcdKeConfig.MaxUnhealthyNodesPercentage), // With the 'percentage' suffix - "NodeStartupTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeStartupTimeout), // With the 'second' suffix - "NodeUnknownTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeUnknownTimeout), // With the 'second' suffix - "NodeNotReadyTimeout": fmt.Sprintf("%ss", vcdKeConfig.NodeNotReadyTimeout), // With the 'second' suffix - }); err != nil { - return "", fmt.Errorf("could not generate a correct Memory Health Check YAML: %s", err) - } - return fmt.Sprintf("%s\n---\n", buf.String()), nil - -} - -// waitUntilClusterIsProvisioned waits for the Kubernetes cluster to be in "provisioned" state, either indefinitely (if "operations_timeout_minutes=0") -// or until this timeout is reached. If one of the states is "error", this function also checks whether "auto_repair_on_errors=true" to keep -// waiting. 
-func waitUntilClusterIsProvisioned(vcdClient *VCDClient, d *schema.ResourceData, rdeId string) (string, error) { - var elapsed time.Duration - timeout := d.Get("operations_timeout_minutes").(int) - logHttpResponse := util.LogHttpResponse - sleepTime := 30 - - // The following loop is constantly polling VCD to retrieve the RDE, which has a big JSON inside, so we avoid filling - // the log with these big payloads. We use defer to be sure that we restore the initial logging state. - defer func() { - util.LogHttpResponse = logHttpResponse - }() - currentState := "" - - start := time.Now() - for elapsed <= time.Duration(timeout)*time.Minute || timeout == 0 { // If the user specifies operations_timeout_minutes=0, we wait forever - util.LogHttpResponse = false - rde, err := vcdClient.GetRdeById(rdeId) - util.LogHttpResponse = logHttpResponse - if err != nil { - return "", err - } - currentState, err = traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") - if err != nil { - util.Logger.Printf("[DEBUG] Failed getting cluster state, but will try again in %d seconds: %s", sleepTime, err) - // We ignore this error, as eventually the state should be populated - } else { - switch currentState { - case "provisioned": - return currentState, nil - case "error": - // We just finish if auto-recovery is disabled, otherwise we just let CSE fixing things in background - if !d.Get("auto_repair_on_errors").(bool) { - // Try to give feedback about what went wrong, which is located in a set of events in the RDE payload - latestError := "could not parse error event" - errorSet, err := traverseMapAndGet[[]interface{}](rde.DefinedEntity.Entity, "status.capvcd.errorSet") - if err == nil { - latestError, err = traverseMapAndGet[string](errorSet[len(errorSet)-1], "additionalDetails.error") - if err != nil { - latestError = "could not parse error event" - } - } - return "", fmt.Errorf("got an error and 'auto_repair_on_errors=false', aborting. Latest error: %s", latestError) - } - } - } - - util.Logger.Printf("[DEBUG] Cluster '%s' is in '%s' state, will check again in %d seconds", d.Id(), currentState, sleepTime) - elapsed = time.Since(start) - time.Sleep(time.Duration(sleepTime) * time.Second) - } - return "", fmt.Errorf("timeout of %d minutes reached, latest cluster state obtained was '%s'", timeout, currentState) -} - -// tkgVersionBundle is a type that contains all the versions of the components of -// a Kubernetes cluster that can be obtained with the vApp Template name, downloaded -// from VMware Customer connect: -// https://customerconnect.vmware.com/downloads/details?downloadGroup=TKG-240&productId=1400 -type tkgVersionBundle struct { - EtcdVersion string - CoreDnsVersion string - TkgVersion string - TkrVersion string - KubernetesVersion string -} - -// getTkgVersionBundleFromVAppTemplateName returns a tkgVersionBundle with the details of -// all the Kubernetes cluster components versions given a valid vApp Template name, that should -// correspond to a Kubernetes template. If it is not a valid vApp Template, returns an error. 
-func getTkgVersionBundleFromVAppTemplateName(vcdClient *VCDClient, ovaName string) (tkgVersionBundle, error) { - result := tkgVersionBundle{} - - if strings.Contains(ovaName, "photon") { - return result, fmt.Errorf("the vApp Template '%s' uses Photon, and it is not supported", ovaName) - } - - cutPosition := strings.LastIndex(ovaName, "kube-") - if cutPosition < 0 { - return result, fmt.Errorf("the vApp Template '%s' is not a Kubernetes template OVA", ovaName) - } - parsedOvaName := strings.ReplaceAll(ovaName, ".ova", "")[cutPosition+len("kube-"):] - - // FIXME: This points to my fork, but should point to final version!! - file, err := fileFromUrlToString(vcdClient, "https://raw.githubusercontent.com/adambarreiro/terraform-provider-vcd/add-cse-cluster-resource/vcd/cse/tkg_versions.json", "json") - if err != nil { - return result, fmt.Errorf("error reading tkg_versions.json: %s", err) - } - - versionsMap := map[string]interface{}{} - err = json.Unmarshal([]byte(file), &versionsMap) - if err != nil { - return result, err - } - versionMap, ok := versionsMap[parsedOvaName] - if !ok { - return result, fmt.Errorf("the Kubernetes OVA '%s' is not supported", parsedOvaName) - } - - // The map checking above guarantees that all splits and replaces will work - result.KubernetesVersion = strings.Split(parsedOvaName, "-")[0] - result.TkrVersion = strings.ReplaceAll(strings.Split(parsedOvaName, "-")[0], "+", "---") + "-" + strings.Split(parsedOvaName, "-")[1] - result.TkgVersion = versionMap.(map[string]interface{})["tkg"].(string) - result.EtcdVersion = versionMap.(map[string]interface{})["etcd"].(string) - result.CoreDnsVersion = versionMap.(map[string]interface{})["coreDns"].(string) - return result, nil -} - -// createClusterDto is a helper struct that contains all the required elements to successfully create a Kubernetes cluster using CSE. -// This is useful to avoid querying VCD too much, as the Terraform configuration works mostly with IDs, but we require names, among -// other items that we eventually need to retrieve from VCD. -type createClusterDto struct { - Name string - VcdUrl string - Org *govcd.AdminOrg - VdcName string - OvaName string - CatalogName string - NetworkName string - RdeType *govcd.DefinedEntityType - UrnToNamesCache map[string]string // Maps unique IDs with their resource names (example: Compute policy ID with its name) - VcdKeConfig vcdKeConfig - TkgVersion tkgVersionBundle - Owner string - ApiToken string -} - -// vcdKeConfig is a type that contains only the required and relevant fields from the CSE installation configuration, -// such as the Machine Health Check settings or the container registry URL. -type vcdKeConfig struct { - MaxUnhealthyNodesPercentage float64 - NodeStartupTimeout string - NodeNotReadyTimeout string - NodeUnknownTimeout string - ContainerRegistryUrl string -} - -// getClusterCreateDto creates and returns a createClusterDto object by obtaining all the required information -// from the Terraform resource data and the target VCD. 
-func getClusterCreateDto(d *schema.ResourceData, vcdClient *VCDClient) (*createClusterDto, error) { - result := &createClusterDto{} - result.UrnToNamesCache = map[string]string{"": ""} // Initialize with a "zero" entry, used when there's no ID set in the Terraform schema - - name := d.Get("name").(string) - result.Name = name - - org, err := vcdClient.GetAdminOrgFromResource(d) - if err != nil { - return nil, fmt.Errorf("could not retrieve the cluster Organization: %s", err) - } - result.Org = org - - vdcId := d.Get("vdc_id").(string) - vdc, err := org.GetVDCById(vdcId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the VDC with ID '%s': %s", vdcId, err) - } - result.VdcName = vdc.Vdc.Name - - vAppTemplateId := d.Get("ova_id").(string) - vAppTemplate, err := vcdClient.GetVAppTemplateById(vAppTemplateId) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Kubernetes OVA with ID '%s': %s", vAppTemplateId, err) - } - result.OvaName = vAppTemplate.VAppTemplate.Name - - tkgVersions, err := getTkgVersionBundleFromVAppTemplateName(vcdClient, vAppTemplate.VAppTemplate.Name) - if err != nil { - return nil, err - } - result.TkgVersion = tkgVersions - - catalogName, err := vAppTemplate.GetCatalogName() - if err != nil { - return nil, fmt.Errorf("could not retrieve the CatalogName of the OVA '%s': %s", vAppTemplateId, err) - } - result.CatalogName = catalogName - - networkId := d.Get("network_id").(string) - network, err := vdc.GetOrgVdcNetworkById(networkId, true) - if err != nil { - return nil, fmt.Errorf("could not retrieve the Org VDC NetworkName with ID '%s': %s", networkId, err) - } - result.NetworkName = network.OrgVDCNetwork.Name - - currentCseVersion := supportedCseVersions[d.Get("cse_version").(string)] - rdeType, err := vcdClient.GetRdeType("vmware", "capvcdCluster", currentCseVersion[1]) - if err != nil { - return nil, fmt.Errorf("could not retrieve RDE Type vmware:capvcdCluster:'%s': %s", currentCseVersion[1], err) - } - result.RdeType = rdeType - - // Fills the cache map that relates IDs of Storage profiles and Compute policies (the schema uses them to build a - // healthy Terraform dependency graph) with their corresponding names (the cluster YAML and CSE in general uses names only). - // Having this map minimizes the amount of queries to VCD, specially when building the set of node pools, - // as there can be a lot of them. - for _, configBlockAttr := range []string{"default_storage_class", "control_plane", "node_pool"} { - if _, ok := d.GetOk(configBlockAttr); !ok { - continue // Some blocks are optional, this is managed by the schema constraints - } - // The node_pool is a Set, but the others are already Lists - var configBlockAsList []interface{} - if _, isASet := d.Get(configBlockAttr).(*schema.Set); isASet { - configBlockAsList = d.Get(configBlockAttr).(*schema.Set).List() - } else { - configBlockAsList = d.Get(configBlockAttr).([]interface{}) - } - - // For every existing block/list, we check the inner attributes to retrieve their corresponding object names, - // like Storage Profile names and Compute Policy names. If the ID is already registered, we skip it. 
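Note: the UrnToNamesCache filled here is a plain memoisation map: each Storage Profile or Compute Policy ID is resolved against VCD at most once, and later lookups reuse the cached name. A small generic sketch of that idea, where lookupName stands in for the real GetStorageProfileById / GetVdcComputePolicyV2ById calls:

package main

import "fmt"

// resolveNames returns a map from each requested ID to its name, calling the
// (simulated) API only once per distinct ID.
func resolveNames(ids []string, lookupName func(id string) (string, error)) (map[string]string, error) {
	cache := map[string]string{"": ""} // a "zero" entry for unset IDs, as in the code below
	for _, id := range ids {
		if _, alreadyPresent := cache[id]; alreadyPresent {
			continue // skip IDs that were already resolved
		}
		name, err := lookupName(id)
		if err != nil {
			return nil, fmt.Errorf("could not resolve ID '%s': %s", id, err)
		}
		cache[id] = name
	}
	return cache, nil
}

func main() {
	calls := 0
	// lookupName simulates a VCD query such as GetVdcComputePolicyV2ById.
	lookupName := func(id string) (string, error) {
		calls++
		return "name-of-" + id, nil
	}
	names, _ := resolveNames([]string{"urn:a", "urn:b", "urn:a", ""}, lookupName)
	fmt.Println(names["urn:a"], calls) // prints "name-of-urn:a 2": repeats and empty IDs are skipped
}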
- for _, configBlockRaw := range configBlockAsList { - configBlock := configBlockRaw.(map[string]interface{}) - if id, ok := configBlock["storage_profile_id"]; ok { - if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; !alreadyPresent { - storageProfile, err := vcdClient.GetStorageProfileById(id.(string)) - if err != nil { - return nil, fmt.Errorf("could not get Storage Profile with ID '%s': %s", id, err) - } - result.UrnToNamesCache[id.(string)] = storageProfile.Name - } - } - // The other sub-attributes are just Compute policies, we treat them the same - for _, attribute := range []string{"sizing_policy_id", "vgpu_policy_id", "placement_policy_id"} { - id, ok := configBlock[attribute] - if !ok { - continue - } - if _, alreadyPresent := result.UrnToNamesCache[id.(string)]; alreadyPresent { - continue - } - computePolicy, err := vcdClient.GetVdcComputePolicyV2ById(id.(string)) - if err != nil { - return nil, fmt.Errorf("could not get Compute Policy with ID '%s': %s", id, err) - } - result.UrnToNamesCache[id.(string)] = computePolicy.VdcComputePolicyV2.Name - } - } - } - - vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) - if err != nil { - return nil, err - } - result.VcdKeConfig = *vcdKeConfig - - owner, ok := d.GetOk("owner") - if !ok { - sessionInfo, err := vcdClient.Client.GetSessionInfo() - if err != nil { - return nil, fmt.Errorf("error getting the owner of the cluster: %s", err) - } - owner = sessionInfo.User.Name - } - result.Owner = owner.(string) - - apiToken, err := govcd.GetTokenFromFile(d.Get("api_token_file").(string)) - if err != nil { - return nil, fmt.Errorf("API token file could not be parsed or found: %s\nPlease check that the format is the one that 'vcd_api_token' resource uses", err) - } - result.ApiToken = apiToken.RefreshToken - - result.VcdUrl = strings.Replace(vcdClient.VCDClient.Client.VCDHREF.String(), "/api", "", 1) - return result, nil -} - -// getVcdKeConfiguration gets the required information from the CSE Server configuration RDE -func getVcdKeConfiguration(d *schema.ResourceData, vcdClient *VCDClient) (*vcdKeConfig, error) { - currentCseVersion := supportedCseVersions[d.Get("cse_version").(string)] - result := &vcdKeConfig{} - - rdes, err := vcdClient.GetRdesByName("vmware", "VCDKEConfig", currentCseVersion[0], "vcdKeConfig") - if err != nil { - return nil, fmt.Errorf("could not retrieve VCDKEConfig RDE with version %s: %s", currentCseVersion[0], err) - } - if len(rdes) != 1 { - return nil, fmt.Errorf("expected exactly one VCDKEConfig RDE but got %d", len(rdes)) - } - - profiles, err := traverseMapAndGet[[]interface{}](rdes[0].DefinedEntity.Entity, "profiles") - if err != nil { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a 'profiles' element: %s", err) - } - if len(profiles) != 1 { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a single 'profiles' element, got %d", len(profiles)) - } - // TODO: Check airgapped environments: https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1.1a/VMware-Cloud-Director-Container-Service-Extension-Install-provider-4.1.1/GUID-F00BE796-B5F2-48F2-A012-546E2E694400.html - result.ContainerRegistryUrl = fmt.Sprintf("%s/tkg", profiles[0].(map[string]interface{})["containerRegistryUrl"].(string)) - - if _, ok := d.GetOk("node_health_check"); ok { - mhc, err := traverseMapAndGet[map[string]interface{}](profiles[0], "K8Config.mhc") - if err != nil { - return nil, fmt.Errorf("wrong format of VCDKEConfig, expected a 'profiles[0].K8sConfig.mhc' element: %s", 
err) - } - result.MaxUnhealthyNodesPercentage = mhc["maxUnhealthyNodes"].(float64) - result.NodeStartupTimeout = mhc["nodeStartupTimeout"].(string) - result.NodeNotReadyTimeout = mhc["nodeUnknownTimeout"].(string) - result.NodeUnknownTimeout = mhc["nodeNotReadyTimeout"].(string) - } - return result, nil -} - -// generateCapiYaml generates the YAML string that is required during Kubernetes cluster creation, to be embedded -// in the CAPVCD cluster JSON payload. This function picks data from the Terraform schema and the createClusterDto to -// populate several Go templates and build a final YAML. -func generateCapiYaml(d *schema.ResourceData, vcdClient *VCDClient, clusterDetails *createClusterDto) (string, error) { - clusterTmpl, err := getCseTemplateFile(d, vcdClient, "capiyaml_cluster") - if err != nil { - return "", err - } - - // This YAML snippet contains special strings, such as "%,", that render wrong using the Go template engine - sanitizedTemplate := strings.NewReplacer("%", "%%").Replace(clusterTmpl) - capiYamlEmpty := template.Must(template.New(clusterDetails.Name + "-cluster").Parse(sanitizedTemplate)) - - nodePoolYaml, err := generateNodePoolYaml(d, vcdClient, clusterDetails) - if err != nil { - return "", err - } - - memoryHealthCheckYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, clusterDetails.VcdKeConfig, clusterDetails.Name) - if err != nil { - return "", err - } - - args := map[string]string{ - "ClusterName": clusterDetails.Name, - "TargetNamespace": clusterDetails.Name + "-ns", - "TkrVersion": clusterDetails.TkgVersion.TkrVersion, - "TkgVersion": clusterDetails.TkgVersion.TkgVersion, - "UsernameB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.Owner)), - "ApiTokenB64": base64.StdEncoding.EncodeToString([]byte(clusterDetails.ApiToken)), - "PodCidr": d.Get("pods_cidr").(string), - "ServiceCidr": d.Get("services_cidr").(string), - "VcdSite": clusterDetails.VcdUrl, - "Org": clusterDetails.Org.AdminOrg.Name, - "OrgVdc": clusterDetails.VdcName, - "OrgVdcNetwork": clusterDetails.NetworkName, - "Catalog": clusterDetails.CatalogName, - "VAppTemplate": clusterDetails.OvaName, - "ControlPlaneSizingPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.sizing_policy_id").(string)], - "ControlPlanePlacementPolicy": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.placement_policy_id").(string)], - "ControlPlaneStorageProfile": clusterDetails.UrnToNamesCache[d.Get("control_plane.0.storage_profile_id").(string)], - "ControlPlaneDiskSize": fmt.Sprintf("%dGi", d.Get("control_plane.0.disk_size_gi").(int)), - "ControlPlaneMachineCount": strconv.Itoa(d.Get("control_plane.0.machine_count").(int)), - "DnsVersion": clusterDetails.TkgVersion.CoreDnsVersion, - "EtcdVersion": clusterDetails.TkgVersion.EtcdVersion, - "ContainerRegistryUrl": clusterDetails.VcdKeConfig.ContainerRegistryUrl, - "KubernetesVersion": clusterDetails.TkgVersion.KubernetesVersion, - "SshPublicKey": d.Get("ssh_public_key").(string), - } - if _, ok := d.GetOk("control_plane.0.ip"); ok { - args["ControlPlaneEndpoint"] = d.Get("control_plane.0.ip").(string) - } - if _, ok := d.GetOk("virtual_ip_subnet"); ok { - args["VirtualIpSubnet"] = d.Get("virtual_ip_subnet").(string) - } - - buf := &bytes.Buffer{} - if err := capiYamlEmpty.Execute(buf, args); err != nil { - return "", fmt.Errorf("could not generate a correct CAPI YAML: %s", err) - } - // The final "pretty" YAML. 
To embed it in the final payload it must be marshaled into a one-line JSON string - prettyYaml := fmt.Sprintf("%s\n%s\n%s", memoryHealthCheckYaml, nodePoolYaml, buf.String()) - - // We don't use a standard json.Marshal() as the YAML contains special - // characters that are not encoded properly, such as '<'. - buf.Reset() - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - err = enc.Encode(prettyYaml) - if err != nil { - return "", fmt.Errorf("could not encode the CAPI YAML into JSON: %s", err) - } - - // Removes trailing quotes from the final JSON string - return strings.Trim(strings.TrimSpace(buf.String()), "\""), nil -} - -// getCseTemplateFile gets a Go template file corresponding to the CSE version set in the Terraform configuration -func getCseTemplateFile(d *schema.ResourceData, vcdClient *VCDClient, templateName string) (string, error) { - cseVersion := d.Get("cse_version").(string) - - // In the future, we can put here some logic for equivalent CSE versions, to avoid duplicating the same Go - // templates that didn't change among versions. - - // FIXME: This points to my fork, but should point to the final URL!! - t := fmt.Sprintf("https://raw.githubusercontent.com/adambarreiro/terraform-provider-vcd/add-cse-cluster-resource/vcd/cse/%s/%s.tmpl", cseVersion, templateName) - - return fileFromUrlToString(vcdClient, t, "tmpl") -} diff --git a/vcd/validate_funcs.go b/vcd/validate_funcs.go index 79671267d..1ac00dc9c 100644 --- a/vcd/validate_funcs.go +++ b/vcd/validate_funcs.go @@ -194,7 +194,7 @@ func matchRegex(regex, errorMessage string) schema.SchemaValidateDiagFunc { } r, err := regexp.Compile(regex) if err != nil { - return diag.Errorf("could not compile regular expression '%s'", regex) + return diag.Errorf("could not compile regular expression '%s': %s", regex, err) } if !r.MatchString(value) { return diag.Errorf("%s", errorMessage) From f24aeafac41e02f8fc3a786cceab9e4c19925cba Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 2 Feb 2024 14:00:46 +0100 Subject: [PATCH 056/156] Refactor with latest SDK changes Signed-off-by: abarreiro --- go.mod | 6 +- go.sum | 26 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 445 +++++------------- ...esource_vcd_cse_kubernetes_cluster_test.go | 60 --- vcd/structure.go | 57 --- vcd/structure_unit_test.go | 141 ------ 6 files changed, 137 insertions(+), 598 deletions(-) diff --git a/go.mod b/go.mod index 9718aa8e8..154ab6b48 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module github.com/vmware/terraform-provider-vcd/v3 -go 1.20 +go 1.21 + +toolchain go1.21.6 require ( github.com/davecgh/go-spew v1.1.1 @@ -8,7 +10,6 @@ require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/kr/pretty v0.2.1 github.com/vmware/go-vcloud-director/v2 v2.22.0 - gopkg.in/yaml.v3 v3.0.1 ) require ( @@ -18,6 +19,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) require ( diff --git a/go.sum b/go.sum index f369f9ca4..88b0c8f5b 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto 
v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= @@ -10,23 +12,31 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195 h1:c4mLfegoDw6OhSJXTd2jUEQgZUQuJWtocudb97Qn9EM= github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195/go.mod h1:SLqhdZcd+dF3TEVL2RMoob5bBP5R1P1qkox+HtCBgGI= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= +github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -34,6 +44,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -79,8 +90,11 @@ github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv2 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -89,6 +103,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= @@ -110,10 +125,13 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/peterhellberg/link v1.1.0 h1:s2+RH8EGuI/mI4QwrWGSYQCRz7uNgip9BaM04HKu5kc= github.com/peterhellberg/link v1.1.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -125,6 +143,7 @@ 
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= @@ -189,6 +208,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -206,6 +226,10 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 769186d9d..c9fc1580e 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -1,7 +1,6 @@ package vcd import ( - "bytes" "context" _ "embed" "fmt" @@ -10,26 +9,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" - "github.com/vmware/go-vcloud-director/v2/types/v56" - "github.com/vmware/go-vcloud-director/v2/util" - "gopkg.in/yaml.v3" - "net/url" "strings" "time" ) -// supportedCseVersions is a map that contains only the supported CSE versions as keys, -// and its corresponding components versions as a slice of strings. The first string is the VCDKEConfig RDE Type version, -// then the CAPVCD RDE Type version and finally the CAPVCD Behavior version. -// TODO: Is this really necessary? What happens in UI if I have a 1.1.0-1.2.0-1.0.0 (4.2) cluster and then CSE is updated to 4.3? 
-var supportedCseVersions = map[string][]string{ - "4.2": { - "1.1.0", // VCDKEConfig RDE Type version - "1.2.0", // CAPVCD RDE Type version - "1.0.0", // CAPVCD Behavior version - }, -} - func resourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ CreateContext: resourceVcdCseKubernetesClusterCreate, @@ -44,7 +27,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, // Required, but validated at runtime ForceNew: true, - ValidateFunc: validation.StringInSlice(getKeys(supportedCseVersions), false), + ValidateFunc: validation.StringInSlice([]string{"4.2"}, false), Description: "The CSE version to use", }, "runtime": { @@ -438,14 +421,14 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) } - creationData := govcd.CseClusterCreationInput{ + creationData := govcd.CseClusterSettings{ Name: d.Get("name").(string), OrganizationId: org.Org.ID, VdcId: d.Get("vdc_id").(string), NetworkId: d.Get("network_id").(string), KubernetesTemplateOvaId: d.Get("ova_id").(string), CseVersion: d.Get("cse_version").(string), - ControlPlane: govcd.ControlPlaneInput{ + ControlPlane: govcd.CseControlPlaneSettings{ MachineCount: d.Get("control_plane.0.machine_count").(int), DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), SizingPolicyId: d.Get("control_plane.0.sizing_policy_id").(string), @@ -456,10 +439,10 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } workerPoolsAttr := d.Get("worker_pool").(*schema.Set).List() - workerPools := make([]govcd.WorkerPoolInput, len(workerPoolsAttr)) + workerPools := make([]govcd.CseWorkerPoolSettings, len(workerPoolsAttr)) for i, workerPoolRaw := range workerPoolsAttr { workerPool := workerPoolRaw.(map[string]interface{}) - workerPools[i] = govcd.WorkerPoolInput{ + workerPools[i] = govcd.CseWorkerPoolSettings{ Name: workerPool["name"].(string), MachineCount: workerPool["machine_count"].(int), DiskSizeGi: workerPool["disk_size_gi"].(int), @@ -472,7 +455,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour creationData.WorkerPools = workerPools if _, ok := d.GetOk("default_storage_class"); ok { - creationData.DefaultStorageClass = &govcd.DefaultStorageClassInput{ + creationData.DefaultStorageClass = &govcd.CseDefaultStorageClassSettings{ StorageProfileId: d.Get("default_storage_class.0.storage_profile_id").(string), Name: d.Get("default_storage_class.0.name").(string), ReclaimPolicy: d.Get("default_storage_class.0.reclaim_policy").(string), @@ -480,11 +463,11 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } } - cluster, err := vcdClient.CseCreateKubernetesCluster(creationData, time.Duration(d.Get("operations_timeout_minutes").(int))*time.Minute) + cluster, err := org.CseCreateKubernetesCluster(creationData, time.Duration(d.Get("operations_timeout_minutes").(int))*time.Minute) if err != nil { if cluster != nil { - if cluster.Capvcd.Status.VcdKe.State != "provisioned" { - return diag.Errorf("Kubernetes cluster creation finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.Capvcd.Status.VcdKe.State, err) + if cluster.State != "provisioned" { + return diag.Errorf("Kubernetes cluster creation finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.State, err) } } return diag.Errorf("Kubernetes cluster creation failed: %s", err) @@ -499,18 +482,22 @@ 
func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - vcdClient := meta.(*VCDClient) var diags diag.Diagnostics - // The ID must be already set for the read to be successful. We can't rely on GetRdesByName as there can be - // many clusters with the same name and RDE Type. - var err error - cluster, err := vcdClient.GetKubernetesClusterById(d.Id()) + vcdClient := meta.(*VCDClient) + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) + } + + // The ID must be already set for the read to be successful. We can't rely on the name as there can be + // many clusters with the same name in the same org. + cluster, err := org.CseGetKubernetesClusterById(d.Id()) if err != nil { return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, vcdClient, cluster, d.Get("cse_version").(string)) + warns, err := saveClusterDataToState(d, cluster) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -541,149 +528,34 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, } vcdClient := meta.(*VCDClient) - - // The ID must be already set for the update to be successful. We can't rely on GetRdesByName as there can be - // many clusters with the same name and RDE Type. - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + org, err := vcdClient.GetOrgFromResource(d) if err != nil { - return diag.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - if state != "provisioned" { - return diag.Errorf("could not update the Kubernetes cluster with ID '%s': It is in '%s' state, but should be 'provisioned'", d.Id(), state) + return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) } - // Gets and unmarshals the CAPI YAML to update it - capiYaml, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "spec.capiYaml") + cluster, err := org.CseGetKubernetesClusterById(d.Id()) if err != nil { - return diag.Errorf("could not retrieve the CAPI YAML from the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - // TODO: Is there a simpler way? 
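Editor's note: the TODO above asks whether there is a simpler way to split the CAPI YAML into its documents. Below is a minimal sketch of the usual pattern (decode until io.EOF), shown only as an illustration and not as part of the patch; it assumes gopkg.in/yaml.v3, which the original file already imports, and the helper name decodeYamlDocuments is invented for the example:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

// decodeYamlDocuments decodes every document of a multi-document YAML string and
// returns one generic map per document. Decoding stops cleanly at io.EOF, so the
// caller does not need to track indexes or pre-allocate the slice.
func decodeYamlDocuments(capiYaml string) ([]map[string]interface{}, error) {
	decoder := yaml.NewDecoder(strings.NewReader(capiYaml))
	var documents []map[string]interface{}
	for {
		document := map[string]interface{}{}
		if err := decoder.Decode(&document); err != nil {
			if errors.Is(err, io.EOF) {
				return documents, nil // no more YAML documents to read
			}
			return nil, fmt.Errorf("could not decode a CAPI YAML document: %s", err)
		}
		documents = append(documents, document)
	}
}

func main() {
	docs, err := decodeYamlDocuments("kind: VCDMachineTemplate\n---\nkind: KubeadmControlPlane\n")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(docs), docs[0]["kind"]) // prints: 2 VCDMachineTemplate
}
```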
- dec := yaml.NewDecoder(bytes.NewReader([]byte(capiYaml))) - var yamlDocs []map[string]interface{} - i := 0 - for { - yamlDocs = append(yamlDocs, map[string]interface{}{}) - if dec.Decode(&yamlDocs[i]) != nil { - break - } - i++ - } - - if d.HasChange("ova_id") { - newOva := d.Get("ova_id") - ova, err := vcdClient.GetVAppTemplateById(newOva.(string)) - if err != nil { - return diag.Errorf("could not retrieve the new Kubernetes OVA with ID '%s': %s", newOva, err) - } - // TODO: Check whether the update can be performed - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "VCDMachineTemplate" { - yamlDoc["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["template"] = ova.VAppTemplate.Name - } - } - } - if d.HasChange("control_plane.0.machine_count") { - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "KubeadmControlPlane" { - yamlDoc["spec"].(map[string]interface{})["replicas"] = d.Get("control_plane.0.machine_count") - } - } - } - // The node pools can only be created and resized - var newNodePools []map[string]interface{} - if d.HasChange("node_pool") { - for _, nodePoolRaw := range d.Get("node_pool").(*schema.Set).List() { - nodePool := nodePoolRaw.(map[string]interface{}) - for _, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "MachineDeployment" { - if yamlDoc["metadata"].(map[string]interface{})["name"] == nodePool["name"].(string) { - yamlDoc["spec"].(map[string]interface{})["replicas"] = nodePool["machine_count"].(int) - } else { - // TODO: Create node pool - newNodePools = append(newNodePools, map[string]interface{}{}) - } - } - } + return diag.Errorf("could not get Kubernetes cluster with ID '%s': %s", d.Id(), err) + } + payload := govcd.CseClusterUpdateInput{} + if d.HasChange("worker_pool") { + workerPools := map[string]govcd.CseWorkerPoolUpdateInput{} + for _, workerPoolAttr := range d.Get("worker_pool").(*schema.Set).List() { + w := workerPoolAttr.(map[string]interface{}) + workerPools[w["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: w["machine_count"].(int)} } - } - if len(newNodePools) > 0 { - yamlDocs = append(yamlDocs, newNodePools...) 
+ payload.WorkerPools = &workerPools } - if d.HasChange("node_health_check") { - oldNhc, newNhc := d.GetChange("node_health_check") - if oldNhc.(bool) && !newNhc.(bool) { - toDelete := 0 - for i, yamlDoc := range yamlDocs { - if yamlDoc["kind"] == "MachineHealthCheck" { - toDelete = i - } - } - yamlDocs[toDelete] = yamlDocs[len(yamlDocs)-1] // We delete the MachineHealthCheck block by putting the last doc in its place - yamlDocs = yamlDocs[:len(yamlDocs)-1] // Then we remove the last doc - } else { - // Add the YAML block - vcdKeConfig, err := getVcdKeConfiguration(d, vcdClient) - if err != nil { - return diag.FromErr(err) - } - rawYaml, err := generateMemoryHealthCheckYaml(d, vcdClient, *vcdKeConfig, d.Get("name").(string)) - if err != nil { - return diag.FromErr(err) - } - yamlBlock := map[string]interface{}{} - err = yaml.Unmarshal([]byte(rawYaml), &yamlBlock) - if err != nil { - return diag.Errorf("error updating Memory Health Check: %s", err) - } - yamlDocs = append(yamlDocs, yamlBlock) - } - util.Logger.Printf("not done but make static complains :)") - } - - updatedYaml, err := yaml.Marshal(yamlDocs) + err = cluster.Update(payload) if err != nil { - return diag.Errorf("error updating cluster: %s", err) - } - - // This must be done with retries due to the possible clash on ETags - _, err = runWithRetry( - "update cluster", - "could not update cluster", - 1*time.Minute, - nil, - func() (any, error) { - rde, err := vcdClient.GetRdeById(d.Id()) - if err != nil { - return nil, fmt.Errorf("could not update Kubernetes cluster with ID '%s': %s", d.Id(), err) - } - - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["capiYaml"] = updatedYaml - rde.DefinedEntity.Entity["spec"].(map[string]interface{})["vcdKe"].(map[string]interface{})["autoRepairOnErrors"] = d.Get("auto_repair_on_errors").(bool) - - // err = rde.Update(*rde.DefinedEntity) - util.Logger.Printf("ADAM: PERFORM UPDATE: %v", rde.DefinedEntity.Entity) - if err != nil { - return nil, err + if cluster != nil { + if cluster.State != "provisioned" { + return diag.Errorf("Kubernetes cluster update finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.State, err) } - return nil, nil - }, - ) - if err != nil { - return diag.FromErr(err) - } - - state, err = waitUntilClusterIsProvisioned(vcdClient, d, rde.DefinedEntity.ID) - if err != nil { + } return diag.Errorf("Kubernetes cluster update failed: %s", err) } - if state != "provisioned" { - return diag.Errorf("Kubernetes cluster update failed, cluster is not in 'provisioned' state, but '%s'", state) - } return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -695,7 +567,12 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - cluster, err := vcdClient.GetKubernetesClusterById(d.Id()) + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not get Organization: %s", err) + } + + cluster, err := org.CseGetKubernetesClusterById(d.Id()) if err != nil { return diag.FromErr(err) } @@ -703,37 +580,44 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m if err != nil { return diag.FromErr(err) } + return nil } func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - vcdClient := meta.(*VCDClient) - resourceURI := strings.Split(d.Id(), ImportSeparator) - var 
rdeId, cseVersion string + var clusterId, orgName, cseVersion string switch len(resourceURI) { - case 2: // ImportSeparator != '.' + case 3: // ImportSeparator != '.' cseVersion = resourceURI[0] - rdeId = resourceURI[1] - case 3: // ImportSeparator == '.' + orgName = resourceURI[1] + clusterId = resourceURI[2] + case 4: // ImportSeparator == '.' cseVersion = fmt.Sprintf("%s.%s", resourceURI[0], resourceURI[1]) - rdeId = resourceURI[2] + orgName = resourceURI[2] + clusterId = resourceURI[3] default: - return nil, fmt.Errorf("resource name must be specified as cse_version.cluster_id") + return nil, fmt.Errorf("resource name must be specified as cse_version(two digits: major.minor).organization_name.cluster_id, but it was '%s'", d.Id()) } - dSet(d, "cse_version", cseVersion) + dSet(d, "cse_version", strings.Replace(cseVersion, "v", "", 1)) // We remove any 'v' prefix just in case, to avoid common errors if people use SemVer - rde, err := vcdClient.GetRdeById(rdeId) + vcdClient := meta.(*VCDClient) + org, err := vcdClient.GetOrgByName(orgName) if err != nil { - return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) + return nil, fmt.Errorf("could not get Organization with name '%s': %s", orgName, err) } - warns, err := saveClusterDataToState(d, vcdClient, rde, cseVersion) + cluster, err := org.CseGetKubernetesClusterById(clusterId) if err != nil { - return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", rdeId, err) + return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", clusterId, err) + } + + warns, err := saveClusterDataToState(d, cluster) + if err != nil { + return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) } for _, warn := range warns { // We can't do much here as Import does not support Diagnostics - logForScreen(rdeId, fmt.Sprintf("got a warning during import: %s", warn)) + logForScreen(cluster.ID, fmt.Sprintf("got a warning during import: %s", warn)) } return []*schema.ResourceData{d}, nil @@ -741,49 +625,35 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. // Returns a slice of warnings first and an error second. -func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseClusterApiProviderCluster, cseVersion string) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster) ([]error, error) { var warnings []error d.SetId(cluster.ID) - dSet(d, "name", cluster.Capvcd.Name) - dSet(d, "cse_version", cseVersion) + dSet(d, "name", cluster.Name) + dSet(d, "cse_version", cluster.CseVersion) dSet(d, "runtime", "tkg") // Only one supported + dSet(d, "vdc_id", cluster.VdcId) + dSet(d, "network_id", cluster.NetworkId) + dSet(d, "cpi_version", cluster.CpiVersion) + dSet(d, "csi_version", cluster.CsiVersion) + dSet(d, "capvcd_version", cluster.CapvcdVersion) + dSet(d, "kubernetes_version", cluster.KubernetesVersion) + dSet(d, "tkg_product_version", cluster.TkgVersion) + dSet(d, "pods_cidr", cluster.PodCidr) + dSet(d, "services_cidr", cluster.ServiceCidr) + dSet(d, "ova_id", cluster.KubernetesTemplateOvaId) + dSet(d, "ssh_public_key", cluster.SshPublicKey) + dSet(d, "virtual_ip_subnet", cluster.VirtualIpSubnet) + dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) + dSet(d, "node_health_check", cluster.NodeHealthCheck) - // TODO CSE: Why is this a slice??? 
- if len(cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations) == 0 { - return nil, fmt.Errorf("expected at least one Organization in cluster '%s'", d.Id()) - } - - // This field is optional, as it can take the value from the VCD client if _, ok := d.GetOk("org"); ok { - dSet(d, "org", cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name) - } - adminOrg, err := vcdClient.GetAdminOrgByName(cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name) - if err != nil { - return nil, fmt.Errorf("could not get Organization with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.Organizations[0].Name, err) - } - - // TODO CSE: Why is this a slice??? - if len(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs) == 0 { - return nil, fmt.Errorf("expected at least one VDC in cluster '%s': %s", d.Id(), err) - } - - vdc, err := adminOrg.GetVDCByName(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].Name, false) - if err != nil { - return nil, fmt.Errorf("could not get VDC with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].Name, err) - } - dSet(d, "vdc_id", vdc.Vdc.ID) - network, err := vdc.GetOrgVdcNetworkByName(cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, false) - if err != nil { - return nil, fmt.Errorf("could not get Org VDC Network with name %s: %s", cluster.Capvcd.Status.Capvcd.VcdProperties.OrgVdcs[0].OvdcNetworkName, err) + // This field is optional, as it can take the value from the VCD client + dSet(d, "org", cluster.OrganizationId) } - dSet(d, "network_id", network.OrgVDCNetwork.ID) if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client - if cluster.Owner == "" { - return nil, fmt.Errorf("could not retrieve Owner information from Cluster") - } dSet(d, "owner", cluster.Owner) } @@ -793,151 +663,52 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste dSet(d, "api_token_file", "******") } - bindings := make([]string, len(cluster.Capvcd.Status.Capvcd.ClusterResourceSetBindings)) - for i, binding := range cluster.Capvcd.Status.Capvcd.ClusterResourceSetBindings { - bindings[i] = binding.Name - } - err = d.Set("cluster_resource_set_bindings", bindings) + err := d.Set("cluster_resource_set_bindings", cluster.ClusterResourceSetBindings) if err != nil { - return nil, fmt.Errorf("could not set 'cluster_resource_set_bindings': %s", err) - } - - dSet(d, "cpi_version", cluster.Capvcd.Status.Cpi.Version) - dSet(d, "csi_version", cluster.Capvcd.Status.Csi.Version) - dSet(d, "capvcd_version", cluster.Capvcd.Status.Capvcd.CapvcdVersion) - dSet(d, "kubernetes_version", cluster.Capvcd.Status.Capvcd.Upgrade.Current.KubernetesVersion) - dSet(d, "tkg_product_version", cluster.Capvcd.Status.Capvcd.Upgrade.Current.TkgVersion) - if len(cluster.Capvcd.Status.Capvcd.K8sNetwork.Pods.CidrBlocks) == 0 { - return nil, fmt.Errorf("expected at least one Pod CIDR block in cluster '%s': %s", d.Id(), err) - } - dSet(d, "pods_cidr", cluster.Capvcd.Status.Capvcd.K8sNetwork.Pods.CidrBlocks[0]) - if len(cluster.Capvcd.Status.Capvcd.K8sNetwork.Services.CidrBlocks) == 0 { - return nil, fmt.Errorf("expected at least one Services CIDR block in cluster '%s': %s", d.Id(), err) - } - dSet(d, "services_cidr", cluster.Capvcd.Status.Capvcd.K8sNetwork.Services.CidrBlocks[0]) - - nodePoolBlocks := make([]map[string]interface{}, len(cluster.Capvcd.Status.Capvcd.NodePool)-1) - controlPlaneBlocks := make([]map[string]interface{}, 1) - nameToIds := map[string]string{"": ""} // Initialize 
with empty value - for i, nodePool := range cluster.Capvcd.Status.Capvcd.NodePool { - block := map[string]interface{}{} - block["machine_count"] = nodePool.DesiredReplicas - // TODO: This needs a refactoring - if nodePool.PlacementPolicy != "" { - policies, err := vcdClient.GetAllVdcComputePoliciesV2(url.Values{ - "filter": []string{fmt.Sprintf("name==%s", nodePool.PlacementPolicy)}, - }) - if err != nil { - return nil, err // TODO - } - nameToIds[nodePool.PlacementPolicy] = policies[0].VdcComputePolicyV2.ID - } - if nodePool.SizingPolicy != "" { - policies, err := vcdClient.GetAllVdcComputePoliciesV2(url.Values{ - "filter": []string{fmt.Sprintf("name==%s", nodePool.SizingPolicy)}, - }) - if err != nil { - return nil, err // TODO - } - nameToIds[nodePool.SizingPolicy] = policies[0].VdcComputePolicyV2.ID - } - if nodePool.StorageProfile != "" { - ref, err := vdc.FindStorageProfileReference(nodePool.StorageProfile) - if err != nil { - return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) // TODO - } - nameToIds[nodePool.StorageProfile] = ref.ID - } - block["sizing_policy_id"] = nameToIds[nodePool.SizingPolicy] - if nodePool.NvidiaGpuEnabled { // TODO: Be sure this is a worker node pool and not control plane (doesnt have this attr) - block["vgpu_policy_id"] = nameToIds[nodePool.PlacementPolicy] // It's a placement policy here - } else { - block["placement_policy_id"] = nameToIds[nodePool.PlacementPolicy] - } - block["storage_profile_id"] = nameToIds[nodePool.StorageProfile] - block["disk_size_gi"] = nodePool.DiskSizeMb / 1024 - - if strings.HasSuffix(nodePool.Name, "-control-plane-node-pool") { - // Control Plane - if len(cluster.Capvcd.Status.Capvcd.ClusterApiStatus.ApiEndpoints) == 0 { - return nil, fmt.Errorf("could not retrieve Cluster IP") - } - block["ip"] = cluster.Capvcd.Status.Capvcd.ClusterApiStatus.ApiEndpoints[0].Host - controlPlaneBlocks[0] = block - } else { - // Worker node - block["name"] = nodePool.Name + return nil, err + } - nodePoolBlocks[i] = block + nodePoolBlocks := make([]map[string]interface{}, len(cluster.WorkerPools)) + for i, nodePool := range cluster.WorkerPools { + nodePoolBlocks[i] = map[string]interface{}{ + "machine_count": nodePool.MachineCount, } } err = d.Set("node_pool", nodePoolBlocks) if err != nil { - return nil, fmt.Errorf("could not set 'node_pool' pools: %s", err) - } - err = d.Set("control_plane", controlPlaneBlocks) - if err != nil { - return nil, fmt.Errorf("could not set 'control_plane': %s", err) + return nil, err } - defaultStorageClassOptions, err := traverseMapAndGet[map[string]interface{}](rde.DefinedEntity.Entity, "spec.vcdKe.defaultStorageClassOptions") - var defaultStorageClass []map[string]interface{} - if err != nil { - if !strings.Contains(err.Error(), "does not exist in input map") { - return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) - } - // The object does not exist, hence the cluster does not use a default storage class - } else { - reclaimPolicy := "retain" - if defaultStorageClassOptions["useDeleteReclaimPolicy"].(bool) { - reclaimPolicy = "delete" - } - - ref, err := vdc.FindStorageProfileReference(defaultStorageClassOptions["vcdStorageProfileName"].(string)) - if err != nil { - return nil, fmt.Errorf("could not get Default Storage Class options from 'spec.vcdKe.defaultStorageClassOptions': %s", err) // TODO - } - // defaultStorageClassOptions["vcdStorageProfileName"] - - 
defaultStorageClass = append(defaultStorageClass, map[string]interface{}{ - "storage_profile_id": ref.ID, - "name": defaultStorageClassOptions["k8sStorageClassName"], - "reclaim_policy": reclaimPolicy, - "filesystem": defaultStorageClassOptions["filesystem"], - }) - - } - err = d.Set("default_storage_class", defaultStorageClass) + err = d.Set("control_plane", []map[string]interface{}{ + { + "machine_count": cluster.ControlPlane.MachineCount, + }, + }) if err != nil { - return nil, fmt.Errorf("could not save 'default_storage_class': %s", err) + return nil, err } - state, err := traverseMapAndGet[string](rde.DefinedEntity.Entity, "status.vcdKe.state") + err = d.Set("default_storage_class", map[string]interface{}{ + "storage_profile_id": cluster.DefaultStorageClass.StorageProfileId, + "name": cluster.DefaultStorageClass.Name, + "reclaim_policy": cluster.DefaultStorageClass.ReclaimPolicy, + "filesystem": cluster.DefaultStorageClass.Filesystem, + }) if err != nil { - return nil, fmt.Errorf("could not read 'status.vcdKe.state' from Kubernetes cluster with ID '%s': %s", d.Id(), err) + return nil, err } - dSet(d, "state", state) - - if state == "provisioned" { - behaviorVersion := supportedCseVersions[cseVersion][2] - // This can only be done if the cluster is in 'provisioned' state - invocationResult := map[string]interface{}{} - err := rde.InvokeBehaviorAndMarshal(fmt.Sprintf("urn:vcloud:behavior-interface:getFullEntity:cse:capvcd:%s", behaviorVersion), types.BehaviorInvocation{}, &invocationResult) - if err != nil { - return nil, fmt.Errorf("could not invoke the behavior to obtain the Kubeconfig for the Kubernetes cluster with ID '%s': %s", d.Id(), err) - } + dSet(d, "state", cluster.State) - kubeconfig, err := traverseMapAndGet[string](invocationResult, "entity.status.capvcd.private.kubeConfig") + if cluster.State == "provisioned" { + kubeconfig, err := cluster.GetKubeconfig() if err != nil { - return nil, fmt.Errorf("could not retrieve Kubeconfig for Kubernetes cluster with ID '%s': %s", d.Id(), err) + return nil, fmt.Errorf("error getting Kubeconfig for Kubernetes cluster with ID '%s': %s", cluster.ID, err) } dSet(d, "kubeconfig", kubeconfig) } else { - warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), state)) + warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), cluster.State)) } - // TODO: Missing ova_id, ssh_public_key, virtual_ip_subnet, auto_repair_on_errors, node_health_check, persistent_volumes - return warnings, nil } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 35d59ef6d..4d5034a4b 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" - "reflect" "strings" "testing" "time" @@ -203,62 +202,3 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { operations_timeout_minutes = 0 } ` - -// Test_getTkgVersionBundleFromVAppTemplateName requires connectivity with GitHub (that's why it's not labeled as a unit test), -// as it fetches the 'tkg_versions.json' file. -// This tests asserts that getTkgVersionBundleFromVAppTemplateName works correctly, retrieving the correct TKG versions from that file. 
-func Test_getTkgVersionBundleFromVAppTemplateName(t *testing.T) { - vcdClient := createSystemTemporaryVCDConnection() - tests := []struct { - name string - ovaName string - want tkgVersionBundle - wantErr string - }{ - { - name: "wrong ova name", - ovaName: "randomOVA", - want: tkgVersionBundle{}, - wantErr: "the vApp Template 'randomOVA' is not a Kubernetes template OVA", - }, - { - name: "not supported ova", - ovaName: "ubuntu-2004-kube-v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st", - want: tkgVersionBundle{}, - wantErr: "the Kubernetes OVA 'v9.99.9+vmware.9-tkg.9-b8c57a6c8c98d227f74e7b1a9eef27st' is not supported", - }, - { - name: "not supported photon ova", - ovaName: "photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46", - want: tkgVersionBundle{}, - wantErr: "the vApp Template 'photon-3-kube-v1.27.5+vmware.1-tkg.1-cac282289bb29b217b808a2b9b0c0c46' uses Photon, and it is not supported", - }, - { - name: "supported ova", - ovaName: "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8", - want: tkgVersionBundle{ - EtcdVersion: "v3.5.6_vmware.20", - CoreDnsVersion: "v1.9.3_vmware.16", - TkgVersion: "v2.3.1", - TkrVersion: "v1.26.8---vmware.1-tkg.1", - KubernetesVersion: "v1.26.8+vmware.1", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getTkgVersionBundleFromVAppTemplateName(vcdClient, tt.ovaName) - if err != nil { - if tt.wantErr == "" { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got error = %v, but should have not failed", err) - } - if err.Error() != tt.wantErr { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() error = %v, wantErr = %v", err, tt.wantErr) - } - } - if !reflect.DeepEqual(got, tt.want) { - t.Fatalf("getTkgVersionBundleFromVAppTemplateName() got = %v, want = %v", got, tt.want) - } - }) - } -} diff --git a/vcd/structure.go b/vcd/structure.go index de5486e09..185577f8c 100644 --- a/vcd/structure.go +++ b/vcd/structure.go @@ -13,63 +13,6 @@ import ( "github.com/vmware/go-vcloud-director/v2/types/v56" ) -// getKeys retrieves all the keys from the given map and returns them as a slice -func getKeys[K comparable, V any](input map[K]V) []K { - result := make([]K, len(input)) - i := 0 - for k := range input { - result[i] = k - i++ - } - return result -} - -// traverseMapAndGet traverses the input interface{}, which should be a map of maps, by following the path specified as -// "keyA.keyB.keyC.keyD", doing something similar to, visually speaking, map["keyA"]["keyB"]["keyC"]["keyD"], or in other words, -// it goes inside every inner map, which are inside the initial map, until the given path is finished. -// The final value, "keyD" in the same example, should be of type ResultType, which is a generic type requested during the call -// to this function. 
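Editor's note: an illustrative call of the dotted-path traversal described above, using made-up values; it assumes the generic traverseMapAndGet helper defined just below and is not part of the patch:

```go
// Illustration only: equivalent to entity["status"]["vcdKe"]["state"], but with a
// type check performed at every step of the path.
entity := map[string]interface{}{
	"status": map[string]interface{}{
		"vcdKe": map[string]interface{}{
			"state": "provisioned",
		},
	},
}
state, err := traverseMapAndGet[string](entity, "status.vcdKe.state")
fmt.Println(state, err) // prints: provisioned <nil>
```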
-func traverseMapAndGet[ResultType any](input interface{}, path string) (ResultType, error) { - var nothing ResultType - if input == nil { - return nothing, fmt.Errorf("the input is nil") - } - inputMap, ok := input.(map[string]interface{}) - if !ok { - return nothing, fmt.Errorf("the input is a %T, not a map[string]interface{}", input) - } - if len(inputMap) == 0 { - return nothing, fmt.Errorf("the map is empty") - } - pathUnits := strings.Split(path, ".") - completed := false - i := 0 - var result interface{} - for !completed { - subPath := pathUnits[i] - traversed, ok := inputMap[subPath] - if !ok { - return nothing, fmt.Errorf("key '%s' does not exist in input map", subPath) - } - if i < len(pathUnits)-1 { - traversedMap, ok := traversed.(map[string]interface{}) - if !ok { - return nothing, fmt.Errorf("key '%s' is a %T, not a map, but there are still %d paths to explore", subPath, traversed, len(pathUnits)-(i+1)) - } - inputMap = traversedMap - } else { - completed = true - result = traversed - } - i++ - } - resultTyped, ok := result.(ResultType) - if !ok { - return nothing, fmt.Errorf("could not convert obtained type %T to requested %T", result, nothing) - } - return resultTyped, nil -} - func expandIPRange(configured []interface{}) (types.IPRanges, error) { ipRange := make([]*types.IPRange, 0, len(configured)) diff --git a/vcd/structure_unit_test.go b/vcd/structure_unit_test.go index 5bcf3b338..735b7ae9e 100644 --- a/vcd/structure_unit_test.go +++ b/vcd/structure_unit_test.go @@ -3,7 +3,6 @@ package vcd import ( - "reflect" "testing" ) @@ -165,143 +164,3 @@ func Test_areMarshaledJsonEqual(t *testing.T) { }) } } - -// Test_traverseMapAndGet tests traverseMapAndGet function -func Test_traverseMapAndGet(t *testing.T) { - type args struct { - input interface{} - path string - } - tests := []struct { - name string - args args - wantType string - want interface{} - wantErr string - }{ - { - name: "input is nil", - args: args{ - input: nil, - }, - wantErr: "the input is nil", - }, - { - name: "input is not a map", - args: args{ - input: "error", - }, - wantErr: "the input is a string, not a map[string]interface{}", - }, - { - name: "map is empty", - args: args{ - input: map[string]interface{}{}, - }, - wantErr: "the map is empty", - }, - { - name: "map does not have key", - args: args{ - input: map[string]interface{}{ - "keyA": "value", - }, - path: "keyB", - }, - wantErr: "key 'keyB' does not exist in input map", - }, - { - name: "map has a single simple key", - args: args{ - input: map[string]interface{}{ - "keyA": "value", - }, - path: "keyA", - }, - wantType: "string", - want: "value", - }, - { - name: "map has a single complex key", - args: args{ - input: map[string]interface{}{ - "keyA": map[string]interface{}{ - "keyB": "value", - }, - }, - path: "keyA", - }, - wantType: "map", - want: map[string]interface{}{ - "keyB": "value", - }, - }, - { - name: "map has a complex structure", - args: args{ - input: map[string]interface{}{ - "keyA": map[string]interface{}{ - "keyB": map[string]interface{}{ - "keyC": "value", - }, - }, - }, - path: "keyA.keyB.keyC", - }, - wantType: "string", - want: "value", - }, - { - name: "requested path is deeper than the map structure", - args: args{ - input: map[string]interface{}{ - "keyA": map[string]interface{}{ - "keyB": map[string]interface{}{ - "keyC": "value", - }, - }, - }, - path: "keyA.keyB.keyC.keyD", - }, - wantErr: "key 'keyC' is a string, not a map, but there are still 1 paths to explore", - }, - { - name: "obtained value does not correspond to 
the desired type", - args: args{ - input: map[string]interface{}{ - "keyA": map[string]interface{}{ - "keyB": map[string]interface{}{ - "keyC": map[string]interface{}{}, - }, - }, - }, - path: "keyA.keyB.keyC", - }, - wantType: "string", - wantErr: "could not convert obtained type map[string]interface {} to requested string", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var got interface{} - var err error - if tt.wantType == "string" { - got, err = traverseMapAndGet[string](tt.args.input, tt.args.path) - } else if tt.wantType == "map" { - got, err = traverseMapAndGet[map[string]interface{}](tt.args.input, tt.args.path) - } else { - t.Fatalf("wantType type not used in this test") - } - - if err != nil { - if tt.wantErr != err.Error() { - t.Errorf("traverseMapAndGet() error = %v, wantErr = %v", err, tt.wantErr) - } - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("traverseMapAndGet() got = %v, want %v", got, tt.want) - } - }) - } -} From d071e50d5ae508c5d7b8df3ee75c9a8ec042b676 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 2 Feb 2024 14:01:25 +0100 Subject: [PATCH 057/156] Refactor with latest SDK changes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 49 ---------------------- 1 file changed, 49 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index c9fc1580e..eb7e62d17 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -324,55 +324,6 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, - "persistent_volumes": { - Type: schema.TypeSet, - Computed: true, - Description: "A set of persistent volumes that are present in the cluster, only available when a 'default_storage_class' was provided during cluster creation", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Computed: true, - Type: schema.TypeString, - Description: "The name of the persistent volume", - }, - "status": { - Computed: true, - Type: schema.TypeString, - Description: "The status of the persistent volume", - }, - "shared": { - Computed: true, - Type: schema.TypeString, - Description: "Whether the persistent volume is shared or not", - }, - "attached_node_count": { - Computed: true, - Type: schema.TypeInt, - Description: "How many nodes are consuming the persistent volume", - }, - "iops": { - Computed: true, - Type: schema.TypeInt, - Description: "I/O operations per second for the persistent volume", - }, - "size": { - Computed: true, - Type: schema.TypeInt, - Description: "Size of the persistent volume", - }, - "storage_profile": { - Computed: true, - Type: schema.TypeString, - Description: "Storage profile name of the persistent volume", - }, - "owner": { - Computed: true, - Type: schema.TypeString, - Description: "Owner of the persistent volume", - }, - }, - }, - }, }, } } From c5e5c80f4948d0f764d75cf77499168d566155d1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 2 Feb 2024 15:52:47 +0100 Subject: [PATCH 058/156] Implement data source Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 277 +++++++++++++++++++ vcd/provider.go | 1 + vcd/resource_vcd_cse_kubernetes_cluster.go | 29 +- 3 files changed, 295 insertions(+), 12 deletions(-) create mode 100644 vcd/datasource_vcd_cse_kubernetes_cluster.go diff --git 
a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go new file mode 100644 index 000000000..6c97b8ddf --- /dev/null +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -0,0 +1,277 @@ +package vcd + +import ( + "context" + _ "embed" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func datasourceVcdCseKubernetesCluster() *schema.Resource { + return &schema.Resource{ + ReadContext: datasourceVcdCseKubernetesRead, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the Kubernetes cluster to read", + }, + "cse_version": { + Type: schema.TypeString, + Computed: true, + Description: "The CSE version used by the cluster", + }, + "runtime": { + Type: schema.TypeString, + Computed: true, + Description: "The Kubernetes runtime used by the cluster", + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of the Kubernetes cluster", + }, + "ova_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", + }, + "org_id": { + Type: schema.TypeString, + Computed: true, + Description: "The name of organization that owns this Kubernetes cluster", + }, + "vdc_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the VDC that hosts the Kubernetes cluster", + }, + "network_id": { + Type: schema.TypeString, + Computed: true, + Description: "The ID of the network that the Kubernetes cluster uses", + }, + "owner": { + Type: schema.TypeString, + Computed: true, + Description: "The user that created the cluster", + }, + "ssh_public_key": { + Type: schema.TypeString, + Computed: true, + Description: "The SSH public key used to login into the cluster nodes", + }, + "control_plane": { + Type: schema.TypeList, + Computed: true, + Description: "Defines the control plane for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of nodes that the control plane has", + }, + "disk_size_gi": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk size, in Gibibytes (Gi), of the control plane nodes", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Sizing policy of the control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Placement policy of the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Computed: true, + Description: "Storage profile of the control plane nodes", + }, + "ip": { + Type: schema.TypeString, + Computed: true, + Description: "IP of the control plane", + }, + }, + }, + }, + "node_pool": { + Type: schema.TypeList, + Computed: true, + Description: "Defines a node pool for the cluster", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: "The name of this node pool", + }, + "machine_count": { + Type: schema.TypeInt, + Computed: true, + Description: "The number of nodes that this node pool has", + }, + "disk_size_gi": { + Type: schema.TypeInt, + Computed: true, + Description: "Disk size, in Gibibytes (Gi), of the control plane nodes", + }, + "sizing_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Sizing policy of the 
control plane nodes", + }, + "placement_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "VM Placement policy of the control plane nodes", + }, + "vgpu_policy_id": { + Type: schema.TypeString, + Computed: true, + Description: "vGPU policy of the control plane nodes", + }, + "storage_profile_id": { + Type: schema.TypeString, + Computed: true, + Description: "Storage profile of the control plane nodes", + }, + }, + }, + }, + "default_storage_class": { + Type: schema.TypeList, + Computed: true, + Description: "The default storage class of the cluster, if any", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_profile_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the storage profile used by the storage class", + }, + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the storage class", + }, + "reclaim_policy": { + Computed: true, + Type: schema.TypeString, + Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", + }, + "filesystem": { + Computed: true, + Type: schema.TypeString, + Description: "Filesystem of the storage class, can be either 'ext4' or 'xfs'", + }, + }, + }, + }, + "pods_cidr": { + Type: schema.TypeString, + Computed: true, + Description: "CIDR that the Kubernetes pods use", + }, + "services_cidr": { + Type: schema.TypeString, + Computed: true, + Description: "CIDR that the Kubernetes services use", + }, + "virtual_ip_subnet": { + Type: schema.TypeString, + Computed: true, + Description: "Virtual IP subnet of the cluster", + }, + "auto_repair_on_errors": { + Type: schema.TypeBool, + Computed: true, + Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", + }, + "node_health_check": { + Type: schema.TypeBool, + Computed: true, + Description: "After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules", + }, + "kubernetes_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of Kubernetes installed in this cluster", + }, + "tkg_product_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of TKG installed in this cluster", + }, + "capvcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of CAPVCD used by this cluster", + }, + "cluster_resource_set_bindings": { + Type: schema.TypeSet, + Computed: true, + Description: "The cluster resource set bindings of this cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "cpi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Cloud Provider Interface used by this cluster", + }, + "csi_version": { + Type: schema.TypeString, + Computed: true, + Description: "The version of the Container Storage Interface used by this cluster", + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: "The state of the cluster, can be 'provisioning', 'provisioned', 'deleting' or 'error'. 
Useful to check whether the Kubernetes cluster is in a stable status", + }, + "kubeconfig": { + Type: schema.TypeString, + Computed: true, + Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", + }, + }, + } +} + +func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + vcdClient := meta.(*VCDClient) + org, err := vcdClient.GetOrgFromResource(d) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Get("cluster_id"), err) + } + + cluster, err := org.CseGetKubernetesClusterById(d.Get("cluster_id").(string)) + if err != nil { + return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Get("cluster_id"), err) + } + + warns, err := saveClusterDataToState(d, cluster, "") + if err != nil { + return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) + } + for _, warning := range warns { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: warning.Error(), + }) + } + + if len(diags) > 0 { + return diags + } + return nil +} diff --git a/vcd/provider.go b/vcd/provider.go index 24fb80c9f..c2ac313f0 100644 --- a/vcd/provider.go +++ b/vcd/provider.go @@ -153,6 +153,7 @@ var globalDataSourceMap = map[string]*schema.Resource{ "vcd_nsxt_edgegateway_dns": datasourceVcdNsxtEdgeGatewayDns(), // 3.11 "vcd_vgpu_profile": datasourceVcdVgpuProfile(), // 3.11 "vcd_vm_vgpu_policy": datasourceVcdVmVgpuPolicy(), // 3.11 + "vcd_cse_kubernetes_cluster": datasourceVcdCseKubernetesCluster(), // 3.12 } var globalResourceMap = map[string]*schema.Resource{ diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index eb7e62d17..05b8e285e 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -448,7 +448,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, cluster) + warns, err := saveClusterDataToState(d, cluster, org.Org.Name) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -562,7 +562,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", clusterId, err) } - warns, err := saveClusterDataToState(d, cluster) + warns, err := saveClusterDataToState(d, cluster, org.Org.Name) if err != nil { return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) } @@ -576,7 +576,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. // Returns a slice of warnings first and an error second. 
-func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster, orgName string) ([]error, error) { var warnings []error d.SetId(cluster.ID) @@ -598,9 +598,20 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - if _, ok := d.GetOk("org"); ok { - // This field is optional, as it can take the value from the VCD client - dSet(d, "org", cluster.OrganizationId) + if orgName == "" { + // Data source + dSet(d, "org_id", cluster.OrganizationId) + } else { + // Resource + if _, ok := d.GetOk("org"); ok { + // This field is optional, as it can take the value from the VCD client + dSet(d, "org", orgName) + } + if _, ok := d.GetOk("api_token_file"); !ok { + // During imports, this field is impossible to get, so we set an artificial value, as this argument + // is required at runtime + dSet(d, "api_token_file", "******") + } } if _, ok := d.GetOk("owner"); ok { @@ -608,12 +619,6 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "owner", cluster.Owner) } - if _, ok := d.GetOk("api_token_file"); !ok { - // During imports, this field is impossible to get, so we set an artificial value, as this argument - // is required at runtime - dSet(d, "api_token_file", "******") - } - err := d.Set("cluster_resource_set_bindings", cluster.ClusterResourceSetBindings) if err != nil { return nil, err From c6fa38534974a23fad1fb4d93999f81948bd5d47 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 2 Feb 2024 16:15:51 +0100 Subject: [PATCH 059/156] Docs Signed-off-by: abarreiro --- .changes/v3.12.0/1195-features.md | 4 +- .../d/cse_kubernetes_cluster.html.markdown | 38 +++++++++++++++++++ .../r/cse_kubernetes_cluster.html.markdown | 4 ++ website/vcd.erb | 8 +++- 4 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 website/docs/d/cse_kubernetes_cluster.html.markdown diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md index 01af870a5..c890e0a40 100644 --- a/.changes/v3.12.0/1195-features.md +++ b/.changes/v3.12.0/1195-features.md @@ -1,4 +1,4 @@ -* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension +* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension 4.2 installed and running [GH-1195] -* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension +* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension 4.2 installed and running [GH-1195] \ No newline at end of file diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown new file mode 100644 index 000000000..ebea884ed --- /dev/null +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -0,0 +1,38 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_cse_kubernetes_cluster" +sidebar_current: "docs-vcd-data-source-cse-kubernetes-cluster" +description: |- + Provides a resource to read Kubernetes clusters from VMware Cloud Director with Container Service Extension installed and running. 
+--- + +# vcd\_cse\_kubernetes\_cluster + +Provides a data source to read Kubernetes clusters in VMware Cloud Director with Container Service Extension (CSE) installed and running. + +Supported in provider *v3.12+* + +Supports the following **Container Service Extension** versions: + +* 4.2 + +-> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) + +## Example Usage + +```hcl +data "vcd_cse_kubernetes_cluster" "my_cluster" { + cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `cluster_id` - (Required) Unequivocally identifies a cluster in VCD + +## Attribute Reference + +All attributes defined in [vcd_cse_kubernetes_cluster](/providers/vmware/vcd/latest/docs/resources/cse_kubernetes_cluster) resource are supported. +Also, the resource arguments are also available as read-only attributes. diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 739c6b78d..a44a1c768 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -12,6 +12,10 @@ Provides a resource to manage Kubernetes clusters in VMware Cloud Director with Supported in provider *v3.12+* +Supports the following **Container Service Extension** versions: + +* 4.2 + -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) ## Example Usage diff --git a/website/vcd.erb b/website/vcd.erb index 9d64957ed..e56791e55 100644 --- a/website/vcd.erb +++ b/website/vcd.erb @@ -407,7 +407,10 @@ vcd_vgpu_profile > - vcd_vm_vgpu_policy + vcd_vm_vgpu_policy + + > + vcd_cse_kubernetes_cluster @@ -723,6 +726,9 @@ > vcd_vm_vgpu_policy + > + vcd_cse_kubernetes_cluster + From 46a4ae5b81a28a66a0f4259fa89674c33e037e41 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 10:59:36 +0100 Subject: [PATCH 060/156] Fix cse_version field Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 41 ++++++++++------------ 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 05b8e285e..6b99ea40b 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -5,6 +5,7 @@ import ( _ "embed" "fmt" "github.com/hashicorp/go-cty/cty" + semver "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -27,7 +28,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, // Required, but validated at runtime ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"4.2"}, false), + ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), Description: "The CSE version to use", }, "runtime": { @@ -276,7 +277,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 60, - Description: "The time, in minutes, to wait for the cluster operations to be successfully completed. 
For example, during cluster creation/update, it should be in `provisioned`" + + Description: "The time, in minutes, to wait for the cluster operations to be successfully completed. For example, during cluster creation, it should be in `provisioned`" + "state before the timeout is reached, otherwise the operation will return an error. For cluster deletion, this timeout" + "specifies the time to wait until the cluster is completely deleted. Setting this argument to `0` means to wait indefinitely", ValidateDiagFunc: minimumValue(0, "timeout must be at least 0 (no timeout)"), @@ -366,6 +367,11 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diags } + cseVersion, err := semver.NewSemver(d.Get("cse_version").(string)) + if err != nil { + return diag.Errorf("the introduced 'cse_version=%s' is not valid: %s", d.Get("cse_version"), err) + } + vcdClient := meta.(*VCDClient) org, err := vcdClient.GetOrgFromResource(d) if err != nil { @@ -378,7 +384,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour VdcId: d.Get("vdc_id").(string), NetworkId: d.Get("network_id").(string), KubernetesTemplateOvaId: d.Get("ova_id").(string), - CseVersion: d.Get("cse_version").(string), + CseVersion: *cseVersion, ControlPlane: govcd.CseControlPlaneSettings{ MachineCount: d.Get("control_plane.0.machine_count").(int), DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), @@ -498,7 +504,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, payload.WorkerPools = &workerPools } - err = cluster.Update(payload) + err = cluster.Update(payload, false) if err != nil { if cluster != nil { if cluster.State != "provisioned" { @@ -536,30 +542,19 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { resourceURI := strings.Split(d.Id(), ImportSeparator) - var clusterId, orgName, cseVersion string - switch len(resourceURI) { - case 3: // ImportSeparator != '.' - cseVersion = resourceURI[0] - orgName = resourceURI[1] - clusterId = resourceURI[2] - case 4: // ImportSeparator == '.' 
- cseVersion = fmt.Sprintf("%s.%s", resourceURI[0], resourceURI[1]) - orgName = resourceURI[2] - clusterId = resourceURI[3] - default: - return nil, fmt.Errorf("resource name must be specified as cse_version(two digits: major.minor).organization_name.cluster_id, but it was '%s'", d.Id()) - } - dSet(d, "cse_version", strings.Replace(cseVersion, "v", "", 1)) // We remove any 'v' prefix just in case, to avoid common errors if people use SemVer + if len(resourceURI) != 2 { + return nil, fmt.Errorf("resource name must be specified as organization_name.cluster_id, but it was '%s'", d.Id()) + } vcdClient := meta.(*VCDClient) - org, err := vcdClient.GetOrgByName(orgName) + org, err := vcdClient.GetOrgByName(resourceURI[0]) if err != nil { - return nil, fmt.Errorf("could not get Organization with name '%s': %s", orgName, err) + return nil, fmt.Errorf("could not get Organization with name '%s': %s", resourceURI[0], err) } - cluster, err := org.CseGetKubernetesClusterById(clusterId) + cluster, err := org.CseGetKubernetesClusterById(resourceURI[1]) if err != nil { - return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", clusterId, err) + return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", resourceURI[1], err) } warns, err := saveClusterDataToState(d, cluster, org.Org.Name) @@ -579,7 +574,6 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster, orgName string) ([]error, error) { var warnings []error - d.SetId(cluster.ID) dSet(d, "name", cluster.Name) dSet(d, "cse_version", cluster.CseVersion) dSet(d, "runtime", "tkg") // Only one supported @@ -666,5 +660,6 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), cluster.State)) } + d.SetId(cluster.ID) return warnings, nil } From abc38333c919f2fb44c25c0ba8b86266fbdb778b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 11:00:20 +0100 Subject: [PATCH 061/156] Fix cse_version field Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 6b99ea40b..a41145bfc 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -28,8 +28,21 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, // Required, but validated at runtime ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), + ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0", "4.2.1"}, false), Description: "The CSE version to use", + DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { + // This custom diff function allows to correctly compare versions + oldVersion, err := semver.NewVersion(oldValue) + if err != nil { + return false + } + newVersion, err := semver.NewVersion(newValue) + if err != nil { + return false + } + return oldVersion.Equal(newVersion) + }, + DiffSuppressOnRefresh: true, }, "runtime": { Type: schema.TypeString, From 50b20cc3956f5dcd64da0ed42b26103b42ca705f Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 11:43:01 +0100 Subject: [PATCH 
062/156] Add 4.2.0 Signed-off-by: abarreiro --- .../v4.2.0/entities/tkgmcluster.json.template | 29 + .../v4.2.0/entities/vcdkeconfig.json.template | 89 +++ .../3.11-cse-install-1-provider-config.tf | 34 + ...-cse-install-2-cse-server-prerequisites.tf | 281 +++++++ .../3.11-cse-install-3-cse-server-settings.tf | 46 ++ .../install/step1/terraform.tfvars.example | 61 ++ .../v4.2.0/install/step1/variables.tf | 164 ++++ .../3.11-cse-install-4-provider-config.tf | 42 + .../3.11-cse-install-5-infrastructure.tf | 449 +++++++++++ .../install/step2/3.11-cse-install-6-ovas.tf | 75 ++ .../3.11-cse-install-7-cse-server-init.tf | 105 +++ .../step2/3.11-cse-install-8-optionals.tf | 27 + .../install/step2/terraform.tfvars.example | 106 +++ .../v4.2.0/install/step2/variables.tf | 255 ++++++ .../schemas/capvcd-type-schema-v1.3.0.json | 472 ++++++++++++ .../vcdkeconfig-type-schema-v1.1.0.json | 727 ++++++++++++++++++ 16 files changed, 2962 insertions(+) create mode 100644 examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template create mode 100644 examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template create mode 100644 examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example create mode 100644 examples/container-service-extension/v4.2.0/install/step1/variables.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf create mode 100644 examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example create mode 100644 examples/container-service-extension/v4.2.0/install/step2/variables.tf create mode 100644 examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json create mode 100644 examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json diff --git a/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template b/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template new file mode 100644 index 000000000..88896c2c6 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template @@ -0,0 +1,29 @@ +{ + "apiVersion": "capvcd.vmware.com/v1.1", + "kind": "CAPVCDCluster", + "name": "${name}", + "metadata": { + "name": "${name}", + "orgName": "${org}", + "site": "${vcd_url}", + "virtualDataCenterName": "${vdc}" + }, + "spec": { + "vcdKe": { + "isVCDKECluster": true, + "markForDelete": ${delete}, + "forceDelete": ${force_delete}, + "autoRepairOnErrors": ${auto_repair_on_errors}, + "defaultStorageClassOptions": { + "filesystem": "${default_storage_class_filesystem}", + "k8sStorageClassName": "${default_storage_class_name}", + "vcdStorageProfileName": 
"${default_storage_class_storage_profile}", + "useDeleteReclaimPolicy": ${default_storage_class_delete_reclaim_policy} + }, + "secure": { + "apiToken": "${api_token}" + } + }, + "capiYaml": ${capi_yaml} + } +} diff --git a/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template b/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template new file mode 100644 index 000000000..9a3ef0523 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template @@ -0,0 +1,89 @@ +{ + "profiles": [ + { + "name": "production", + "active": true, + "serverConfig": { + "rdePollIntervalInMin": 1, + "heartbeatWatcherTimeoutInMin": 10, + "staleHeartbeatIntervalInMin": 30 + }, + "K8Config": { + "certificateAuthorities": [ + ${k8s_cluster_certificates} + ], + "cni": { + "name": "antrea", + "version": "" + }, + "cpi": { + "name": "cpi for cloud director", + "version": "${cpi_version}" + }, + "csi": [ + { + "name": "csi for cloud director", + "version": "${csi_version}" + } + ], + "mhc": { + "maxUnhealthyNodes": ${max_unhealthy_node_percentage}, + "nodeStartupTimeout": "${node_startup_timeout}", + "nodeNotReadyTimeout": "${node_not_ready_timeout}", + "nodeUnknownTimeout": "${node_unknown_timeout}" + }, + "rdeProjectorVersion": "0.6.0" + }, + "vcdConfig": { + "sysLogger": { + "host": "${syslog_host}", + "port": "${syslog_port}" + } + }, + "githubConfig": { + "githubPersonalAccessToken": "" + }, + "bootstrapClusterConfig": { + "capiEcosystem": { + "infraProvider": { + "name": "capvcd", + "version": "v${capvcd_version}", + "capvcdRde": { + "nss": "capvcdCluster", + "vendor": "vmware", + "version": "1.2.0" + } + }, + "coreCapiVersion": "v1.4.0", + "bootstrapProvider": { + "name": "CAPBK", + "version": "v1.4.0" + }, + "controlPlaneProvider": { + "name": "KCP", + "version": "v1.4.0" + }, + "certManagerVersion": "v1.11.1" + }, + "certificateAuthorities": [ + ${bootstrap_vm_certificates} + ], + "clusterctl": { + "version": "v1.4.0", + "clusterctlyaml": "" + }, + "dockerVersion": "", + "kindVersion": "v0.19.0", + "kindestNodeVersion": "v1.27.1", + "kubectlVersion": "", + "proxyConfig": { + "noProxy": "${no_proxy}", + "httpProxy": "${http_proxy}", + "httpsProxy": "${https_proxy}" + }, + "sizingPolicy": "${bootstrap_vm_sizing_policy}" + }, + "containerRegistryUrl": "${container_registry_url}" + } + ] +} diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf new file mode 100644 index 000000000..18e0ad66b --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf @@ -0,0 +1,34 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation, step 1: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. 
+# ------------------------------------------------------------------------------------------------------------ + +# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +terraform { + required_providers { + vcd = { + source = "vmware/vcd" + version = ">= 3.11" + } + } +} + +provider "vcd" { + url = "${var.vcd_url}/api" + user = var.administrator_user + password = var.administrator_password + auth_type = "integrated" + sysorg = var.administrator_org + org = var.administrator_org + allow_unverified_ssl = var.insecure_login + logging = true + logging_file = "cse_install_step1.log" +} diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf new file mode 100644 index 000000000..21195d235 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -0,0 +1,281 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation, step 1: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * This file contains the same resources created by the "Configure Settings for CSE Server > Set Up Prerequisites" step in the +# UI wizard. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# This is the RDE Interface required to create the "VCDKEConfig" RDE Type. +# This should not be changed. +resource "vcd_rde_interface" "vcdkeconfig_interface" { + vendor = "vmware" + nss = "VCDKEConfig" + version = "1.0.0" + name = "VCDKEConfig" +} + +# This resource will manage the "VCDKEConfig" RDE Type required to instantiate the CSE Server configuration. +# The schema URL points to the JSON schema hosted in the terraform-provider-vcd repository. +# This should not be changed. +resource "vcd_rde_type" "vcdkeconfig_type" { + vendor = "vmware" + nss = "VCDKEConfig" + version = "1.1.0" + name = "VCD-KE RDE Schema" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json" + interface_ids = [vcd_rde_interface.vcdkeconfig_interface.id] +} + +# This RDE Interface exists in VCD, so it must be fetched with a RDE Interface data source. This RDE Interface is used to be +# able to create the "capvcdCluster" RDE Type. +# This should not be changed. +data "vcd_rde_interface" "kubernetes_interface" { + vendor = "vmware" + nss = "k8s" + version = "1.0.0" +} + +# This is the interface required to create the "CAPVCD" Runtime Defined Entity Type. +# This should not be changed. 
+resource "vcd_rde_interface" "cse_interface" { + vendor = "cse" + nss = "capvcd" + version = "1.0.0" + name = "cseInterface" +} + +# This RDE Interface behavior is required to be able to obtain the Kubeconfig and other important information. +# This should not be changed. +resource "vcd_rde_interface_behavior" "capvcd_behavior" { + rde_interface_id = vcd_rde_interface.cse_interface.id + name = "getFullEntity" + execution = { + "type" : "noop" + "id" : "getFullEntity" + } +} + +# This RDE Interface will create the "capvcdCluster" RDE Type required to create Kubernetes clusters. +# The schema URL points to the JSON schema hosted in the terraform-provider-vcd repository. +# This should not be changed. +resource "vcd_rde_type" "capvcdcluster_type" { + vendor = "vmware" + nss = "capvcdCluster" + version = "1.3.0" + name = "CAPVCD Cluster" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json" + interface_ids = [data.vcd_rde_interface.kubernetes_interface.id] + + depends_on = [vcd_rde_interface_behavior.capvcd_behavior] # Interface Behaviors must be created before any RDE Type +} + +# Access Level for the CAPVCD Type Behavior +# This should not be changed. +resource "vcd_rde_type_behavior_acl" "capvcd_behavior_acl" { + rde_type_id = vcd_rde_type.capvcdcluster_type.id + behavior_id = vcd_rde_interface_behavior.capvcd_behavior.id + access_level_ids = ["urn:vcloud:accessLevel:FullControl"] +} + +# This role is having only the minimum set of rights required for the CSE Server to function. +# It is created in the "System" provider organization scope. +# This should not be changed. +resource "vcd_role" "cse_admin_role" { + org = var.administrator_org + name = "CSE Admin Role" + description = "Used for administrative purposes" + rights = [ + "API Tokens: Manage", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Full Access", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Modify", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View" + ] +} + +# This will allow to have a user with a limited set of rights that can access the Provider area of VCD. +# This user will be used by the CSE Server, with an API token that must be created in Step 2. +# This should not be changed. +resource "vcd_org_user" "cse_admin" { + org = var.administrator_org + name = var.cse_admin_username + password = var.cse_admin_password + role = vcd_role.cse_admin_role.name +} + +# This resource manages the Rights Bundle required by tenants to create and consume Kubernetes clusters. +# This should not be changed. 
+resource "vcd_rights_bundle" "k8s_clusters_rights_bundle" { + name = "Kubernetes Clusters Rights Bundle" + description = "Rights bundle with required rights for managing Kubernetes clusters" + rights = [ + "API Tokens: Manage", + "Access All Organization VDCs", + "Catalog: View Published Catalogs", + "Certificate Library: Manage", + "Certificate Library: View", + "General: Administrator View", + "Organization vDC Gateway: Configure Load Balancer", + "Organization vDC Gateway: Configure NAT", + "Organization vDC Gateway: View Load Balancer", + "Organization vDC Gateway: View NAT", + "Organization vDC Gateway: View", + "Organization vDC Named Disk: Create", + "Organization vDC Named Disk: Edit Properties", + "Organization vDC Named Disk: View Properties", + "Organization vDC Shared Named Disk: Create", + "vApp: Allow All Extra Config", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "vmware:tkgcluster: Full Access", + "vmware:tkgcluster: Modify", + "vmware:tkgcluster: View", + "vmware:tkgcluster: Administrator View", + "vmware:tkgcluster: Administrator Full access", + ] + publish_to_all_tenants = true # This needs to be published to all the Organizations +} + + +# With the Rights Bundle specified above, we need also a new Role for tenant users who want to create and manage +# Kubernetes clusters. +# This should not be changed. 
+resource "vcd_global_role" "k8s_cluster_author" { + name = "Kubernetes Cluster Author" + description = "Role to create Kubernetes clusters" + rights = [ + "API Tokens: Manage", + "Access All Organization VDCs", + "Catalog: Add vApp from My Cloud", + "Catalog: View Private and Shared Catalogs", + "Catalog: View Published Catalogs", + "Certificate Library: View", + "Organization vDC Compute Policy: View", + "Organization vDC Disk: View IOPS", + "Organization vDC Gateway: Configure Load Balancer", + "Organization vDC Gateway: Configure NAT", + "Organization vDC Gateway: View", + "Organization vDC Gateway: View Load Balancer", + "Organization vDC Gateway: View NAT", + "Organization vDC Named Disk: Create", + "Organization vDC Named Disk: Delete", + "Organization vDC Named Disk: Edit Properties", + "Organization vDC Named Disk: View Encryption Status", + "Organization vDC Named Disk: View Properties", + "Organization vDC Network: View Properties", + "Organization vDC Shared Named Disk: Create", + "Organization vDC: VM-VM Affinity Edit", + "Organization: View", + "UI Plugins: View", + "VAPP_VM_METADATA_TO_VCENTER", + "vApp Template / Media: Copy", + "vApp Template / Media: Edit", + "vApp Template / Media: View", + "vApp Template: Checkout", + "vApp: Allow All Extra Config", + "vApp: Copy", + "vApp: Create / Reconfigure", + "vApp: Delete", + "vApp: Download", + "vApp: Edit Properties", + "vApp: Edit VM CPU", + "vApp: Edit VM Compute Policy", + "vApp: Edit VM Hard Disk", + "vApp: Edit VM Memory", + "vApp: Edit VM Network", + "vApp: Edit VM Properties", + "vApp: Manage VM Password Settings", + "vApp: Power Operations", + "vApp: Sharing", + "vApp: Snapshot Operations", + "vApp: Upload", + "vApp: Use Console", + "vApp: VM Boot Options", + "vApp: View ACL", + "vApp: View VM and VM's Disks Encryption Status", + "vApp: View VM metrics", + "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator Full access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View", + "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Administrator View", + "vmware:tkgcluster: Full Access", + "vmware:tkgcluster: Modify", + "vmware:tkgcluster: View", + ] + + publish_to_all_tenants = true # This needs to be published to all the Organizations + + # As we use rights created by the CAPVCD Type created previously, we need to depend on it + depends_on = [ + vcd_rights_bundle.k8s_clusters_rights_bundle + ] +} + +# The VM Sizing Policies defined below MUST be created as they are specified in this HCL. These are the default +# policies required by CSE to create TKGm clusters. +# This should not be changed. 
+resource "vcd_vm_sizing_policy" "tkg_xl" { + name = "TKG extra-large" + description = "Extra-large VM sizing policy for a Kubernetes cluster node (8 CPU, 32GB memory)" + cpu { + count = 8 + } + memory { + size_in_mb = "32768" + } +} + +resource "vcd_vm_sizing_policy" "tkg_l" { + name = "TKG large" + description = "Large VM sizing policy for a Kubernetes cluster node (4 CPU, 16GB memory)" + cpu { + count = 4 + } + memory { + size_in_mb = "16384" + } +} + +resource "vcd_vm_sizing_policy" "tkg_m" { + name = "TKG medium" + description = "Medium VM sizing policy for a Kubernetes cluster node (2 CPU, 8GB memory)" + cpu { + count = 2 + } + memory { + size_in_mb = "8192" + } +} + +resource "vcd_vm_sizing_policy" "tkg_s" { + name = "TKG small" + description = "Small VM sizing policy for a Kubernetes cluster node (2 CPU, 4GB memory)" + cpu { + count = 2 + } + memory { + size_in_mb = "4048" + } +} diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf new file mode 100644 index 000000000..270000e5d --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -0,0 +1,46 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation, step 1: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * This file contains the same resources created by the "Configure Settings for CSE Server > Set Configuration Parameters" step in the +# UI wizard. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on the resource for context. +# ------------------------------------------------------------------------------------------------------------ + +# This RDE configures the CSE Server. It can be customised through variables, and the bootstrap_cluster_sizing_policy +# can also be changed. +# Other than that, this should be applied as it is. +resource "vcd_rde" "vcdkeconfig_instance" { + org = var.administrator_org + name = "vcdKeConfig" + rde_type_id = vcd_rde_type.vcdkeconfig_type.id + resolve = true + input_entity = templatefile(var.vcdkeconfig_template_filepath, { + capvcd_version = var.capvcd_version + cpi_version = var.cpi_version + csi_version = var.csi_version + rde_projector_version = var.rde_projector_version + github_personal_access_token = var.github_personal_access_token + bootstrap_vm_sizing_policy = vcd_vm_sizing_policy.tkg_s.name # References the small VM Sizing Policy, it can be changed. 
+ no_proxy = var.no_proxy + http_proxy = var.http_proxy + https_proxy = var.https_proxy + syslog_host = var.syslog_host + syslog_port = var.syslog_port + node_startup_timeout = var.node_startup_timeout + node_not_ready_timeout = var.node_not_ready_timeout + node_unknown_timeout = var.node_unknown_timeout + max_unhealthy_node_percentage = var.max_unhealthy_node_percentage + container_registry_url = var.container_registry_url + k8s_cluster_certificates = join(",", var.k8s_cluster_certificates) + bootstrap_vm_certificates = join(",", var.bootstrap_vm_certificates) + }) +} diff --git a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example new file mode 100644 index 000000000..b4a5f86f3 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example @@ -0,0 +1,61 @@ +# Change configuration to your needs and rename to 'terraform.tfvars' +# For more details about the variables specified here, please read the guide first: +# https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install + +# ------------------------------------------------ +# VCD Provider config +# ------------------------------------------------ + +vcd_url = "https://vcd.my-awesome-corp.com" +administrator_user = "administrator" +administrator_password = "change-me" +administrator_org = "System" +insecure_login = "false" + +# ------------------------------------------------ +# CSE Server Pre-requisites +# ------------------------------------------------ + +# This user will be created by the Terraform configuration, so you can +# customise what its username and password will be. +# This user will have an API token that must be consumed by the CSE Server. +cse_admin_username = "cse_admin" +cse_admin_password = "change-me" + +# ------------------------------------------------ +# CSE Server Settings +# ------------------------------------------------ + +# These are required to create the Runtime Defined Entity that will contain the CSE Server configuration (vcdKeConfig) +# To know more about the specific versions, please refer to the CSE documentation. +# The values set here correspond to CSE 4.2.0: +vcdkeconfig_template_filepath = "../../entities/vcdkeconfig.json.template" +capvcd_version = "1.2.0" +cpi_version = "1.5.0" +csi_version = "1.5.0" +rde_projector_version = "0.7.0" + +# Optional but recommended to avoid rate limiting when configuring the TKGm clusters. +# Create this one in https://github.com/settings/tokens +github_personal_access_token = "" + +# Node will be considered unhealthy and remediated if joining the cluster takes longer than this timeout (seconds) +node_startup_timeout = "900" +# A newly joined node will be considered unhealthy and remediated if it cannot host workloads for longer than this timeout (seconds) +node_not_ready_timeout = "300" +# A healthy node will be considered unhealthy and remediated if it is unreachable for longer than this timeout (seconds) +node_unknown_timeout = "300" +# Remediation will be suspended when the number of unhealthy nodes exceeds this percentage. 
+# (100% means that unhealthy nodes will always be remediated, while 0% means that unhealthy nodes will never be remediated) +max_unhealthy_node_percentage = 100 + +# URL from where TKG clusters will fetch container images +container_registry_url = "projects.registry.vmware.com" + +# Certificate(s) to allow the ephemeral VM (created during cluster creation) to authenticate with. +# For example, when pulling images from a container registry. (Copy and paste .cert file contents) +k8s_cluster_certificates = [] + +# Certificate(s) to allow clusters to authenticate with. +# For example, when pulling images from a container registry. (Copy and paste .cert file contents) +bootstrap_vm_certificates = [] diff --git a/examples/container-service-extension/v4.2.0/install/step1/variables.tf b/examples/container-service-extension/v4.2.0/install/step1/variables.tf new file mode 100644 index 000000000..123d9db5f --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step1/variables.tf @@ -0,0 +1,164 @@ +# ------------------------------------------------ +# Provider config +# ------------------------------------------------ + +variable "vcd_url" { + description = "The VCD URL (Example: 'https://vcd.my-company.com')" + type = string +} + +variable "insecure_login" { + description = "Allow unverified SSL connections when operating with VCD" + type = bool + default = false +} + +variable "administrator_user" { + description = "The VCD administrator user (Example: 'administrator')" + default = "administrator" + type = string +} + +variable "administrator_password" { + description = "The VCD administrator password" + type = string + sensitive = true +} + +variable "administrator_org" { + description = "The VCD administrator organization (Example: 'System')" + type = string + default = "System" +} + +# ------------------------------------------------ +# CSE Server Pre-requisites +# ------------------------------------------------ + +variable "cse_admin_username" { + description = "The CSE administrator user that will be created (Example: 'cse-admin')" + type = string +} + +variable "cse_admin_password" { + description = "The password to set for the CSE administrator to be created" + type = string + sensitive = true +} + +# ------------------------------------------------ +# CSE Server Settings +# ------------------------------------------------ + +variable "vcdkeconfig_template_filepath" { + type = string + description = "Path to the VCDKEConfig JSON template" + default = "../../entities/vcdkeconfig.json.template" +} + +variable "capvcd_version" { + type = string + description = "Version of CAPVCD" + default = "1.2.0" +} + +variable "cpi_version" { + type = string + description = "VCDKEConfig: Cloud Provider Interface version" + default = "1.5.0" +} + +variable "csi_version" { + type = string + description = "VCDKEConfig: Container Storage Interface version" + default = "1.5.0" +} + +variable "rde_projector_version" { + type = string + description = "VCDKEConfig: RDE Projector version" + default = "0.7.0" +} + +variable "github_personal_access_token" { + type = string + description = "VCDKEConfig: Prevents potential github rate limiting errors during cluster creation and deletion" + default = "" + sensitive = true +} + +variable "no_proxy" { + type = string + description = "VCDKEConfig: List of comma-separated domains without spaces" + default = "localhost,127.0.0.1,cluster.local,.svc" +} + +variable "http_proxy" { + type = string + description = "VCDKEConfig: Address of your HTTP proxy server" 
+ default = "" +} + +variable "https_proxy" { + type = string + description = "VCDKEConfig: Address of your HTTPS proxy server" + default = "" +} + +variable "syslog_host" { + type = string + description = "VCDKEConfig: Domain for system logs" + default = "" +} + +variable "syslog_port" { + type = string + description = "VCDKEConfig: Port for system logs" + default = "" +} + +variable "node_startup_timeout" { + type = string + description = "VCDKEConfig: Node will be considered unhealthy and remediated if joining the cluster takes longer than this timeout (seconds)" + default = "900" +} + +variable "node_not_ready_timeout" { + type = string + description = "VCDKEConfig: A newly joined node will be considered unhealthy and remediated if it cannot host workloads for longer than this timeout (seconds)" + default = "300" +} + +variable "node_unknown_timeout" { + type = string + description = "VCDKEConfig: A healthy node will be considered unhealthy and remediated if it is unreachable for longer than this timeout (seconds)" + default = "300" +} + +variable "max_unhealthy_node_percentage" { + type = number + description = "VCDKEConfig: Remediation will be suspended when the number of unhealthy nodes exceeds this percentage. (100% means that unhealthy nodes will always be remediated, while 0% means that unhealthy nodes will never be remediated)" + default = 100 + validation { + condition = var.max_unhealthy_node_percentage >= 0 && var.max_unhealthy_node_percentage <= 100 + error_message = "The value must be a percentage, hence between 0 and 100" + } +} + +variable "container_registry_url" { + type = string + description = "VCDKEConfig: URL from where TKG clusters will fetch container images" + default = "projects.registry.vmware.com" +} + +variable "bootstrap_vm_certificates" { + type = list(string) + description = "VCDKEConfig: Certificate(s) to allow the ephemeral VM (created during cluster creation) to authenticate with. For example, when pulling images from a container registry. (Copy and paste .cert file contents)" + default = [] +} + +variable "k8s_cluster_certificates" { + type = list(string) + description = "VCDKEConfig: Certificate(s) to allow clusters to authenticate with. For example, when pulling images from a container registry. (Copy and paste .cert file contents)" + default = [] +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf new file mode 100644 index 000000000..3a573270d --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf @@ -0,0 +1,42 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation, step 2: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * The installation process is split into two steps as the first one creates a CSE admin user that needs to be +# used in a "provider" block in the second one. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# ------------------------------------------------------------------------------------------------------------ + +# VCD Provider configuration. 
It must be at least v3.11.0 and configured with a System administrator account. +terraform { + required_providers { + vcd = { + source = "vmware/vcd" + version = ">= 3.11" + } + time = { + source = "hashicorp/time" + version = "0.9.1" + } + local = { + source = "hashicorp/local" + version = "2.4.0" + } + } +} + +provider "vcd" { + url = "${var.vcd_url}/api" + user = var.administrator_user + password = var.administrator_password + auth_type = "integrated" + sysorg = var.administrator_org + org = var.administrator_org + allow_unverified_ssl = var.insecure_login + logging = true + logging_file = "cse_install_step2.log" +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf new file mode 100644 index 000000000..7d6961cad --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf @@ -0,0 +1,449 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# +# * Please review this file carefully, as it shapes the structure of your organization, hence you should customise +# it to your needs. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# The two resources below will create the two Organizations mentioned in the CSE documentation: +# https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/index.html + +# The Solutions Organization will host the CSE Server and its intended to be used by CSE Administrators only. +# The TKGm clusters are NOT placed here. The attributes related to lease are set to unlimited, as the CSE +# Server should be always up and running in order to process requests. +resource "vcd_org" "solutions_organization" { + name = "solutions_org" + full_name = "Solutions Organization" + is_enabled = true + delete_force = true + delete_recursive = true + + vapp_lease { + maximum_runtime_lease_in_sec = 0 + power_off_on_runtime_lease_expiration = false + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } + + vapp_template_lease { + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } +} + +# The Tenant Organization will host the TKGm clusters and its intended to be used by tenants. +# The TKGm clusters must be placed here. The attributes related to lease are set to unlimited, as the TKGm clusters vApps +# should not be powered off. +resource "vcd_org" "tenant_organization" { + name = "tenant_org" + full_name = "Tenant Organization" + is_enabled = true + delete_force = true + delete_recursive = true + + vapp_lease { + maximum_runtime_lease_in_sec = 0 + power_off_on_runtime_lease_expiration = false + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } + + vapp_template_lease { + maximum_storage_lease_in_sec = 0 + delete_on_storage_lease_expiration = false + } +} + +# This section will create one VDC per organization. 
To create the VDCs we need to fetch some elements like +# Provider VDC, Edge Clusters, etc. +data "vcd_provider_vdc" "nsxt_pvdc" { + name = var.provider_vdc_name +} + +data "vcd_nsxt_edge_cluster" "nsxt_edgecluster" { + org = vcd_org.tenant_organization.name + provider_vdc_id = data.vcd_provider_vdc.nsxt_pvdc.id + name = var.nsxt_edge_cluster_name +} + +# Fetch the VM Sizing Policies created in step 1 +data "vcd_vm_sizing_policy" "tkg_s" { + name = "TKG small" +} + +data "vcd_vm_sizing_policy" "tkg_m" { + name = "TKG medium" +} + +data "vcd_vm_sizing_policy" "tkg_l" { + name = "TKG large" +} + +data "vcd_vm_sizing_policy" "tkg_xl" { + name = "TKG extra-large" +} + +# The VDC that will host the Kubernetes clusters. +resource "vcd_org_vdc" "tenant_vdc" { + name = "tenant_vdc" + description = "Tenant VDC" + org = vcd_org.tenant_organization.name + + allocation_model = "AllocationVApp" # You can use other models. + network_pool_name = var.network_pool_name + provider_vdc_name = data.vcd_provider_vdc.nsxt_pvdc.name + edge_cluster_id = data.vcd_nsxt_edge_cluster.nsxt_edgecluster.id + + # You can tune these arguments to your fit your needs. + network_quota = 50 + compute_capacity { + cpu { + allocated = 0 + } + + memory { + allocated = 0 + } + } + + # You can tune these arguments to your fit your needs. + storage_profile { + name = "*" + limit = 0 + default = true + } + + # You can tune these arguments to your fit your needs. + enabled = true + enable_thin_provisioning = true + enable_fast_provisioning = true + delete_force = true + delete_recursive = true + + # Make sure you specify the required VM Sizing Policies managed by the data sources specified above. + default_compute_policy_id = data.vcd_vm_sizing_policy.tkg_s.id + vm_sizing_policy_ids = [ + data.vcd_vm_sizing_policy.tkg_xl.id, + data.vcd_vm_sizing_policy.tkg_l.id, + data.vcd_vm_sizing_policy.tkg_m.id, + data.vcd_vm_sizing_policy.tkg_s.id, + ] +} + +# The VDC that will host the CSE server and other provider-level items +resource "vcd_org_vdc" "solutions_vdc" { + name = "solutions_vdc" + description = "Solutions VDC" + org = vcd_org.solutions_organization.name + + allocation_model = "AllocationVApp" # You can use other models + network_pool_name = var.network_pool_name + provider_vdc_name = data.vcd_provider_vdc.nsxt_pvdc.name + edge_cluster_id = data.vcd_nsxt_edge_cluster.nsxt_edgecluster.id + + # You can tune these arguments to your fit your needs + network_quota = 10 + compute_capacity { + cpu { + allocated = 0 + } + + memory { + allocated = 0 + } + } + + # You can tune these arguments to your fit your needs + storage_profile { + name = "*" + limit = 0 + default = true + } + + # You can tune these arguments to your fit your needs + enabled = true + enable_thin_provisioning = true + enable_fast_provisioning = true + delete_force = true + delete_recursive = true +} + +# The networking setup specified below will configure one Provider Gateway + Edge Gateway + Routed network per +# organization. You can customise this section according to your needs. 
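+#
+# As a rough sketch, the chain built below for each of the two Organizations is:
+#   NSX-T Tier 0 router (existing)  ->  Provider Gateway (vcd_external_network_v2)
+#     ->  Edge Gateway (vcd_nsxt_edgegateway)  ->  Routed network (vcd_network_routed_v2)
+# plus a SNAT rule and a firewall rule on each Edge Gateway to provide Internet connectivity.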
+ +data "vcd_nsxt_manager" "cse_nsxt_manager" { + name = var.nsxt_manager_name +} + +data "vcd_nsxt_tier0_router" "solutions_tier0_router" { + name = var.solutions_nsxt_tier0_router_name + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id +} + +resource "vcd_external_network_v2" "solutions_tier0" { + name = "solutions_tier0" + + nsxt_network { + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id + nsxt_tier0_router_id = data.vcd_nsxt_tier0_router.solutions_tier0_router.id + } + + ip_scope { + gateway = var.solutions_provider_gateway_gateway_ip + prefix_length = var.solutions_provider_gateway_gateway_prefix_length + + dynamic "static_ip_pool" { + for_each = var.solutions_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +data "vcd_nsxt_tier0_router" "tenant_tier0_router" { + name = var.tenant_nsxt_tier0_router_name + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id +} + +resource "vcd_external_network_v2" "tenant_tier0" { + name = "tenant_tier0" + + nsxt_network { + nsxt_manager_id = data.vcd_nsxt_manager.cse_nsxt_manager.id + nsxt_tier0_router_id = data.vcd_nsxt_tier0_router.tenant_tier0_router.id + } + + ip_scope { + gateway = var.tenant_provider_gateway_gateway_ip + prefix_length = var.tenant_provider_gateway_gateway_prefix_length + + dynamic "static_ip_pool" { + for_each = var.tenant_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# This Edge Gateway will consume automatically the available IPs from the Provider Gateway. +resource "vcd_nsxt_edgegateway" "solutions_edgegateway" { + org = vcd_org.solutions_organization.name + owner_id = vcd_org_vdc.solutions_vdc.id + + name = "solutions_edgegateway" + external_network_id = vcd_external_network_v2.solutions_tier0.id + + subnet { + gateway = var.solutions_provider_gateway_gateway_ip + prefix_length = var.solutions_provider_gateway_gateway_prefix_length + primary_ip = var.solutions_provider_gateway_static_ip_ranges[0][0] + + dynamic "allocated_ips" { + for_each = var.solutions_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# This Edge Gateway will consume automatically the available IPs from the Provider Gateway. +resource "vcd_nsxt_edgegateway" "tenant_edgegateway" { + org = vcd_org.tenant_organization.name + owner_id = vcd_org_vdc.tenant_vdc.id + + name = "tenant_edgegateway" + external_network_id = vcd_external_network_v2.tenant_tier0.id + + subnet { + gateway = var.tenant_provider_gateway_gateway_ip + prefix_length = var.tenant_provider_gateway_gateway_prefix_length + primary_ip = var.tenant_provider_gateway_static_ip_ranges[0][0] + + dynamic "allocated_ips" { + for_each = var.tenant_provider_gateway_static_ip_ranges + iterator = ip + content { + start_address = ip.value[0] + end_address = ip.value[1] + } + } + } +} + +# CSE requires ALB to be configured to support the LoadBalancers that are deployed by the CPI of VMware Cloud Director. 
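+# The resources below import an existing ALB Controller, create an NSX-T ALB Cloud from one of its importable
+# clouds, expose a shared Service Engine Group and, after a short wait, activate ALB on both Edge Gateways and
+# assign that Service Engine Group to them.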
+resource "vcd_nsxt_alb_controller" "cse_avi_controller" { + name = "cse_alb_controller" + username = var.alb_controller_username + password = var.alb_controller_password + url = var.alb_controller_url +} + +data "vcd_nsxt_alb_importable_cloud" "cse_importable_cloud" { + name = var.alb_importable_cloud_name + controller_id = vcd_nsxt_alb_controller.cse_avi_controller.id +} + +resource "vcd_nsxt_alb_cloud" "cse_nsxt_alb_cloud" { + name = "cse_nsxt_alb_cloud" + + controller_id = vcd_nsxt_alb_controller.cse_avi_controller.id + importable_cloud_id = data.vcd_nsxt_alb_importable_cloud.cse_importable_cloud.id + network_pool_id = data.vcd_nsxt_alb_importable_cloud.cse_importable_cloud.network_pool_id +} + +resource "vcd_nsxt_alb_service_engine_group" "cse_alb_seg" { + name = "cse_alb_seg" + alb_cloud_id = vcd_nsxt_alb_cloud.cse_nsxt_alb_cloud.id + importable_service_engine_group_name = "Default-Group" + reservation_model = "SHARED" +} + +# We introduce a sleep to wait for the provider part of ALB to be ready before the assignment to the Edge gateways +resource "time_sleep" "cse_alb_wait" { + depends_on = [vcd_nsxt_alb_service_engine_group.cse_alb_seg] + create_duration = "30s" +} + +## ALB for solutions edge gateway +resource "vcd_nsxt_alb_settings" "solutions_alb_settings" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + is_active = true + + # This dependency is required to make sure that provider part of operations is done + depends_on = [time_sleep.cse_alb_wait] +} + +resource "vcd_nsxt_alb_edgegateway_service_engine_group" "solutions_assignment" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_alb_settings.solutions_alb_settings.edge_gateway_id + service_engine_group_id = vcd_nsxt_alb_service_engine_group.cse_alb_seg.id + reserved_virtual_services = 50 + max_virtual_services = 50 +} + +resource "vcd_nsxt_alb_edgegateway_service_engine_group" "tenant_assignment" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_alb_settings.tenant_alb_settings.edge_gateway_id + service_engine_group_id = vcd_nsxt_alb_service_engine_group.cse_alb_seg.id + reserved_virtual_services = 50 + max_virtual_services = 50 +} + +## ALB for tenant edge gateway +resource "vcd_nsxt_alb_settings" "tenant_alb_settings" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + is_active = true + + # This dependency is required to make sure that provider part of operations is done + depends_on = [time_sleep.cse_alb_wait] +} + +# We create a Routed network in the Solutions organization that will be used by the CSE Server. +resource "vcd_network_routed_v2" "solutions_routed_network" { + org = vcd_org.solutions_organization.name + name = "solutions_routed_network" + description = "Solutions routed network" + + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + gateway = var.solutions_routed_network_gateway_ip + prefix_length = var.solutions_routed_network_prefix_length + + static_ip_pool { + start_address = var.solutions_routed_network_ip_pool_start_address + end_address = var.solutions_routed_network_ip_pool_end_address + } + + dns1 = var.solutions_routed_network_dns + dns_suffix = var.solutions_routed_network_dns_suffix +} + +# We create a Routed network in the Tenant organization that will be used by the Kubernetes clusters. 
+resource "vcd_network_routed_v2" "tenant_routed_network" { + org = vcd_org.tenant_organization.name + name = "tenant_net_routed" + description = "Routed network for the K8s clusters" + + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + gateway = var.tenant_routed_network_gateway_ip + prefix_length = var.tenant_routed_network_prefix_length + + static_ip_pool { + start_address = var.tenant_routed_network_ip_pool_start_address + end_address = var.tenant_routed_network_ip_pool_end_address + } + + dns1 = var.tenant_routed_network_dns + dns_suffix = var.tenant_routed_network_dns_suffix +} + +# We need SNAT rules in both networks to provide with Internet connectivity. +resource "vcd_nsxt_nat_rule" "solutions_nat" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + name = "Solutions SNAT rule" + rule_type = "SNAT" + description = "Solutions SNAT rule" + + external_address = var.solutions_snat_external_ip + internal_address = var.solutions_snat_internal_network_cidr + logging = true +} + +resource "vcd_nsxt_nat_rule" "tenant_nat" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + name = "Tenant SNAT rule" + rule_type = "SNAT" + description = "Tenant SNAT rule" + + external_address = var.tenant_snat_external_ip + internal_address = var.tenant_snat_internal_network_cidr + logging = true +} + +# WARNING: Please adjust this rule to your needs. The CSE Server requires Internet access to be configured. +resource "vcd_nsxt_firewall" "solutions_firewall" { + org = vcd_org.solutions_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.solutions_edgegateway.id + + rule { + action = "ALLOW" + name = "Allow all traffic" + direction = "IN_OUT" + ip_protocol = "IPV4_IPV6" + } +} + +# WARNING: Please adjust this rule to your needs. The Bootstrap clusters and final Kubernetes clusters require Internet access to be configured. +resource "vcd_nsxt_firewall" "tenant_firewall" { + org = vcd_org.tenant_organization.name + edge_gateway_id = vcd_nsxt_edgegateway.tenant_edgegateway.id + + rule { + action = "ALLOW" + name = "Allow all traffic" + direction = "IN_OUT" + ip_protocol = "IPV4_IPV6" + } +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf new file mode 100644 index 000000000..5b2dfa919 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf @@ -0,0 +1,75 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# In this section we create two Catalogs, one to host all CSE Server OVAs and another one to host TKGm OVAs. +# They are created in the Solutions organization and only the TKGm will be shared as read-only. 
This will guarantee +# that only CSE admins can manage OVAs. +resource "vcd_catalog" "cse_catalog" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + name = "cse_catalog" + + delete_force = "true" + delete_recursive = "true" + + # In this example, everything is created from scratch, so it is needed to wait for the VDC to be available, so the + # Catalog can be created. + depends_on = [ + vcd_org_vdc.solutions_vdc + ] +} + +resource "vcd_catalog" "tkgm_catalog" { + org = vcd_org.solutions_organization.name # References the Solutions Organization + name = "tkgm_catalog" + + delete_force = "true" + delete_recursive = "true" + + # In this example, everything is created from scratch, so it is needed to wait for the VDC to be available, so the + # Catalog can be created. + depends_on = [ + vcd_org_vdc.solutions_vdc + ] +} + +# We share the TKGm Catalog with the Tenant Organization created previously. +resource "vcd_catalog_access_control" "tkgm_catalog_ac" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.tkgm_catalog.id + shared_with_everyone = false + shared_with { + org_id = vcd_org.tenant_organization.id # Shared with the Tenant Organization + access_level = "ReadOnly" + } +} + +# We upload a minimum set of OVAs for CSE to work. Read the official documentation to check +# where to find the OVAs: +# https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/index.html +resource "vcd_catalog_vapp_template" "tkgm_ova" { + for_each = toset(var.tkgm_ova_files) + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.tkgm_catalog.id # References the TKGm Catalog created previously + + name = replace(each.key, ".ova", "") + description = replace(each.key, ".ova", "") + ova_path = format("%s/%s", var.tkgm_ova_folder, each.key) +} + +resource "vcd_catalog_vapp_template" "cse_ova" { + org = vcd_org.solutions_organization.name # References the Solutions Organization created previously + catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog created previously + + name = replace(var.cse_ova_file, ".ova", "") + description = replace(var.cse_ova_file, ".ova", "") + ova_path = format("%s/%s", var.cse_ova_folder, var.cse_ova_file) +} + diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf new file mode 100644 index 000000000..1d7db0eda --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -0,0 +1,105 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. +# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# Log in to VCD with the cse_admin username created above. 
This will be used to provision +# an API token that must be consumed by the CSE Server. +# This should not be changed. +provider "vcd" { + alias = "cse_admin" + url = "${var.vcd_url}/api" + user = var.cse_admin_username + password = var.cse_admin_password + auth_type = "integrated" + org = var.administrator_org + allow_unverified_ssl = var.insecure_login + logging = true + logging_file = "cse_install_cse_admin.log" +} + +# Generates an API token for the CSE Admin user, that will be used to instantiate the CSE Server. +# This should not be changed. +resource "vcd_api_token" "cse_admin_token" { + provider = vcd.cse_admin + name = "CSE Admin API Token" + file_name = var.cse_admin_api_token_file + allow_token_file = true +} + +data "local_file" "cse_admin_token_file" { + filename = vcd_api_token.cse_admin_token.file_name +} + +# This is the CSE Server vApp +resource "vcd_vapp" "cse_server_vapp" { + org = vcd_org.solutions_organization.name + vdc = vcd_org_vdc.solutions_vdc.name + name = "CSE Server vApp" + + lease { + runtime_lease_in_sec = 0 + storage_lease_in_sec = 0 + } +} + +# The CSE Server vApp network that will consume an existing routed network from +# the solutions organization. +resource "vcd_vapp_org_network" "cse_server_network" { + org = vcd_org.solutions_organization.name + vdc = vcd_org_vdc.solutions_vdc.name + + vapp_name = vcd_vapp.cse_server_vapp.name + org_network_name = vcd_network_routed_v2.solutions_routed_network.name + + reboot_vapp_on_removal = true +} + +# The CSE Server VM. It requires guest properties to be introduced for it to work +# properly. You can troubleshoot it by checking the cse.log file. +resource "vcd_vapp_vm" "cse_server_vm" { + org = vcd_org.solutions_organization.name + vdc = vcd_org_vdc.solutions_vdc.name + + vapp_name = vcd_vapp.cse_server_vapp.name + name = "CSE Server VM" + + vapp_template_id = vcd_catalog_vapp_template.cse_ova.id + + network { + type = "org" + name = vcd_vapp_org_network.cse_server_network.org_network_name + ip_allocation_mode = "POOL" + } + + guest_properties = { + + # VCD host + "cse.vcdHost" = var.vcd_url + + # CSE Server org + "cse.vAppOrg" = vcd_org.solutions_organization.name + + # CSE admin account's Access Token + "cse.vcdRefreshToken" = jsondecode(data.local_file.cse_admin_token_file.content)["refresh_token"] + + # CSE admin account's username + "cse.vcdUsername" = var.cse_admin_username + + # CSE admin account's org + "cse.userOrg" = vcd_org.solutions_organization.name + } + + customization { + force = false + enabled = true + allow_local_admin_password = true + auto_generate_password = true + } +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf new file mode 100644 index 000000000..fe2c0c0d6 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf @@ -0,0 +1,27 @@ +# ------------------------------------------------------------------------------------------------------------ +# CSE 4.2.0 installation: +# +# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# before applying this configuration. +# +# * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. +# Other than that, this snippet should be applied as it is. 
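+# * The UI Plugin below is optional: it is only installed when 'k8s_container_clusters_ui_plugin_path'
+#   points to a plugin ZIP file, otherwise its 'count' stays at 0 and nothing is created.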
+# You can check the comments on each resource/data source for more help and context. +# ------------------------------------------------------------------------------------------------------------ + +# This resource installs the UI Plugin. It can be useful for tenant users that are not familiar with +# Terraform. +resource "vcd_ui_plugin" "k8s_container_clusters_ui_plugin" { + count = var.k8s_container_clusters_ui_plugin_path == "" ? 0 : 1 + plugin_path = var.k8s_container_clusters_ui_plugin_path + enabled = true + tenant_ids = [ + data.vcd_org.system_org.id, + vcd_org.solutions_organization.id, + vcd_org.tenant_organization.id, + ] +} + +data "vcd_org" "system_org" { + name = var.administrator_org +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example b/examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example new file mode 100644 index 000000000..204a477bf --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example @@ -0,0 +1,106 @@ +# Change configuration to your needs and rename to 'terraform.tfvars' +# For more details about the variables specified here, please read the guide first: +# https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install + +# ------------------------------------------------ +# VCD Provider config +# ------------------------------------------------ + +vcd_url = "https://vcd.my-awesome-corp.com" +administrator_user = "administrator" +administrator_password = "change-me" +administrator_org = "System" +insecure_login = "false" + +# ------------------------------------------------ +# Infrastructure +# ------------------------------------------------ + +# These variables are required to create both the Solutions NSX-T VDC and Tenant NSX-T VDC +# The values here need to exist already in your VCD appliance. +provider_vdc_name = "change-me" # Name of an existing PVDC that can be used to create VDCs +nsxt_edge_cluster_name = "change-me" # Name of an existing NSX-T Edge Cluster that can be used to create VDCs +network_pool_name = "change-me" # Name of an existing network pool that can be used to create VDCs + +# These variables are used to build a basic networking setup to run the CSE Server +# and the TKGm clusters +nsxt_manager_name = "change-me" # Name of an existing NSX-T manager, required to create the Provider Gateways + +# These are all required to create the Solutions Organization Provider Gateway. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +solutions_nsxt_tier0_router_name = "change-me" # The name of an existing NSX-T Tier 0 router +solutions_provider_gateway_gateway_ip = "10.20.30.250" # Gateway IP to use in the Solutions Provider Gateway +solutions_provider_gateway_gateway_prefix_length = "19" # Prefix length to use in the Solutions Provider Gateway +solutions_provider_gateway_static_ip_ranges = [ # IP ranges to use in the Solutions Provider Gateway + ["10.20.30.16", "10.20.30.16"], # Single IP + ["10.20.30.20", "10.20.30.25"], # Many IPs +] + +# These are all required to create the Tenant Organization Provider Gateway. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. 
+tenant_nsxt_tier0_router_name = "change-me" # The name of an existing NSX-T Tier 0 router +tenant_provider_gateway_gateway_ip = "10.30.20.150" # Gateway IP to use in the Tenant Provider Gateway +tenant_provider_gateway_gateway_prefix_length = "19" # Prefix length to use in the Tenant Provider Gateway +tenant_provider_gateway_static_ip_ranges = [ # IP ranges to use in the Tenant Provider Gateway + ["10.30.20.14", "10.30.20.14"], # Single IP + ["10.30.20.30", "10.30.20.37"], # Many IPs +] + +# These will configure the Routed network for the Solutions Organization VDC. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +solutions_routed_network_gateway_ip = "192.168.0.1" # Required. Gateway IP for the Routed network inside the Solutions Organization +solutions_routed_network_prefix_length = "24" # Required. Prefix length for the Routed network inside the Solutions Organization +solutions_routed_network_ip_pool_start_address = "192.168.0.2" # Required. First IP for the Routed network pool +solutions_routed_network_ip_pool_end_address = "192.168.0.254" # Required. Last IP for the Routed network pool +solutions_snat_external_ip = "10.20.30.25" # Required. For example, pick the last IP from solutions_provider_gateway_static_ip_ranges +solutions_snat_internal_network_cidr = "192.168.0.0/24" # Required. It should match the Routed network IP addresses +solutions_routed_network_dns = "" # Optional, if you need DNS +solutions_routed_network_dns_suffix = "" # Optional, if you need DNS + +# These will configure the Routed network for the Tenant Organization VDC. +# Please adjust these to your needs, as the values here are just examples, and review the Terraform configuration if needed. +tenant_routed_network_gateway_ip = "10.0.0.1" # Required. Gateway IP for the Routed network inside the Tenant Organization +tenant_routed_network_prefix_length = "16" # Required. Prefix length for the Routed network inside the Tenant Organization +tenant_routed_network_ip_pool_start_address = "10.0.0.2" # Required. First IP for the Routed network pool +tenant_routed_network_ip_pool_end_address = "10.0.255.254" # Required. Last IP for the Routed network pool +tenant_snat_external_ip = "10.30.20.37" # Required. For example, pick the last IP from tenant_provider_gateway_static_ip_ranges +tenant_snat_internal_network_cidr = "10.0.0.0/16" # Required. It should match the Routed network IP addresses +tenant_routed_network_dns = "" # Optional, if you need DNS +tenant_routed_network_dns_suffix = "" # Optional, if you need DNS + +# These are required to create a new ALB setup in VCD that will be used by TKGm clusters. +# Your VCD should have an existing ALB deployment that will be imported, the values below must correspond to +# the existing controller to be imported into VCD: +alb_controller_username = "admin" # Username to access the ALB Controller +alb_controller_password = "change-me" # Password to access the ALB Controller +alb_controller_url = "https://alb-ctrl.my-awesome-corp.com" # URL of the ALB Controller +alb_importable_cloud_name = "change-me" # Name of the Cloud to import to create a Service Engine Group + +# ------------------------------------------------ +# Catalog and OVAs +# ------------------------------------------------ + +# These variables are required to upload the necessary OVAs to the Solutions Organization shared catalog. +# You can find the download links in the guide referenced at the top of this file. 
+tkgm_ova_folder = "/home/changeme/tkgm-folder" # An existing absolute path to a folder containing TKGm OVAs +tkgm_ova_files = [ # Existing TKGm OVAs + "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova" +] +cse_ova_folder = "/home/changeme/cse-folder" # An existing absolute path to a folder containing CSE Server OVAs +cse_ova_file = "VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" # An existing CSE Server OVA + +# ------------------------------------------------ +# CSE Server initialization +# ------------------------------------------------ + +cse_admin_username = "cse_admin" # This must be the same user created in step 1 +cse_admin_password = "change-me" # This must be the same password of the user created in step 1 +cse_admin_api_token_file = "cse_admin_api_token.json" # This file will contain the API token of the CSE Admin user, store it carefully. + +# ------------------------------------------------ +# Other configuration +# ------------------------------------------------ +# This path points to the .zip file that contains the bundled Kubernetes Container Clusters UI Plugin. +# It is optional: if not set, it won't be installed. +# Remember to remove older CSE UI plugins if present (for example 3.x plugins) before installing this one. +k8s_container_clusters_ui_plugin_path = "/home/change-me/container-ui-plugin-4.1.zip" diff --git a/examples/container-service-extension/v4.2.0/install/step2/variables.tf b/examples/container-service-extension/v4.2.0/install/step2/variables.tf new file mode 100644 index 000000000..0836d3e69 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/install/step2/variables.tf @@ -0,0 +1,255 @@ +# ------------------------------------------------ +# Provider config +# ------------------------------------------------ + +variable "vcd_url" { + description = "The VCD URL (Example: 'https://vcd.my-company.com')" + type = string +} + +variable "insecure_login" { + description = "Allow unverified SSL connections when operating with VCD" + type = bool + default = false +} + +variable "administrator_user" { + description = "The VCD administrator user (Example: 'administrator')" + default = "administrator" + type = string +} + +variable "administrator_password" { + description = "The VCD administrator password" + type = string + sensitive = true +} + +variable "administrator_org" { + description = "The VCD administrator organization (Example: 'System')" + type = string + default = "System" +} + +# ------------------------------------------------ +# Infrastructure +# ------------------------------------------------ + +variable "provider_vdc_name" { + description = "The Provider VDC that will be used to create the required VDCs" + type = string +} + +variable "nsxt_edge_cluster_name" { + description = "The NSX-T Edge Cluster name, that relates to the specified Provider VDC" + type = string +} + +variable "network_pool_name" { + description = "The network pool to be used on VDC creation" + type = string +} + +variable "nsxt_manager_name" { + description = "NSX-T manager name, required to create the Provider Gateways" + type = string +} + +variable "solutions_nsxt_tier0_router_name" { + description = "Name of an existing NSX-T tier-0 router to create the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_gateway_ip" { + description = "Gateway IP for the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_gateway_prefix_length" { + description = "Prefix 
length for the Solutions Provider Gateway" + type = string +} + +variable "solutions_provider_gateway_static_ip_ranges" { + type = list(list(string)) + description = "List of pairs of public IPs for the Solutions Provider Gateway" +} + +variable "tenant_nsxt_tier0_router_name" { + description = "Name of an existing NSX-T tier-0 router to create the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_gateway_ip" { + description = "Gateway IP for the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_gateway_prefix_length" { + description = "Prefix length for the Tenant Provider Gateway" + type = string +} + +variable "tenant_provider_gateway_static_ip_ranges" { + type = list(list(string)) + description = "List of pairs of public IPs for the Tenant Provider Gateway" +} + +variable "solutions_routed_network_gateway_ip" { + description = "Gateway IP for the Solutions routed network" + type = string +} + +variable "solutions_routed_network_prefix_length" { + description = "Prefix length for the Solutions routed network" + type = string +} + +variable "solutions_routed_network_ip_pool_start_address" { + description = "Start address for the IP pool of the Solutions routed network" + type = string +} + +variable "solutions_routed_network_ip_pool_end_address" { + description = "End address for the IP pool of the Solutions routed network" + type = string +} + +variable "solutions_snat_external_ip" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the external IP, which should be one of the Provider Gateway available IPs" + type = string +} + +variable "solutions_snat_internal_network_cidr" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the internal subnet CIDR, which should correspond to the routed network IPs" + type = string +} + +variable "solutions_routed_network_dns" { + description = "Custom DNS server IP to use for the Solutions routed network" + type = string + default = "" +} + +variable "solutions_routed_network_dns_suffix" { + description = "Custom DNS suffix to use for the Solutions routed network" + type = string + default = "" +} + +variable "tenant_routed_network_gateway_ip" { + description = "Gateway IP for the Tenant routed network" + type = string +} + +variable "tenant_routed_network_prefix_length" { + description = "Prefix length for the Tenant routed network" + type = string +} + +variable "tenant_routed_network_ip_pool_start_address" { + description = "Start address for the IP pool of the Tenant routed network" + type = string +} + +variable "tenant_routed_network_ip_pool_end_address" { + description = "End address for the IP pool of the Tenant routed network" + type = string +} + +variable "tenant_snat_external_ip" { + description = "Used to create a SNAT rule to allow connectivity. This specifies the external IP, which should be one of the Provider Gateway available IPs" + type = string +} + +variable "tenant_snat_internal_network_cidr" { + description = "Used to create a SNAT rule to allow connectivity. 
This specifies the internal subnet CIDR, which should correspond to the routed network IPs" + type = string +} + +variable "tenant_routed_network_dns" { + description = "Custom DNS server IP to use for the Tenant routed network" + type = string + default = "" +} + +variable "tenant_routed_network_dns_suffix" { + description = "Custom DNS suffix to use for the Tenant routed network" + type = string + default = "" +} + +variable "alb_controller_username" { + description = "The user to create an ALB Controller with" + type = string +} + +variable "alb_controller_password" { + description = "The password for the user that will be used to create the ALB Controller" + type = string +} + +variable "alb_controller_url" { + description = "The URL to create the ALB Controller" + type = string +} + +variable "alb_importable_cloud_name" { + description = "Name of an available importable cloud to be able to create an ALB NSX-T Cloud" + type = string +} + +# ------------------------------------------------ +# Catalog and OVAs +# ------------------------------------------------ + +variable "tkgm_ova_folder" { + description = "Absolute path to the TKGm OVA files, with no file name (Example: '/home/bob/Downloads/tkgm')" + type = string +} + +variable "tkgm_ova_files" { + description = "A set of TKGm OVA file names, with no path (Example: 'ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova')" + type = set(string) +} + +variable "cse_ova_folder" { + description = "Absolute path to the CSE OVA file, with no file name (Example: '/home/bob/Downloads/cse')" + type = string +} + +variable "cse_ova_file" { + description = "CSE OVA file name, with no path (Example: 'VMware_Cloud_Director_Container_Service_Extension-4.0.1.62-21109756.ova')" + type = string +} + +# ------------------------------------------------ +# CSE Server initialization +# ------------------------------------------------ + +variable "cse_admin_username" { + description = "The CSE administrator user that was created in step 1" + type = string +} + +variable "cse_admin_password" { + description = "The password to set for the CSE administrator user that was created in step 1" + type = string + sensitive = true +} + +variable "cse_admin_api_token_file" { + description = "The file where the API Token for the CSE Administrator will be stored" + type = string + default = "cse_admin_api_token.json" +} + +# ------------------------------------------------ +# Other configuration +# ------------------------------------------------ + +variable "k8s_container_clusters_ui_plugin_path" { + type = string + description = "Path to the Kubernetes Container Clusters UI Plugin zip file" + default = "" +} diff --git a/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json b/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json new file mode 100644 index 000000000..f033b1c0d --- /dev/null +++ b/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json @@ -0,0 +1,472 @@ +{ + "definitions": { + "k8sNetwork": { + "type": "object", + "description": "The network-related settings for the cluster.", + "properties": { + "pods": { + "type": "object", + "description": "The network settings for Kubernetes pods.", + "properties": { + "cidrBlocks": { + "type": "array", + "description": "Specifies a range of IP addresses to use for Kubernetes pods.", + "items": { + "type": "string" + } + } + } + }, + "services": { + "type": "object", + "description": "The network settings for 
Kubernetes services", + "properties": { + "cidrBlocks": { + "type": "array", + "description": "The range of IP addresses to use for Kubernetes services", + "items": { + "type": "string" + } + } + } + } + } + } + }, + "type": "object", + "required": [ + "kind", + "metadata", + "apiVersion", + "spec" + ], + "properties": { + "kind": { + "enum": [ + "CAPVCDCluster" + ], + "type": "string", + "description": "The kind of the Kubernetes cluster.", + "title": "The kind of the Kubernetes cluster.", + "default": "CAPVCDCluster" + }, + "spec": { + "type": "object", + "properties": { + "capiYaml": { + "type": "string", + "title": "CAPI yaml", + "description": "User specification of the CAPI yaml; It is user's responsibility to embed the correct CAPI yaml generated as per instructions - https://github.com/vmware/cluster-api-provider-cloud-director/blob/main/docs/CLUSTERCTL.md#generate-cluster-manifests-for-workload-cluster" + }, + "yamlSet": { + "type": "array", + "items": { + "type": "string" + }, + "title": "User specified K8s Yaml strings", + "description": "User specified K8s Yaml strings to be applied on the target cluster. The component Projector will process this property periodically." + }, + "vcdKe": { + "type": "object", + "properties": { + "isVCDKECluster": { + "type": "boolean", + "title": "User's intent to have this specification processed by VCDKE", + "description": "Does user wants this specification to be processed by the VCDKE component of CSE stack?" + }, + "markForDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster", + "description": "Mark the cluster for deletion", + "default": false + }, + "autoRepairOnErrors": { + "type": "boolean", + "title": "User's intent to let the VCDKE repair/recreate the cluster", + "description": "User's intent to let the VCDKE repair/recreate the cluster on any errors during cluster creation", + "default": true + }, + "forceDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster forcefully", + "description": "User's intent to delete the cluster forcefully", + "default": false + }, + "defaultStorageClassOptions": { + "type": "object", + "properties": { + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + }, + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + } + }, + "title": "Default Storage class options to be set on the target cluster", + "description": "Default Storage class options to be set on the target cluster" + }, + "secure": { + "type": "object", + "x-vcloud-restricted": ["private", "secure"], + "properties": { + "apiToken": { + "type": "string", + "title": "API Token (Refresh Token) of the user", + "description": "API Token (Refresh Token) of the user." 
+ } + }, + "title": "Encrypted data", + "description": "Fields under this section will be encrypted" + } + }, + "title": "User specification for VCDKE component", + "description": "User specification for VCDKE component" + } + }, + "title": "User specification for the cluster", + "description": "User specification for the cluster" + }, + "metadata": { + "type": "object", + "properties": { + "orgName": { + "type": "string", + "description": "The name of the Organization in which cluster needs to be created or managed.", + "title": "The name of the Organization in which cluster needs to be created or managed." + }, + "virtualDataCenterName": { + "type": "string", + "description": "The name of the Organization data center in which the cluster need to be created or managed.", + "title": "The name of the Organization data center in which the cluster need to be created or managed." + }, + "name": { + "type": "string", + "description": "The name of the cluster.", + "title": "The name of the cluster." + }, + "site": { + "type": "string", + "description": "Fully Qualified Domain Name (https://VCD-FQDN.com) of the VCD site in which the cluster is deployed", + "title": "Fully Qualified Domain Name of the VCD site in which the cluster is deployed" + } + }, + "title": "User specification of the metadata of the cluster", + "description": "User specification of the metadata of the cluster" + }, + "status": { + "type": "object", + "x-vcloud-restricted": "protected", + "properties": { + "capvcd": { + "type": "object", + "properties": { + "phase": { + "type": "string" + }, + "kubernetes": { + "type": "string" + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "k8sNetwork": { + "$ref": "#/definitions/k8sNetwork" + }, + "uid": { + "type": "string" + }, + "parentUid": { + "type": "string" + }, + "useAsManagementCluster": { + "type": "boolean" + }, + "clusterApiStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "description": "The phase describing the control plane infrastructure deployment." 
+ }, + "apiEndpoints": { + "type": "array", + "description": "Control Plane load balancer endpoints", + "items": { + "host": { + "type": "string" + }, + "port": { + "type": "integer" + } + } + } + } + }, + "nodePool": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the node pool" + }, + "sizingPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "placementPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "diskSizeMb": { + "type": "integer", + "description": "disk size of the VMs in the node pool in MB" + }, + "nvidiaGpuEnabled": { + "type": "boolean", + "description": "boolean indicating if the node pools have nvidia GPU enabled" + }, + "storageProfile": { + "type": "string", + "description": "storage profile used by the node pool" + }, + "desiredReplicas": { + "type": "integer", + "description": "desired replica count of the nodes in the node pool" + }, + "availableReplicas": { + "type": "integer", + "description": "number of available replicas in the node pool" + } + } + } + }, + "clusterResourceSet": { + "properties": {}, + "type": "object" + }, + "clusterResourceSetBindings": { + "type": "array", + "items": { + "type": "object", + "properties": { + "clusterResourceSetName": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "applied": { + "type": "boolean" + }, + "lastAppliedTime": { + "type": "string" + } + } + } + }, + "capvcdVersion": { + "type": "string" + }, + "vcdProperties": { + "type": "object", + "properties": { + "organizations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "id": { + "type": "string" + } + } + } + }, + "site": { + "type": "string" + }, + "orgVdcs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "id": { + "type": "string" + }, + "ovdcNetworkName": { + "type": "string" + } + } + } + } + } + }, + "upgrade": { + "type": "object", + "description": "determines the state of upgrade. If no upgrade is issued, only the existing version is stored.", + "properties": { + "current": { + "type": "object", + "properties": { + "kubernetesVersion": { + "type": "string", + "description": "current kubernetes version of the cluster. If being upgraded, will represent target kubernetes version of the cluster." + }, + "tkgVersion": { + "type": "string", + "description": "current TKG version of the cluster. If being upgraded, will represent the tarkget TKG version of the cluster." + } + } + }, + "previous": { + "type": "object", + "properties": { + "kubernetesVersion": { + "type": "string", + "description": "the kubernetes version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source kubernetes version from which the cluster is being upgraded." + }, + "tkgVersion": { + "type": "string", + "description": "the TKG version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source TKG versoin from which the cluster is being upgraded." + } + } + }, + "ready": { + "type": "boolean", + "description": "boolean indicating the status of the cluster upgrade." 
+ } + } + }, + "private": { + "type": "object", + "x-vcloud-restricted": ["private", "secure"], + "description": "Placeholder for the properties invisible and secure to non-admin users.", + "properties": { + "kubeConfig": { + "type": "string", + "description": "Kube config to access the Kubernetes cluster." + } + } + }, + "vcdResourceSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "createdByVersion": { + "type": "string", + "description": "CAPVCD version used to create the cluster" + } + }, + "title": "CAPVCD's view of the current status of the cluster", + "description": "CAPVCD's view of the current status of the cluster" + }, + "vcdKe": { + "type": "object", + "properties": { + "state": { + "type": "string", + "title": "VCDKE's view of the current state of the cluster", + "description": "VCDKE's view of the current state of the cluster - provisioning/provisioned/error" + }, + "vcdKeVersion": { + "type": "string", + "title": "VCDKE/CSE product version", + "description": "The VCDKE version with which the cluster is originally created" + }, + "defaultStorageClass": { + "type": "object", + "properties": { + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + }, + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + } + }, + "title": "Default Storage class options to be set on the target cluster", + "description": "Default Storage class options to be set on the target cluster" + } + }, + "title": "VCDKE's view of the current status of the cluster", + "description": "Current status of the cluster from VCDKE's point of view" + }, + "cpi": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the Cloud Provider Interface", + "description": "Name of the CPI" + }, + "version": { + "type": "string", + "title": "Product version of the CPI", + "description": "Product version of the CPI" + } + }, + "title": "CPI for VCD's view of the current status of the cluster", + "description": "CPI for VCD's view of the current status of the cluster" + } + }, + "title": "Current status of the cluster", + "description": "Current status of the cluster. 
The subsections are updated by various components of CSE stack - VCDKE, Projector, CAPVCD, CPI, CSI and Extensions" + }, + "apiVersion": { + "type": "string", + "default": "capvcd.vmware.com/v1.2", + "description": "The version of the payload format" + } + } +} \ No newline at end of file diff --git a/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json b/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json new file mode 100644 index 000000000..000b87445 --- /dev/null +++ b/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json @@ -0,0 +1,727 @@ +{ + "type": "object", + "required": [ + "kind", + "metadata", + "apiVersion", + "spec" + ], + "properties": { + "kind": { + "enum": [ + "CAPVCDCluster" + ], + "type": "string", + "title": "The kind of the Kubernetes cluster.", + "default": "CAPVCDCluster", + "description": "The kind of the Kubernetes cluster." + }, + "spec": { + "type": "object", + "title": "User specification for the cluster", + "properties": { + "vcdKe": { + "type": "object", + "title": "User specification for VCDKE component", + "properties": { + "secure": { + "type": "object", + "title": "Encrypted data", + "properties": { + "apiToken": { + "type": "string", + "title": "API Token (Refresh Token) of the user", + "description": "API Token (Refresh Token) of the user." + } + }, + "description": "Fields under this section will be encrypted", + "x-vcloud-restricted": [ + "private", + "secure" + ] + }, + "forceDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster forcefully", + "default": false, + "description": "User's intent to delete the cluster forcefully" + }, + "markForDelete": { + "type": "boolean", + "title": "User's intent to delete the cluster", + "default": false, + "description": "Mark the cluster for deletion" + }, + "isVCDKECluster": { + "type": "boolean", + "title": "User's intent to have this specification processed by VCDKE", + "description": "Does user wants this specification to be processed by the VCDKE component of CSE stack?" 
+ }, + "autoRepairOnErrors": { + "type": "boolean", + "title": "User's intent to let the VCDKE repair/recreate the cluster", + "default": true, + "description": "User's intent to let the VCDKE repair/recreate the cluster on any errors during cluster creation" + }, + "defaultStorageClassOptions": { + "type": "object", + "title": "Default Storage class options to be set on the target cluster", + "properties": { + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + } + }, + "description": "Default Storage class options to be set on the target cluster" + } + }, + "description": "User specification for VCDKE component" + }, + "yamlSet": { + "type": "array", + "items": { + "type": "string" + }, + "title": "User specified K8s Yaml strings", + "description": "User specified K8s Yaml strings to be applied on the target cluster. The component Projector will process this property periodically." + }, + "capiYaml": { + "type": "string", + "title": "CAPI yaml", + "description": "User specification of the CAPI yaml; It is user's responsibility to embed the correct CAPI yaml generated as per instructions - https://github.com/vmware/cluster-api-provider-cloud-director/blob/main/docs/CLUSTERCTL.md#generate-cluster-manifests-for-workload-cluster" + }, + "projector": { + "type": "object", + "title": "User specification for Projector component", + "properties": { + "operations": { + "type": "array", + "items": { + "type": "object", + "required": [ + "verb", + "values", + "valueType", + "sequence" + ], + "properties": { + "name": { + "type": "string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "verb": { + "enum": [ + "apply", + "create" + ], + "type": "string", + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "values": { + "type": "array", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType", + "x-vcloud-restricted": [ + "private", + "secure" + ] + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "minimum": 1, + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "valueType": { + "enum": [ + "yamlLink", + "yamlString" + ], + "type": "string", + "title": "Value type of the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." 
+ }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "default": false, + "description": "Operation will be retried until it succeeds" + } + } + }, + "title": "Operations to be executed by the component projector", + "description": "User-specified operations to be applied on the target cluster. " + } + }, + "description": "Defines the operations to be executed by the component projector", + "x-vcloud-restricted": "private" + } + }, + "description": "User specification for the cluster" + }, + "status": { + "type": "object", + "title": "Current status of the cluster", + "properties": { + "cpi": { + "type": "object", + "title": "CPI for VCD's view of the current status of the cluster", + "properties": { + "name": { + "type": "string", + "title": "Name of the Cloud Provider Interface", + "description": "Name of the CPI" + }, + "version": { + "type": "string", + "title": "Product version of the CPI", + "description": "Product version of the CPI" + } + }, + "description": "CPI for VCD's view of the current status of the cluster" + }, + "vcdKe": { + "type": "object", + "title": "VCDKE's view of the current status of the cluster", + "properties": { + "state": { + "type": "string", + "title": "VCDKE's view of the current state of the cluster", + "description": "VCDKE's view of the current state of the cluster - provisioning/provisioned/error" + }, + "vcdKeVersion": { + "type": "string", + "title": "VCDKE/CSE product version", + "description": "The VCDKE version with which the cluster is originally created" + }, + "defaultStorageClass": { + "type": "object", + "title": "Default Storage class options to be set on the target cluster", + "properties": { + "fileSystem": { + "type": "string", + "title": "Default file System of the volumes", + "description": "Default file System of the volumes to be created from the default storage class" + }, + "k8sStorageClassName": { + "type": "string", + "title": "Name of the Kubernetes storage class to be created", + "description": "Name of the Kubernetes storage class to be created" + }, + "vcdStorageProfileName": { + "type": "string", + "title": "Name of the VCD storage profile", + "description": "Name of the VCD storage profile" + }, + "useDeleteReclaimPolicy": { + "type": "boolean", + "title": "Reclaim policy of the Kubernetes storage class", + "description": "Reclaim policy of the Kubernetes storage class" + } + }, + "description": "Default Storage class options to be set on the target cluster" + } + }, + "description": "Current status of the cluster from VCDKE's point of view" + }, + "capvcd": { + "type": "object", + "title": "CAPVCD's view of the current status of the cluster", + "properties": { + "uid": { + "type": "string" + }, + "phase": { + "type": "string" + }, + "private": { + "type": "object", + "properties": { + "kubeConfig": { + "type": "string", + "description": "Kube config to access the Kubernetes cluster." + } + }, + "description": "Placeholder for the properties invisible and secure to non-admin users.", + "x-vcloud-restricted": [ + "private", + "secure" + ] + }, + "upgrade": { + "type": "object", + "properties": { + "ready": { + "type": "boolean", + "description": "boolean indicating the status of the cluster upgrade." + }, + "current": { + "type": "object", + "properties": { + "tkgVersion": { + "type": "string", + "description": "current TKG version of the cluster. If being upgraded, will represent the tarkget TKG version of the cluster." 
+ }, + "kubernetesVersion": { + "type": "string", + "description": "current kubernetes version of the cluster. If being upgraded, will represent target kubernetes version of the cluster." + } + } + }, + "previous": { + "type": "object", + "properties": { + "tkgVersion": { + "type": "string", + "description": "the TKG version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source TKG versoin from which the cluster is being upgraded." + }, + "kubernetesVersion": { + "type": "string", + "description": "the kubernetes version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source kubernetes version from which the cluster is being upgraded." + } + } + } + }, + "description": "determines the state of upgrade. If no upgrade is issued, only the existing version is stored." + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "nodePool": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "name of the node pool" + }, + "diskSizeMb": { + "type": "integer", + "description": "disk size of the VMs in the node pool in MB" + }, + "sizingPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "storageProfile": { + "type": "string", + "description": "storage profile used by the node pool" + }, + "desiredReplicas": { + "type": "integer", + "description": "desired replica count of the nodes in the node pool" + }, + "placementPolicy": { + "type": "string", + "description": "name of the sizing policy used by the node pool" + }, + "nvidiaGpuEnabled": { + "type": "boolean", + "description": "boolean indicating if the node pools have nvidia GPU enabled" + }, + "availableReplicas": { + "type": "integer", + "description": "number of available replicas in the node pool" + } + } + } + }, + "parentUid": { + "type": "string" + }, + "k8sNetwork": { + "$ref": "#/definitions/k8sNetwork" + }, + "kubernetes": { + "type": "string" + }, + "capvcdVersion": { + "type": "string" + }, + "vcdProperties": { + "type": "object", + "properties": { + "site": { + "type": "string" + }, + "orgVdcs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + }, + "ovdcNetworkName": { + "type": "string" + } + } + } + }, + "organizations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "name": { + "type": "string" + } + } + } + } + } + }, + "vcdResourceSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + } + }, + "clusterApiStatus": { + "type": "object", + "properties": { + "phase": { + "type": "string", + "description": "The phase describing the control plane infrastructure deployment." 
+ }, + "apiEndpoints": { + "type": "array", + "items": { + "host": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "description": "Control Plane load balancer endpoints" + } + } + }, + "createdByVersion": { + "type": "string", + "description": "CAPVCD version used to create the cluster" + }, + "clusterResourceSet": { + "type": "object", + "properties": {} + }, + "useAsManagementCluster": { + "type": "boolean" + }, + "clusterResourceSetBindings": { + "type": "array", + "items": { + "type": "object", + "properties": { + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "applied": { + "type": "boolean" + }, + "lastAppliedTime": { + "type": "string" + }, + "clusterResourceSetName": { + "type": "string" + } + } + } + } + }, + "description": "CAPVCD's view of the current status of the cluster" + }, + "projector": { + "type": "object", + "title": "Current Status of the Projector Component", + "properties": { + "name": { + "type": "string", + "title": "Projector Name", + "description": "The name of the projector component." + }, + "version": { + "type": "string", + "title": "Projector Version", + "description": "The product version of the projector component." + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Error Set", + "description": "An array containing error information related to the operations of the projector component." + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Event Set", + "description": "An array containing event information related to the operations of the projector component." + }, + "retrySet": { + "type": "array", + "items": { + "type": "object", + "properties": { + "operation": { + "type": "object", + "title": "Spec of the operation to be retried", + "properties": { + "name": { + "type": "string", + "title": "Name of the operation", + "description": "Name of the operation" + }, + "verb": { + "enum": [ + "apply", + "create" + ], + "type": "string", + "title": "Kubernetes command of the operation", + "description": "Kubernetes command of the operation" + }, + "values": { + "type": "array", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType", + "x-vcloud-restricted": [ + "private", + "secure" + ] + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Sequence number of the operation" + }, + "valueType": { + "enum": [ + "yamlLink", + "yamlString" + ], + "type": "string", + "title": "Value type of the operation", + "description": "Value type of the operation" + }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "description": "Operation will be retried until it succeeds" + } + }, + "description": "Spec of the operation to be retried" + }, + "createTimeStamp": { + "type": "string", + "title": "The timestamp at which this operation failed for the first time", + "description": "The timestamp at which this operation failed for the first time" + } + } + }, + "title": "The operations to be retried by the Projector", + "description": "The operations to be retried by the Projector" + }, + "operations": { + "type": "array", + "items": { + "type": "object", + "title": "Operation Status", + "properties": { + "name": { + "type": 
"string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "verb": { + "enum": [ + "apply", + "create" + ], + "type": "string", + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "output": { + "type": "string", + "title": "Output", + "description": "The execution output of the operation." + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "valueType": { + "enum": [ + "yamlLink", + "yamlString" + ], + "type": "string", + "title": "Value type the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." + }, + "forceDelete": { + "type": "boolean", + "title": "Flag which indicates whether the operation should be forcefully deleted.", + "description": "Indicates whether the operation should be forcefully deleted." + } + }, + "description": "Status of a specific operation executed in the projector component." + }, + "title": "Operation Status of Projector after Execution", + "description": "An array containing the status of operations executed in the projector component." + }, + "lastAppliedSequence": { + "type": "integer", + "title": "Last Applied Sequence", + "default": 1, + "minimum": 1, + "description": "The sequence number of the last applied operation in the projector component." + }, + "lastAppliedTimestamp": { + "type": "string", + "title": "Last Applied Timestamp", + "description": "The timestamp of the last applied operation in the projector component." + } + }, + "description": "Current status of the projector component. It reflects the operation execution status of the projector component." + } + }, + "description": "Current status of the cluster. The subsections are updated by various components of CSE stack - VCDKE, Projector, CAPVCD, CPI, CSI and Extensions", + "x-vcloud-restricted": "protected" + }, + "metadata": { + "type": "object", + "title": "User specification of the metadata of the cluster", + "properties": { + "name": { + "type": "string", + "title": "The name of the cluster.", + "description": "The name of the cluster." + }, + "site": { + "type": "string", + "title": "Fully Qualified Domain Name of the VCD site in which the cluster is deployed", + "description": "Fully Qualified Domain Name (https://VCD-FQDN.com) of the VCD site in which the cluster is deployed" + }, + "orgName": { + "type": "string", + "title": "The name of the Organization in which cluster needs to be created or managed.", + "description": "The name of the Organization in which cluster needs to be created or managed." + }, + "virtualDataCenterName": { + "type": "string", + "title": "The name of the Organization data center in which the cluster need to be created or managed.", + "description": "The name of the Organization data center in which the cluster need to be created or managed." 
+ } + }, + "description": "User specification of the metadata of the cluster" + }, + "apiVersion": { + "type": "string", + "default": "capvcd.vmware.com/v1.2", + "description": "The version of the payload format" + } + }, + "definitions": { + "k8sNetwork": { + "type": "object", + "properties": { + "pods": { + "type": "object", + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Specifies a range of IP addresses to use for Kubernetes pods." + } + }, + "description": "The network settings for Kubernetes pods." + }, + "services": { + "type": "object", + "properties": { + "cidrBlocks": { + "type": "array", + "items": { + "type": "string" + }, + "description": "The range of IP addresses to use for Kubernetes services" + } + }, + "description": "The network settings for Kubernetes services" + } + }, + "description": "The network-related settings for the cluster." + } + } +} From 48c556912e841514923df4b305ebab85c341dac9 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 12:00:47 +0100 Subject: [PATCH 063/156] # Signed-off-by: abarreiro --- .../v4.2.0/entities/tkgmcluster.json.template | 29 - .../v4.2.0/entities/vcdkeconfig.json.template | 9 +- .../schemas/capvcd-type-schema-v1.3.0.json | 261 ++++- .../vcdkeconfig-type-schema-v1.1.0.json | 908 +++++------------- 4 files changed, 517 insertions(+), 690 deletions(-) delete mode 100644 examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template diff --git a/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template b/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template deleted file mode 100644 index 88896c2c6..000000000 --- a/examples/container-service-extension/v4.2.0/entities/tkgmcluster.json.template +++ /dev/null @@ -1,29 +0,0 @@ -{ - "apiVersion": "capvcd.vmware.com/v1.1", - "kind": "CAPVCDCluster", - "name": "${name}", - "metadata": { - "name": "${name}", - "orgName": "${org}", - "site": "${vcd_url}", - "virtualDataCenterName": "${vdc}" - }, - "spec": { - "vcdKe": { - "isVCDKECluster": true, - "markForDelete": ${delete}, - "forceDelete": ${force_delete}, - "autoRepairOnErrors": ${auto_repair_on_errors}, - "defaultStorageClassOptions": { - "filesystem": "${default_storage_class_filesystem}", - "k8sStorageClassName": "${default_storage_class_name}", - "vcdStorageProfileName": "${default_storage_class_storage_profile}", - "useDeleteReclaimPolicy": ${default_storage_class_delete_reclaim_policy} - }, - "secure": { - "apiToken": "${api_token}" - } - }, - "capiYaml": ${capi_yaml} - } -} diff --git a/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template b/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template index 9a3ef0523..3d2b0c5ac 100644 --- a/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template +++ b/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template @@ -8,6 +8,11 @@ "heartbeatWatcherTimeoutInMin": 10, "staleHeartbeatIntervalInMin": 30 }, + "vcdKeInstances": [ + { + "name": "vcd-container-service-extension" + } + ], "K8Config": { "certificateAuthorities": [ ${k8s_cluster_certificates} @@ -32,7 +37,7 @@ "nodeNotReadyTimeout": "${node_not_ready_timeout}", "nodeUnknownTimeout": "${node_unknown_timeout}" }, - "rdeProjectorVersion": "0.6.0" + "rdeProjectorVersion": "${rde_projector_version}" }, "vcdConfig": { "sysLogger": { @@ -51,7 +56,7 @@ "capvcdRde": { "nss": "capvcdCluster", "vendor": "vmware", - "version": 
"1.2.0" + "version": "1.3.0" } }, "coreCapiVersion": "v1.4.0", diff --git a/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json b/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json index f033b1c0d..f4b9db1f2 100644 --- a/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json +++ b/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json @@ -66,6 +66,84 @@ "title": "User specified K8s Yaml strings", "description": "User specified K8s Yaml strings to be applied on the target cluster. The component Projector will process this property periodically." }, + "projector": { + "type": "object", + "x-vcloud-restricted": "private", + "title": "User specification for Projector component", + "description": "Defines the operations to be executed by the component projector", + "properties": { + "operations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type of the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." + }, + "values": { + "type": "array", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType" + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "minimum": 1, + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "description": "Operation will be retried until it succeeds", + "default": false + } + }, + "required": [ + "verb", + "values", + "valueType", + "sequence" + ] + }, + "title": "Operations to be executed by the component projector", + "description": "User-specified operations to be applied on the target cluster. " + } + } + }, "vcdKe": { "type": "object", "properties": { @@ -121,7 +199,10 @@ }, "secure": { "type": "object", - "x-vcloud-restricted": ["private", "secure"], + "x-vcloud-restricted": [ + "private", + "secure" + ], "properties": { "apiToken": { "type": "string", @@ -374,7 +455,10 @@ }, "private": { "type": "object", - "x-vcloud-restricted": ["private", "secure"], + "x-vcloud-restricted": [ + "private", + "secure" + ], "description": "Placeholder for the properties invisible and secure to non-admin users.", "properties": { "kubeConfig": { @@ -458,6 +542,177 @@ }, "title": "CPI for VCD's view of the current status of the cluster", "description": "CPI for VCD's view of the current status of the cluster" + }, + "projector": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Projector Name", + "description": "The name of the projector component." 
+ }, + "version": { + "type": "string", + "title": "Projector Version", + "description": "The product version of the projector component." + }, + "errorSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Error Set", + "description": "An array containing error information related to the operations of the projector component." + }, + "eventSet": { + "type": "array", + "items": { + "type": "object", + "properties": {} + }, + "title": "Event Set", + "description": "An array containing event information related to the operations of the projector component." + }, + "lastAppliedSequence": { + "type": "integer", + "minimum": 1, + "default": 1, + "title": "Last Applied Sequence", + "description": "The sequence number of the last applied operation in the projector component." + }, + "lastAppliedTimestamp": { + "type": "string", + "title": "Last Applied Timestamp", + "description": "The timestamp of the last applied operation in the projector component." + }, + "operations": { + "type": "array", + "items": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Specifies the Kubernetes command for the operation. Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "The name of the operation, if applicable." + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type the operation", + "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Specifies the sequence/order in which the operation should be executed." + }, + "forceDelete": { + "type": "boolean", + "title": "Flag which indicates whether the operation should be forcefully deleted.", + "description": "Indicates whether the operation should be forcefully deleted." + }, + "output": { + "type": "string", + "title": "Output", + "description": "The execution output of the operation." + } + }, + "title": "Operation Status", + "description": "Status of a specific operation executed in the projector component." + }, + "title": "Operation Status of Projector after Execution", + "description": "An array containing the status of operations executed in the projector component." 
+ }, + "retrySet": { + "type": "array", + "items": { + "type": "object", + "properties": { + "operation": { + "type": "object", + "properties": { + "verb": { + "type": "string", + "enum": [ + "apply", + "create" + ], + "title": "Kubernetes command of the operation", + "description": "Kubernetes command of the operation" + }, + "name": { + "type": "string", + "title": "Name of the operation", + "description": "Name of the operation" + }, + "valueType": { + "type": "string", + "enum": [ + "yamlLink", + "yamlString" + ], + "title": "Value type of the operation", + "description": "Value type of the operation" + }, + "values": { + "type": "array", + "x-vcloud-restricted": [ + "private", + "secure" + ], + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "object" + } + ] + }, + "title": "Value of the operation", + "description": "Array of values used for the operation.Type of the values must be consistent with the valueType" + }, + "sequence": { + "type": "integer", + "title": "Sequence number of the operation", + "description": "Sequence number of the operation" + }, + "retryUntilSuccess": { + "type": "boolean", + "title": "Operation will be retried until it succeeds", + "description": "Operation will be retried until it succeeds" + } + }, + "title": "Spec of the operation to be retried", + "description": "Spec of the operation to be retried" + }, + "createTimeStamp": { + "type": "string", + "title": "The timestamp at which this operation failed for the first time", + "description": "The timestamp at which this operation failed for the first time" + } + } + }, + "title": "The operations to be retried by the Projector", + "description": "The operations to be retried by the Projector" + } + }, + "title": "Current Status of the Projector Component", + "description": "Current status of the projector component. It reflects the operation execution status of the projector component." } }, "title": "Current status of the cluster", @@ -469,4 +724,4 @@ "description": "The version of the payload format" } } -} \ No newline at end of file +} diff --git a/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json b/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json index 000b87445..1f721919a 100644 --- a/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json +++ b/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json @@ -1,727 +1,323 @@ { "type": "object", - "required": [ - "kind", - "metadata", - "apiVersion", - "spec" - ], "properties": { - "kind": { - "enum": [ - "CAPVCDCluster" - ], - "type": "string", - "title": "The kind of the Kubernetes cluster.", - "default": "CAPVCDCluster", - "description": "The kind of the Kubernetes cluster." - }, - "spec": { - "type": "object", - "title": "User specification for the cluster", - "properties": { - "vcdKe": { + "profiles": { + "type": "array", + "items": [ + { "type": "object", - "title": "User specification for VCDKE component", "properties": { - "secure": { - "type": "object", - "title": "Encrypted data", - "properties": { - "apiToken": { - "type": "string", - "title": "API Token (Refresh Token) of the user", - "description": "API Token (Refresh Token) of the user." 
- } - }, - "description": "Fields under this section will be encrypted", - "x-vcloud-restricted": [ - "private", - "secure" - ] - }, - "forceDelete": { - "type": "boolean", - "title": "User's intent to delete the cluster forcefully", - "default": false, - "description": "User's intent to delete the cluster forcefully" - }, - "markForDelete": { - "type": "boolean", - "title": "User's intent to delete the cluster", - "default": false, - "description": "Mark the cluster for deletion" - }, - "isVCDKECluster": { - "type": "boolean", - "title": "User's intent to have this specification processed by VCDKE", - "description": "Does user wants this specification to be processed by the VCDKE component of CSE stack?" + "name": { + "type": "string" }, - "autoRepairOnErrors": { - "type": "boolean", - "title": "User's intent to let the VCDKE repair/recreate the cluster", - "default": true, - "description": "User's intent to let the VCDKE repair/recreate the cluster on any errors during cluster creation" + "active": { + "type": "boolean" }, - "defaultStorageClassOptions": { - "type": "object", - "title": "Default Storage class options to be set on the target cluster", - "properties": { - "fileSystem": { - "type": "string", - "title": "Default file System of the volumes", - "description": "Default file System of the volumes to be created from the default storage class" - }, - "k8sStorageClassName": { - "type": "string", - "title": "Name of the Kubernetes storage class to be created", - "description": "Name of the Kubernetes storage class to be created" - }, - "vcdStorageProfileName": { - "type": "string", - "title": "Name of the VCD storage profile", - "description": "Name of the VCD storage profile" - }, - "useDeleteReclaimPolicy": { - "type": "boolean", - "title": "Reclaim policy of the Kubernetes storage class", - "description": "Reclaim policy of the Kubernetes storage class" - } - }, - "description": "Default Storage class options to be set on the target cluster" - } - }, - "description": "User specification for VCDKE component" - }, - "yamlSet": { - "type": "array", - "items": { - "type": "string" - }, - "title": "User specified K8s Yaml strings", - "description": "User specified K8s Yaml strings to be applied on the target cluster. The component Projector will process this property periodically." - }, - "capiYaml": { - "type": "string", - "title": "CAPI yaml", - "description": "User specification of the CAPI yaml; It is user's responsibility to embed the correct CAPI yaml generated as per instructions - https://github.com/vmware/cluster-api-provider-cloud-director/blob/main/docs/CLUSTERCTL.md#generate-cluster-manifests-for-workload-cluster" - }, - "projector": { - "type": "object", - "title": "User specification for Projector component", - "properties": { - "operations": { + "vcdKeInstances": { "type": "array", - "items": { - "type": "object", - "required": [ - "verb", - "values", - "valueType", - "sequence" - ], - "properties": { - "name": { - "type": "string", - "title": "Name of the operation", - "description": "The name of the operation, if applicable." - }, - "verb": { - "enum": [ - "apply", - "create" - ], - "type": "string", - "title": "Kubernetes command of the operation", - "description": "Specifies the Kubernetes command for the operation. 
Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" - }, - "values": { - "type": "array", - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "object" - } - ] + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" }, - "title": "Value of the operation", - "description": "Array of values used for the operation.Type of the values must be consistent with the valueType", - "x-vcloud-restricted": [ - "private", - "secure" - ] - }, - "sequence": { - "type": "integer", - "title": "Sequence number of the operation", - "minimum": 1, - "description": "Specifies the sequence/order in which the operation should be executed." - }, - "valueType": { - "enum": [ - "yamlLink", - "yamlString" - ], - "type": "string", - "title": "Value type of the operation", - "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." - }, - "retryUntilSuccess": { - "type": "boolean", - "title": "Operation will be retried until it succeeds", - "default": false, - "description": "Operation will be retried until it succeeds" + "version": { + "type": "string", + "default": "4.1.0" + }, + "vcdKeInstanceId": { + "type": "string" + } } } - }, - "title": "Operations to be executed by the component projector", - "description": "User-specified operations to be applied on the target cluster. " - } - }, - "description": "Defines the operations to be executed by the component projector", - "x-vcloud-restricted": "private" - } - }, - "description": "User specification for the cluster" - }, - "status": { - "type": "object", - "title": "Current status of the cluster", - "properties": { - "cpi": { - "type": "object", - "title": "CPI for VCD's view of the current status of the cluster", - "properties": { - "name": { - "type": "string", - "title": "Name of the Cloud Provider Interface", - "description": "Name of the CPI" - }, - "version": { - "type": "string", - "title": "Product version of the CPI", - "description": "Product version of the CPI" - } - }, - "description": "CPI for VCD's view of the current status of the cluster" - }, - "vcdKe": { - "type": "object", - "title": "VCDKE's view of the current status of the cluster", - "properties": { - "state": { - "type": "string", - "title": "VCDKE's view of the current state of the cluster", - "description": "VCDKE's view of the current state of the cluster - provisioning/provisioned/error" - }, - "vcdKeVersion": { - "type": "string", - "title": "VCDKE/CSE product version", - "description": "The VCDKE version with which the cluster is originally created" + ] }, - "defaultStorageClass": { + "serverConfig": { "type": "object", - "title": "Default Storage class options to be set on the target cluster", "properties": { - "fileSystem": { - "type": "string", - "title": "Default file System of the volumes", - "description": "Default file System of the volumes to be created from the default storage class" + "rdePollIntervalInMin": { + "type": "integer", + "description": "Server polls and processes the RDEs for every #rdePollIntervalInMin minutes." 
}, - "k8sStorageClassName": { - "type": "string", - "title": "Name of the Kubernetes storage class to be created", - "description": "Name of the Kubernetes storage class to be created" - }, - "vcdStorageProfileName": { - "type": "string", - "title": "Name of the VCD storage profile", - "description": "Name of the VCD storage profile" + "heartbeatWatcherTimeoutInMin": { + "type": "integer", + "description": "The watcher thread kills itself if it does not receive heartbeat with in #heartbeatWatcherTimeoutInMin from the associated worker thread. Eventually worker also dies off as it can no longer post to the already closed heartbeat channel." }, - "useDeleteReclaimPolicy": { - "type": "boolean", - "title": "Reclaim policy of the Kubernetes storage class", - "description": "Reclaim policy of the Kubernetes storage class" + "staleHeartbeatIntervalInMin": { + "type": "integer", + "description": "New worker waits for about #staleHeartbeatIntervalinMin before it calls the current heartbeat stale and picks up the RDE. The value must always be greater than #heartbeatWatcherTimeoutInmin" } - }, - "description": "Default Storage class options to be set on the target cluster" - } - }, - "description": "Current status of the cluster from VCDKE's point of view" - }, - "capvcd": { - "type": "object", - "title": "CAPVCD's view of the current status of the cluster", - "properties": { - "uid": { - "type": "string" + } }, - "phase": { - "type": "string" + "vcdConfig": { + "type": "object", + "properties": { + "sysLogger": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "port": { + "type": "string" + } + }, + "required": [ + "host", + "port" + ] + } + } }, - "private": { + "githubConfig": { "type": "object", "properties": { - "kubeConfig": { - "type": "string", - "description": "Kube config to access the Kubernetes cluster." + "githubPersonalAccessToken": { + "type": "string" } - }, - "description": "Placeholder for the properties invisible and secure to non-admin users.", - "x-vcloud-restricted": [ - "private", - "secure" - ] + } }, - "upgrade": { + "bootstrapClusterConfig": { "type": "object", "properties": { - "ready": { - "type": "boolean", - "description": "boolean indicating the status of the cluster upgrade." + "sizingPolicy": { + "type": "string" + }, + "dockerVersion": { + "type": "string" + }, + "kindVersion": { + "type": "string", + "default": "v0.19.0" + }, + "kindestNodeVersion": { + "type": "string", + "default": "v1.27.1", + "description": "Image tag of kindest/node container, used by KinD to deploy a cluster" }, - "current": { + "kubectlVersion": { + "type": "string" + }, + "clusterctl": { "type": "object", "properties": { - "tkgVersion": { + "version": { "type": "string", - "description": "current TKG version of the cluster. If being upgraded, will represent the tarkget TKG version of the cluster." + "default": "v1.4.0" }, - "kubernetesVersion": { - "type": "string", - "description": "current kubernetes version of the cluster. If being upgraded, will represent target kubernetes version of the cluster." + "clusterctlyaml": { + "type": "string" } } }, - "previous": { + "capiEcosystem": { "type": "object", "properties": { - "tkgVersion": { + "coreCapiVersion": { "type": "string", - "description": "the TKG version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source TKG versoin from which the cluster is being upgraded." 
+ "default": "v1.4.0" + }, + "controlPlaneProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.4.0" + } + } }, - "kubernetesVersion": { + "bootstrapProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.4.0" + } + } + }, + "infraProvider": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "v1.1.0" + }, + "capvcdRde": { + "type": "object", + "properties": { + "vendor": { + "type": "string" + }, + "nss": { + "type": "string" + }, + "version": { + "type": "string" + } + } + } + } + }, + "certManagerVersion": { "type": "string", - "description": "the kubernetes version from which the cluster was upgraded from. If cluster upgrade is still in progress, the field will represent the source kubernetes version from which the cluster is being upgraded." + "default": "v1.11.1" } } - } - }, - "description": "determines the state of upgrade. If no upgrade is issued, only the existing version is stored." - }, - "errorSet": { - "type": "array", - "items": { - "type": "object", - "properties": {} - } - }, - "eventSet": { - "type": "array", - "items": { - "type": "object", - "properties": {} - } - }, - "nodePool": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string", - "description": "name of the node pool" - }, - "diskSizeMb": { - "type": "integer", - "description": "disk size of the VMs in the node pool in MB" - }, - "sizingPolicy": { - "type": "string", - "description": "name of the sizing policy used by the node pool" - }, - "storageProfile": { - "type": "string", - "description": "storage profile used by the node pool" - }, - "desiredReplicas": { - "type": "integer", - "description": "desired replica count of the nodes in the node pool" - }, - "placementPolicy": { - "type": "string", - "description": "name of the sizing policy used by the node pool" - }, - "nvidiaGpuEnabled": { - "type": "boolean", - "description": "boolean indicating if the node pools have nvidia GPU enabled" - }, - "availableReplicas": { - "type": "integer", - "description": "number of available replicas in the node pool" - } - } - } - }, - "parentUid": { - "type": "string" - }, - "k8sNetwork": { - "$ref": "#/definitions/k8sNetwork" - }, - "kubernetes": { - "type": "string" - }, - "capvcdVersion": { - "type": "string" - }, - "vcdProperties": { - "type": "object", - "properties": { - "site": { - "type": "string" }, - "orgVdcs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - }, - "ovdcNetworkName": { - "type": "string" - } + "proxyConfig": { + "type": "object", + "properties": { + "httpProxy": { + "type": "string" + }, + "httpsProxy": { + "type": "string" + }, + "noProxy": { + "type": "string" } } }, - "organizations": { + "certificateAuthorities": { "type": "array", + "description": "Certificates to be used as the certificate authority in the bootstrap (ephemeral) VM", "items": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "name": { - "type": "string" - } - } + "type": "string" } } } }, - "vcdResourceSet": { - "type": "array", - "items": { - "type": "object", - "properties": {} - } - }, - "clusterApiStatus": { + "K8Config": { "type": "object", "properties": { - "phase": { - "type": "string", - "description": "The phase 
describing the control plane infrastructure deployment." - }, - "apiEndpoints": { + "csi": { "type": "array", - "items": { - "host": { + "items": [ + { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string", + "default": "1.4.0" + } + }, + "required": [ + "name", + "version" + ] + } + ] + }, + "cpi": { + "type": "object", + "properties": { + "name": { "type": "string" }, - "port": { - "type": "integer" + "version": { + "type": "string", + "default": "1.4.0" } }, - "description": "Control Plane load balancer endpoints" - } - } - }, - "createdByVersion": { - "type": "string", - "description": "CAPVCD version used to create the cluster" - }, - "clusterResourceSet": { - "type": "object", - "properties": {} - }, - "useAsManagementCluster": { - "type": "boolean" - }, - "clusterResourceSetBindings": { - "type": "array", - "items": { - "type": "object", - "properties": { - "kind": { - "type": "string" - }, - "name": { - "type": "string" - }, - "applied": { - "type": "boolean" - }, - "lastAppliedTime": { - "type": "string" + "required": [ + "name", + "version" + ] + }, + "cni": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "version": { + "type": "string" + } }, - "clusterResourceSetName": { - "type": "string" - } - } - } - } - }, - "description": "CAPVCD's view of the current status of the cluster" - }, - "projector": { - "type": "object", - "title": "Current Status of the Projector Component", - "properties": { - "name": { - "type": "string", - "title": "Projector Name", - "description": "The name of the projector component." - }, - "version": { - "type": "string", - "title": "Projector Version", - "description": "The product version of the projector component." - }, - "errorSet": { - "type": "array", - "items": { - "type": "object", - "properties": {} - }, - "title": "Error Set", - "description": "An array containing error information related to the operations of the projector component." - }, - "eventSet": { - "type": "array", - "items": { - "type": "object", - "properties": {} - }, - "title": "Event Set", - "description": "An array containing event information related to the operations of the projector component." 
- }, - "retrySet": { - "type": "array", - "items": { - "type": "object", - "properties": { - "operation": { - "type": "object", - "title": "Spec of the operation to be retried", - "properties": { - "name": { - "type": "string", - "title": "Name of the operation", - "description": "Name of the operation" - }, - "verb": { - "enum": [ - "apply", - "create" - ], - "type": "string", - "title": "Kubernetes command of the operation", - "description": "Kubernetes command of the operation" - }, - "values": { - "type": "array", - "items": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "object" - } - ] - }, - "title": "Value of the operation", - "description": "Array of values used for the operation.Type of the values must be consistent with the valueType", - "x-vcloud-restricted": [ - "private", - "secure" - ] - }, - "sequence": { - "type": "integer", - "title": "Sequence number of the operation", - "description": "Sequence number of the operation" - }, - "valueType": { - "enum": [ - "yamlLink", - "yamlString" - ], - "type": "string", - "title": "Value type of the operation", - "description": "Value type of the operation" - }, - "retryUntilSuccess": { - "type": "boolean", - "title": "Operation will be retried until it succeeds", - "description": "Operation will be retried until it succeeds" - } + "required": [ + "name", + "version" + ] + }, + "rdeProjectorVersion": { + "type": "string", + "default": "0.6.0" + }, + "mhc": { + "type": "object", + "description": "Parameters to configure MachineHealthCheck", + "properties": { + "maxUnhealthyNodes": { + "type": "number", + "default": 100, + "minimum": 1, + "maximum": 100, + "description": "Dictates whether MHC should remediate the machine if the given percentage of nodes in the cluster are down" + }, + "nodeStartupTimeout": { + "type": "string", + "default": "900s", + "description": "Determines how long a MachineHealthCheck should wait for a Node to join the cluster, before considering a Machine unhealthy." }, - "description": "Spec of the operation to be retried" + "nodeNotReadyTimeout": { + "type": "string", + "default": "300s", + "description": "Determines how long MachineHealthCheck should wait for before remediating Machines if the Node Ready condition is False" + }, + "nodeUnknownTimeout": { + "type": "string", + "default": "300s", + "description": "Determines how long MachineHealthCheck should wait for before remediating machines if the Node Ready condition is Unknown" + } }, - "createTimeStamp": { - "type": "string", - "title": "The timestamp at which this operation failed for the first time", - "description": "The timestamp at which this operation failed for the first time" + "required": [ + "maxUnhealthyNodes", + "nodeStartupTimeout", + "nodeNotReadyTimeout", + "nodeUnknownTimeout" + ] + }, + "certificateAuthorities": { + "type": "array", + "description": "Certificates to be used as the certificate authority", + "items": { + "type": "string" } } }, - "title": "The operations to be retried by the Projector", - "description": "The operations to be retried by the Projector" - }, - "operations": { - "type": "array", - "items": { - "type": "object", - "title": "Operation Status", - "properties": { - "name": { - "type": "string", - "title": "Name of the operation", - "description": "The name of the operation, if applicable." - }, - "verb": { - "enum": [ - "apply", - "create" - ], - "type": "string", - "title": "Kubernetes command of the operation", - "description": "Specifies the Kubernetes command for the operation. 
Apply supports yamlLink, yamlString; Delete supports KubernetesKind; Create supports yamlLink, yamlString" - }, - "output": { - "type": "string", - "title": "Output", - "description": "The execution output of the operation." - }, - "sequence": { - "type": "integer", - "title": "Sequence number of the operation", - "description": "Specifies the sequence/order in which the operation should be executed." - }, - "valueType": { - "enum": [ - "yamlLink", - "yamlString" - ], - "type": "string", - "title": "Value type the operation", - "description": "Specifies the type of values to be used (e.g., yamlString, yamlLink, K8sKind, cseContainer)." - }, - "forceDelete": { - "type": "boolean", - "title": "Flag which indicates whether the operation should be forcefully deleted.", - "description": "Indicates whether the operation should be forcefully deleted." - } - }, - "description": "Status of a specific operation executed in the projector component." - }, - "title": "Operation Status of Projector after Execution", - "description": "An array containing the status of operations executed in the projector component." - }, - "lastAppliedSequence": { - "type": "integer", - "title": "Last Applied Sequence", - "default": 1, - "minimum": 1, - "description": "The sequence number of the last applied operation in the projector component." + "required": [ + "csi", + "cpi", + "cni" + ] }, - "lastAppliedTimestamp": { + "containerRegistryUrl": { "type": "string", - "title": "Last Applied Timestamp", - "description": "The timestamp of the last applied operation in the projector component." + "default": "projects.registry.vmware.com" } }, - "description": "Current status of the projector component. It reflects the operation execution status of the projector component." - } - }, - "description": "Current status of the cluster. The subsections are updated by various components of CSE stack - VCDKE, Projector, CAPVCD, CPI, CSI and Extensions", - "x-vcloud-restricted": "protected" - }, - "metadata": { - "type": "object", - "title": "User specification of the metadata of the cluster", - "properties": { - "name": { - "type": "string", - "title": "The name of the cluster.", - "description": "The name of the cluster." - }, - "site": { - "type": "string", - "title": "Fully Qualified Domain Name of the VCD site in which the cluster is deployed", - "description": "Fully Qualified Domain Name (https://VCD-FQDN.com) of the VCD site in which the cluster is deployed" - }, - "orgName": { - "type": "string", - "title": "The name of the Organization in which cluster needs to be created or managed.", - "description": "The name of the Organization in which cluster needs to be created or managed." - }, - "virtualDataCenterName": { - "type": "string", - "title": "The name of the Organization data center in which the cluster need to be created or managed.", - "description": "The name of the Organization data center in which the cluster need to be created or managed." + "required": [ + "name", + "active" + ] } - }, - "description": "User specification of the metadata of the cluster" - }, - "apiVersion": { - "type": "string", - "default": "capvcd.vmware.com/v1.2", - "description": "The version of the payload format" + ] } }, - "definitions": { - "k8sNetwork": { - "type": "object", - "properties": { - "pods": { - "type": "object", - "properties": { - "cidrBlocks": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Specifies a range of IP addresses to use for Kubernetes pods." 
- } - }, - "description": "The network settings for Kubernetes pods." - }, - "services": { - "type": "object", - "properties": { - "cidrBlocks": { - "type": "array", - "items": { - "type": "string" - }, - "description": "The range of IP addresses to use for Kubernetes services" - } - }, - "description": "The network settings for Kubernetes services" - } - }, - "description": "The network-related settings for the cluster." - } - } + "required": [ + "profiles" + ] } From 3b4a62cb63baac6dfc9a9e76df4e763a8e2c1607 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 12:54:06 +0100 Subject: [PATCH 064/156] Add org traversal right Signed-off-by: abarreiro --- .../install/step1/3.11-cse-install-2-cse-server-prerequisites.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 21195d235..36d0a616a 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -96,6 +96,7 @@ resource "vcd_role" "cse_admin_role" { description = "Used for administrative purposes" rights = [ "API Tokens: Manage", + "Organization: Traversal", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Full Access", From 143c5a793e97f89089c307c6fe7977b2b39bcae1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:20:59 +0100 Subject: [PATCH 065/156] Add vcd_version data source Signed-off-by: abarreiro --- .../3.11-cse-install-1-provider-config.tf | 16 +++- ...-cse-install-2-cse-server-prerequisites.tf | 5 +- .../install/step1/terraform.tfvars.example | 1 + .../v4.2.0/install/step1/variables.tf | 5 ++ vcd/datasource_vcd_version.go | 79 +++++++++++++++++++ vcd/datasource_vcd_version_test.go | 52 ++++++++++++ vcd/provider.go | 1 + 7 files changed, 154 insertions(+), 5 deletions(-) create mode 100644 vcd/datasource_vcd_version.go create mode 100644 vcd/datasource_vcd_version_test.go diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf index 18e0ad66b..75182f04f 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. 
terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } } } @@ -32,3 +32,15 @@ provider "vcd" { logging = true logging_file = "cse_install_step1.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} + +# There are some special rights an elements introduced in VCD 10.5.1 +data "vcd_version" "gte_1051" { + condition = ">= 10.5.1" + fail_if_not_match = false +} \ No newline at end of file diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 36d0a616a..f87775322 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -94,9 +94,8 @@ resource "vcd_role" "cse_admin_role" { org = var.administrator_org name = "CSE Admin Role" description = "Used for administrative purposes" - rights = [ + rights = concat([ "API Tokens: Manage", - "Organization: Traversal", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Full Access", @@ -107,7 +106,7 @@ resource "vcd_role" "cse_admin_role" { "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View" - ] + ], data.vcd_version.gte_1051.matches_condition ? ["Organization: Traversal"] : []) } # This will allow to have a user with a limited set of rights that can access the Provider area of VCD. diff --git a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example index b4a5f86f3..d4e883a47 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example +++ b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example @@ -7,6 +7,7 @@ # ------------------------------------------------ vcd_url = "https://vcd.my-awesome-corp.com" +vcd_version = "10.5.1" administrator_user = "administrator" administrator_password = "change-me" administrator_org = "System" diff --git a/examples/container-service-extension/v4.2.0/install/step1/variables.tf b/examples/container-service-extension/v4.2.0/install/step1/variables.tf index 123d9db5f..4142fdbe6 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/variables.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/variables.tf @@ -7,6 +7,11 @@ variable "vcd_url" { type = string } +variable "vcd_version" { + description = "The version of VCD. 
Required to manage the differences between VCD versions, such as available Rights" + type = string +} + variable "insecure_login" { description = "Allow unverified SSL connections when operating with VCD" type = bool diff --git a/vcd/datasource_vcd_version.go b/vcd/datasource_vcd_version.go new file mode 100644 index 000000000..19f5df6c8 --- /dev/null +++ b/vcd/datasource_vcd_version.go @@ -0,0 +1,79 @@ +package vcd + +import ( + "context" + "fmt" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func datasourceVcdVersion() *schema.Resource { + return &schema.Resource{ + ReadContext: datasourceVcdVersionRead, + Schema: map[string]*schema.Schema{ + "condition": { + Type: schema.TypeString, + Optional: true, + Description: "A condition to check against the VCD version", + RequiredWith: []string{"fail_if_not_match"}, + }, + "fail_if_not_match": { + Type: schema.TypeBool, + Optional: true, + Description: "This data source fails if the VCD doesn't match the version constraint set in 'condition'", + RequiredWith: []string{"condition"}, + }, + "matches_condition": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether VCD matches the condition or not", + }, + "vcd_version": { + Type: schema.TypeString, + Computed: true, + Description: "The VCD version", + }, + "api_version": { + Type: schema.TypeString, + Computed: true, + Description: "The VCD API version", + }, + }, + } +} + +func datasourceVcdVersionRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + vcdClient := meta.(*VCDClient) + vcdVersion, err := vcdClient.VCDClient.Client.GetVcdShortVersion() + if err != nil { + return diag.Errorf("could not get VCD version: %s", err) + } + apiVersion, err := vcdClient.VCDClient.Client.MaxSupportedVersion() + if err != nil { + return diag.Errorf("could not get VCD API version: %s", err) + } + + dSet(d, "vcd_version", vcdVersion) + dSet(d, "api_version", apiVersion) + + if condition, ok := d.GetOk("condition"); ok { + checkVer, err := semver.NewVersion(vcdVersion) + if err != nil { + return diag.Errorf("unable to parse version '%s': %s", vcdVersion, err) + } + constraints, err := semver.NewConstraint(condition.(string)) + if err != nil { + return diag.Errorf("unable to parse given version constraint '%s' : %s", condition, err) + } + matchesCondition := constraints.Check(checkVer) + dSet(d, "matches_condition", matchesCondition) + if !matchesCondition && d.Get("fail_if_not_match").(bool) { + return diag.Errorf("the VCD version doesn't match the version constraint '%s'", condition) + } + } + + // The ID is artificial, and we try to identify each data source instance unequivocally through its parameters. 
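+	// For example, on a VCD 10.5.1 with condition ">= 10.4.2" and fail_if_not_match=true, the resulting ID is
+	// "vcd_version=10.5.1,condition=>= 10.4.2,fail_if_not_match=true".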
+ d.SetId(fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=%t", vcdVersion, d.Get("condition"), d.Get("fail_if_not_match"))) + return nil +} diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go new file mode 100644 index 000000000..44c5d9333 --- /dev/null +++ b/vcd/datasource_vcd_version_test.go @@ -0,0 +1,52 @@ +//go:build ALL || functional + +package vcd + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccVcdVersion(t *testing.T) { + preTestChecks(t) + skipIfNotSysAdmin(t) + + vcdClient := createSystemTemporaryVCDConnection() + currentVersion, err := vcdClient.Client.GetVcdShortVersion() + if err != nil { + t.Fatalf("could not get VCD version: %s", err) + } + + var params = StringMap{ + "Condition": ">= " + currentVersion, + } + testParamsNotEmpty(t, params) + + configText1 := templateFill(testAccVcdVersion, params) + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + debugPrintf("#[DEBUG] CONFIGURATION: %s", configText1) + + resource.ParallelTest(t, resource.TestCase{ + ProviderFactories: testAccProviders, + Steps: []resource.TestStep{ + { + Config: configText1, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=false", currentVersion, params["Condition"])), + ), + }, + }, + }) + postTestChecks(t) +} + +const testAccVcdVersion = ` +data "vcd_version" "version" { + name = "{{.Vcenter}}" + } +` diff --git a/vcd/provider.go b/vcd/provider.go index c2ac313f0..021595d28 100644 --- a/vcd/provider.go +++ b/vcd/provider.go @@ -154,6 +154,7 @@ var globalDataSourceMap = map[string]*schema.Resource{ "vcd_vgpu_profile": datasourceVcdVgpuProfile(), // 3.11 "vcd_vm_vgpu_policy": datasourceVcdVmVgpuPolicy(), // 3.11 "vcd_cse_kubernetes_cluster": datasourceVcdCseKubernetesCluster(), // 3.12 + "vcd_version": datasourceVcdVersion(), // 3.12 } var globalResourceMap = map[string]*schema.Resource{ From 9c07d3e0fd1954b2ba85e80d71c2342f2c5831e7 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:21:42 +0100 Subject: [PATCH 066/156] Add vcd_version data source Signed-off-by: abarreiro --- .changes/v3.12.0/1195-features.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md index c890e0a40..86c0c8771 100644 --- a/.changes/v3.12.0/1195-features.md +++ b/.changes/v3.12.0/1195-features.md @@ -1,4 +1,5 @@ * **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension 4.2 installed and running [GH-1195] * **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension 4.2 - installed and running [GH-1195] \ No newline at end of file + installed and running [GH-1195] +* **New Data Source:** `vcd_version` to get the VCD version and perform additional checks with version constraints [GH-1195] \ No newline at end of file From bee31eb15e137cd7363ebc8b6c4bb61a8dcd1d10 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:22:59 +0100 Subject: [PATCH 067/156] Add vcd_version data source Signed-off-by: abarreiro --- .../v4.2.0/install/step1/terraform.tfvars.example | 1 - .../v4.2.0/install/step1/variables.tf | 5 ----- 2 files changed, 6 deletions(-) diff --git a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example 
b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example index d4e883a47..b4a5f86f3 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example +++ b/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example @@ -7,7 +7,6 @@ # ------------------------------------------------ vcd_url = "https://vcd.my-awesome-corp.com" -vcd_version = "10.5.1" administrator_user = "administrator" administrator_password = "change-me" administrator_org = "System" diff --git a/examples/container-service-extension/v4.2.0/install/step1/variables.tf b/examples/container-service-extension/v4.2.0/install/step1/variables.tf index 4142fdbe6..123d9db5f 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/variables.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/variables.tf @@ -7,11 +7,6 @@ variable "vcd_url" { type = string } -variable "vcd_version" { - description = "The version of VCD. Required to manage the differences between VCD versions, such as available Rights" - type = string -} - variable "insecure_login" { description = "Allow unverified SSL connections when operating with VCD" type = bool From 88d9ea35ee138cb3dd85be86161f975c670b7347 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:39:23 +0100 Subject: [PATCH 068/156] Add vcd_version data source Signed-off-by: abarreiro --- .../3.11-cse-install-1-provider-config.tf | 2 +- vcd/datasource_vcd_version_test.go | 44 ++++++++++++++++--- website/docs/d/version.html.markdown | 37 ++++++++++++++++ website/vcd.erb | 3 ++ 4 files changed, 79 insertions(+), 7 deletions(-) create mode 100644 website/docs/d/version.html.markdown diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf index 75182f04f..3df620368 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf @@ -39,7 +39,7 @@ data "vcd_version" "cse_minimum_supported" { fail_if_not_match = true } -# There are some special rights an elements introduced in VCD 10.5.1 +# There are some special rights and elements introduced in VCD 10.5.1 data "vcd_version" "gte_1051" { condition = ">= 10.5.1" fail_if_not_match = false diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index 44c5d9333..ffb370a1e 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -4,14 +4,15 @@ package vcd import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccVcdVersion(t *testing.T) { - preTestChecks(t) - skipIfNotSysAdmin(t) + //preTestChecks(t) + //skipIfNotSysAdmin(t) vcdClient := createSystemTemporaryVCDConnection() currentVersion, err := vcdClient.Client.GetVcdShortVersion() @@ -19,25 +20,56 @@ func TestAccVcdVersion(t *testing.T) { t.Fatalf("could not get VCD version: %s", err) } + apiVersion, err := vcdClient.VCDClient.Client.MaxSupportedVersion() + if err != nil { + t.Fatalf("could not get VCD API version: %s", err) + } + var params = StringMap{ - "Condition": ">= " + currentVersion, + "Condition": ">= 99.99.99", + "FailIfNotMatch": "false", } testParamsNotEmpty(t, params) - configText1 := templateFill(testAccVcdVersion, params) + step1 := templateFill(testAccVcdVersion, 
params) + + params["FuncName"] = params["FuncName"].(string) + "-step2" + params["FailIfNotMatch"] = "true" + step2 := templateFill(testAccVcdVersion, params) + + params["FuncName"] = params["FuncName"].(string) + "-step3" + params["Condition"] = "= " + currentVersion + step3 := templateFill(testAccVcdVersion, params) + if vcdShortTest { t.Skip(acceptanceTestsSkipped) return } - debugPrintf("#[DEBUG] CONFIGURATION: %s", configText1) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) resource.ParallelTest(t, resource.TestCase{ ProviderFactories: testAccProviders, Steps: []resource.TestStep{ { - Config: configText1, + Config: step1, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=false", currentVersion, params["Condition"])), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "false"), + ), + }, + { + Config: step2, + ExpectError: regexp.MustCompile(`the VCD version doesn't match the version constraint '>= 99.99.99'`), + }, + { + Config: step3, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=true", currentVersion, params["Condition"])), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), ), }, }, diff --git a/website/docs/d/version.html.markdown b/website/docs/d/version.html.markdown new file mode 100644 index 000000000..ca57589ca --- /dev/null +++ b/website/docs/d/version.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "vcd" +page_title: "VMware Cloud Director: vcd_version" +sidebar_current: "docs-vcd-data-source-version" +description: |- + Provides a VCD version data source. +--- + +# vcd\_version + +Provides a VMware Cloud Director version data source to fetch the VCD version, the maximum API version and perform some optional +checks with version constraints. + +Supported in provider *v3.12+*. Requires System Administrator privileges. + +## Example Usage + +```hcl +# This data source will assert that the VCD version is exactly 10.5.1, otherwise it will fail +data "vcd_version" "gte_1051" { + condition = "= 10.5.1" + fail_if_not_match = true +} +``` + +## Argument Reference + +The following arguments are supported: + +* `condition` - (Optional) A version constraint to check against the VCD version +* `fail_if_not_match` - (Optional) Required if `condition` is set. 
Throws an error if the version constraint set in `condition` is not met + +## Attribute Reference + +* `matches_condition` - It is true if the VCD version matches the constraint set in `condition` +* `vcd_version` - The VCD version +* `api_version` - The maximum supported API version diff --git a/website/vcd.erb b/website/vcd.erb index e56791e55..f92ec18e7 100644 --- a/website/vcd.erb +++ b/website/vcd.erb @@ -411,6 +411,9 @@ > vcd_cse_kubernetes_cluster + + > + vcd_version From 47cb8bbfb2801d26744bcf3d33af427022ee0521 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:42:05 +0100 Subject: [PATCH 069/156] Fix 4.1 Signed-off-by: abarreiro --- .../step1/3.11-cse-install-1-provider-config.tf | 16 ++++++++++++++-- ....11-cse-install-2-cse-server-prerequisites.tf | 4 ++-- .../step2/3.11-cse-install-4-provider-config.tf | 10 ++++++++-- .../step2/3.11-cse-install-4-provider-config.tf | 10 ++++++++-- 4 files changed, 32 insertions(+), 8 deletions(-) diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf index ba3ef6103..d522b0025 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. 
terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } } } @@ -32,3 +32,15 @@ provider "vcd" { logging = true logging_file = "cse_install_step1.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} + +# There are some special rights and elements introduced in VCD 10.5.1 +data "vcd_version" "gte_1051" { + condition = ">= 10.5.1" + fail_if_not_match = false +} \ No newline at end of file diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 2da8e4909..3484fc6e0 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -94,7 +94,7 @@ resource "vcd_role" "cse_admin_role" { org = var.administrator_org name = "CSE Admin Role" description = "Used for administrative purposes" - rights = [ + rights = concat([ "API Tokens: Manage", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator Full access", "${vcd_rde_type.vcdkeconfig_type.vendor}:${vcd_rde_type.vcdkeconfig_type.nss}: Administrator View", @@ -106,7 +106,7 @@ resource "vcd_role" "cse_admin_role" { "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Full Access", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: Modify", "${vcd_rde_type.capvcdcluster_type.vendor}:${vcd_rde_type.capvcdcluster_type.nss}: View" - ] + ], data.vcd_version.gte_1051.matches_condition ? ["Organization: Traversal"] : []) } # This will allow to have a user with a limited set of rights that can access the Provider area of VCD. diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf index f3d622de6..794613fb3 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. 
terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } time = { source = "hashicorp/time" @@ -40,3 +40,9 @@ provider "vcd" { logging = true logging_file = "cse_install_step2.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} \ No newline at end of file diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf index 3a573270d..4fe844023 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf @@ -11,12 +11,12 @@ # Other than that, this snippet should be applied as it is. # ------------------------------------------------------------------------------------------------------------ -# VCD Provider configuration. It must be at least v3.11.0 and configured with a System administrator account. +# VCD Provider configuration. It must be at least v3.12.0 and configured with a System administrator account. terraform { required_providers { vcd = { source = "vmware/vcd" - version = ">= 3.11" + version = ">= 3.12" } time = { source = "hashicorp/time" @@ -40,3 +40,9 @@ provider "vcd" { logging = true logging_file = "cse_install_step2.log" } + +# Minimum supported version for CSE +data "vcd_version" "cse_minimum_supported" { + condition = ">= 10.4.2" + fail_if_not_match = true +} \ No newline at end of file From c58aabfe49426c48ccfb137380d7b608dfcefd23 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 14:48:35 +0100 Subject: [PATCH 070/156] Update guide Signed-off-by: abarreiro --- ...ervice_extension_4_x_install.html.markdown | 148 ++++++------------ 1 file changed, 48 insertions(+), 100 deletions(-) diff --git a/website/docs/guides/container_service_extension_4_x_install.html.markdown b/website/docs/guides/container_service_extension_4_x_install.html.markdown index 8c7343cb5..81a5d799d 100644 --- a/website/docs/guides/container_service_extension_4_x_install.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_install.html.markdown @@ -1,19 +1,19 @@ --- layout: "vcd" -page_title: "VMware Cloud Director: Container Service Extension v4.1 installation" +page_title: "VMware Cloud Director: Container Service Extension 4.2 installation" sidebar_current: "docs-vcd-guides-cse-4-x-install" description: |- - Provides guidance on configuring VCD to be able to install and use Container Service Extension v4.1 + Provides guidance on configuring VCD to be able to install and use Container Service Extension 4.2 --- -# Container Service Extension v4.1 installation +# Container Service Extension 4.2 installation ## About -This guide describes the required steps to configure VCD to install the Container Service Extension (CSE) v4.1, that +This guide describes the required steps to configure VCD to install the Container Service Extension (CSE) 4.2, that will allow tenant users to deploy **Tanzu Kubernetes Grid Multi-cloud (TKGm)** clusters on VCD using Terraform or the UI. -To know more about CSE v4.1, you can visit [the documentation][cse_docs]. +To know more about CSE 4.2, you can visit [the documentation][cse_docs]. ## Pre-requisites @@ -21,15 +21,15 @@ To know more about CSE v4.1, you can visit [the documentation][cse_docs]. 
In order to complete the steps described in this guide, please be aware: -* CSE v4.1 is supported from VCD v10.4.2 or above, as specified in the [Product Interoperability Matrix][product_matrix]. +* CSE 4.2 is supported from VCD v10.4.2 or above, as specified in the [Product Interoperability Matrix][product_matrix]. Please check that the target VCD appliance matches the criteria. -* Terraform provider needs to be v3.11.0 or above. +* Terraform provider needs to be v3.12.0 or above. * Both CSE Server and the Bootstrap clusters require outbound Internet connectivity. -* CSE v4.1 makes use of [ALB](/providers/vmware/vcd/latest/docs/guides/nsxt_alb) capabilities. +* CSE 4.2 makes use of [ALB](/providers/vmware/vcd/latest/docs/guides/nsxt_alb) capabilities. ## Installation process --> To install CSE v4.1, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install). +-> To install CSE 4.2, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install). You can check it, customise it to your needs and apply. However, reading this guide first is recommended to understand what it does and how to use it. The installation process is split in two independent steps that must be run one after the other: @@ -57,7 +57,7 @@ modified and be applied as they are. #### RDE Interfaces, Types and Behaviors -CSE v4.1 requires a set of Runtime Defined Entity items, such as [Interfaces][rde_interface], [Types][rde_type] and [Behaviors][rde_interface_behavior]. +CSE 4.2 requires a set of Runtime Defined Entity items, such as [Interfaces][rde_interface], [Types][rde_type] and [Behaviors][rde_interface_behavior]. In the [step 1 configuration][step1] you can find the following: * The required `VCDKEConfig` [RDE Interface][rde_interface] and [RDE Type][rde_type]. These two resources specify the schema of the **CSE Server @@ -76,14 +76,15 @@ To customise it, the [step 1 configuration][step1] asks for the following variab * `vcdkeconfig_template_filepath` references a local file that defines the `VCDKEConfig` [RDE][rde] contents. It should be a JSON file with template variables that Terraform can interpret, like - [the RDE template file for CSE v4.1](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/entities/vcdkeconfig.json.template) + [the RDE template file for CSE 4.2](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template) used in the step 1 configuration, that can be rendered correctly with the Terraform built-in function `templatefile`. - (Note: In `terraform.tfvars.example` the path for the CSE v4.1 RDE contents is already provided). -* `capvcd_version`: The version for CAPVCD. The default value is **"1.1.0"** for CSE v4.1. + (Note: In `terraform.tfvars.example` the path for the CSE 4.2 RDE contents is already provided). +* `capvcd_version`: The version for CAPVCD. The default value is **"1.1.0"** for CSE 4.2. (Note: Do not confuse with the version of the `capvcdCluster` [RDE Type][rde_type], - which **must be "1.2.0"** for CSE v4.1 and cannot be changed through a variable). -* `cpi_version`: The version for CPI (Cloud Provider Interface). The default value is **"1.4.0"** for CSE v4.1. 
-* `csi_version`: The version for CSI (Cloud Storage Interface). The default value is **"1.4.0"** for CSE v4.1. + which **must be "1.3.0"** for CSE 4.2 and cannot be changed through a variable). +* `cpi_version`: The version for CPI (Cloud Provider Interface). The default value is **"1.5.0"** for CSE 4.2. +* `csi_version`: The version for CSI (Cloud Storage Interface). The default value is **"1.5.0"** for CSE 4.2. +* `rde_projector_version`: The version for the RDE Projector. The default value is **"0.7.0"** for CSE 4.2. * `github_personal_access_token`: Create this one [here](https://github.com/settings/tokens), this will avoid installation errors caused by GitHub rate limiting, as the TKGm cluster creation process requires downloading some Kubernetes components from GitHub. @@ -106,7 +107,7 @@ To customise it, the [step 1 configuration][step1] asks for the following variab #### Rights, Roles and VM Sizing Policies -CSE v4.1 requires a set of new [Rights Bundles][rights_bundle], [Roles][role] and [VM Sizing Policies][sizing] that are also created +CSE 4.2 requires a set of new [Rights Bundles][rights_bundle], [Roles][role] and [VM Sizing Policies][sizing] that are also created in this step of the [step 1 configuration][step1]. Nothing should be customised here, except for the "CSE Administrator" account to be created, where you can provide a username of your choice (`cse_admin_username`) and its password (`cse_admin_password`). @@ -120,7 +121,7 @@ Once all variables are reviewed and set, you can start the installation with `te ~> Make sure that the previous step is successfully completed. -This step will create all the remaining elements to install CSE v4.1 in VCD. You can read the subsequent sections +This step will create all the remaining elements to install CSE 4.2 in VCD. You can read the subsequent sections to have a better understanding of the building blocks that are described in the [step 2 Terraform configuration][step2]. In this [configuration][step2] you can also find a file named `terraform.tfvars.example` that needs to be updated with correct values and renamed to `terraform.tfvars` @@ -171,7 +172,7 @@ Then it will upload the required OVAs to them. The OVAs can be specified in `ter * `tkgm_ova_folder`: This will reference the path to the TKGm OVA, as an absolute or relative path. It should **not** end with a trailing `/`. * `tkgm_ova_files`: This will reference the file names of the TKGm OVAs, like `[ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova, ubuntu-2004-kube-v1.24.11+vmware.1-tkg.1-2ccb2a001f8bd8f15f1bfbc811071830.ova]`. * `cse_ova_folder`: This will reference the path to the CSE OVA, as an absolute or relative path. It should **not** end with a trailing `/`. -* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova`. +* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.2.0.ova`. -> To download the required OVAs, please refer to the [CSE documentation][cse_docs]. You can also check the [Product Interoperability Matrix][product_matrix] to confirm the appropriate version of TKGm. @@ -182,7 +183,7 @@ In case you're using a pre-uploaded OVA, leverage the [vcd_catalog_vapp_template #### Networking -The [step 2 configuration][step2] prepares a basic networking layout that will make CSE v4.1 work. 
However, it is +The [step 2 configuration][step2] prepares a basic networking layout that will make CSE 4.2 work. However, it is recommended that you review the code and adapt the different parts to your needs, specially for the resources like `vcd_nsxt_firewall`. The configuration will create the following: @@ -283,7 +284,7 @@ or if your tenant users are not familiar with Terraform, they will be still able with the UI. If you decide to install it, `k8s_container_clusters_ui_plugin_path` should point to the -[Kubernetes Container Clusters UI plug-in v4.1][cse_docs] ZIP file that you can download in the [CSE documentation][cse_docs]. +[Kubernetes Container Clusters UI plug-in 4.2][cse_docs] ZIP file that you can download in the [CSE documentation][cse_docs]. ### Final considerations @@ -337,65 +338,24 @@ The most common issues are: * Cluster creation is failing: * Please visit the [CSE documentation][cse_docs] to learn how to monitor the logs and troubleshoot possible problems. -## Upgrade from CSE v4.0 to v4.1 +## Upgrade from CSE 4.1 to 4.2 -In this section you can find the required steps to update from CSE v4.0 to v4.1. +In this section you can find the required steps to update from CSE 4.1 to 4.2. -~> This section assumes that the old CSE v4.0 installation was done with Terraform by following the v4.0 guide steps. +~> This section assumes that the old CSE 4.1 installation was done with Terraform by following the 4.1 guide steps. Also, you need to meet [the pre-requisites criteria](#pre-requisites). ### Create the new RDE elements -A new [RDE Interface][rde_interface] needs to be created, which is required by the new v4.1 version: - -```hcl -resource "vcd_rde_interface" "cse_interface" { - vendor = "cse" - nss = "capvcd" - version = "1.0.0" - name = "cseInterface" -} -``` - -CSE v4.1 also requires the usage of [RDE Interface Behaviors][rde_interface_behavior] and -[RDE Behavior Access Controls][rde_type_behavior_acl] that can be created with the following snippets (these can -also be found in the [step 1 configuration][step1]): - -```hcl -resource "vcd_rde_interface_behavior" "capvcd_behavior" { - rde_interface_id = vcd_rde_interface.cse_interface.id - name = "getFullEntity" - execution = { - "type" : "noop" - "id" : "getFullEntity" - } -} - -resource "vcd_rde_type_behavior_acl" "capvcd_behavior_acl" { - rde_type_id = vcd_rde_type.capvcdcluster_type_v120.id # This definition is below - behavior_id = vcd_rde_interface_behavior.capvcd_behavior.id - access_level_ids = ["urn:vcloud:accessLevel:FullControl"] -} -``` - -Create a new version of the [RDE Types][rde_type] that were used in v4.0. This will allow them to co-exist with the old ones, +Create a new version of the [RDE Types][rde_type] that were used in 4.1. This will allow them to co-exist with the old ones, so we can perform a smooth upgrade. 
```hcl -resource "vcd_rde_type" "vcdkeconfig_type_v110" { - # Same attributes as v4.1, except for: - version = "1.1.0" # New version - # New schema: - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.1/schemas/vcdkeconfig-type-schema-v1.1.0.json" -} - -resource "vcd_rde_type" "capvcdcluster_type_v120" { - # Same attributes as v4.1, except for: - version = "1.2.0" # New version +resource "vcd_rde_type" "capvcdcluster_type_v130" { + # Same attributes as 4.1, except for: + version = "1.3.0" # New version # New schema: - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.1/schemas/capvcd-type-schema-v1.2.0.json" - # Notice that the new interface cse:capvcd:1.0.0 is used - interface_ids = [data.vcd_rde_interface.kubernetes_interface.id, vcd_rde_interface.cse_interface.id] + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json" # Behaviors need to be created before any RDE Type depends_on = [vcd_rde_interface_behavior.capvcd_behavior] } @@ -404,22 +364,15 @@ resource "vcd_rde_type" "capvcdcluster_type_v120" { ### Upgrade the VCDKEConfig RDE (CSE Server configuration) With the new [RDE Types][rde_type] in place, you need to perform an upgrade of the existing `VCDKEConfig` [RDE][rde], which -stores the CSE Server configuration. By using the v3.11.0 of the VCD Terraform Provider, you can do this update without forcing +stores the CSE Server configuration. By using the v3.12.0 of the VCD Terraform Provider, you can do this update without forcing a replacement: ```hcl resource "vcd_rde" "vcdkeconfig_instance" { # Same values as before, except: - rde_type_id = vcd_rde_type.vcdkeconfig_type_v110.id # Update to the new RDE Type input_entity = templatefile(var.vcdkeconfig_template_filepath, { # Same values as before, except: - node_startup_timeout = var.node_startup_timeout - node_not_ready_timeout = var.node_not_ready_timeout - node_unknown_timeout = var.node_unknown_timeout - max_unhealthy_node_percentage = var.max_unhealthy_node_percentage - container_registry_url = var.container_registry_url - k8s_cluster_certificates = join(",", var.k8s_cluster_certificates) - bootstrap_vm_certificates = join(",", var.bootstrap_vm_certificates) + rde_projector_version = "0.7.0" }) } ``` @@ -427,24 +380,19 @@ resource "vcd_rde" "vcdkeconfig_instance" { You can find the meaning of these values in the section ["RDE (CSE Server configuration / VCDKEConfig)"](#rde-cse-server-configuration--vcdkeconfig). Please notice that you need to upgrade the CAPVCD, CPI and CSI versions. The new values are stated in the same section. -### Update Rights and Roles - -There are differences between the rights needed in v4.0 and v4.1. You can check the resources `vcd_rights_bundle.k8s_clusters_rights_bundle` and -`vcd_global_role.k8s_cluster_author` in the [step 1 configuration][step1] to see the new required set of rights. - -### Upload the new CSE v4.1 OVA +### Upload the new CSE 4.2 OVA -You need to upload the new CSE v4.1 OVA to the `cse_catalog` that already hosts the CSE v4.0 one. +You need to upload the new CSE 4.2 OVA to the `cse_catalog` that already hosts the CSE 4.1 one. To download the required OVAs, please refer to the [CSE documentation][cse_docs]. 
```hcl -resource "vcd_catalog_vapp_template" "cse_ova_v4_1" { - org = vcd_org.solutions_organization.name # References the Solutions Organization that already exists from v4.0 - catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog that already exists from v4.0 +resource "vcd_catalog_vapp_template" "cse_ova_4_2" { + org = vcd_org.solutions_organization.name # References the Solutions Organization that already exists from 4.1 + catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog that already exists from 4.1 - name = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - description = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - ova_path = "VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" + name = "VMware_Cloud_Director_Container_Service_Extension-4.2.0" + description = "VMware_Cloud_Director_Container_Service_Extension-4.2.0" + ova_path = "VMware_Cloud_Director_Container_Service_Extension-4.2.0.ova" } ``` @@ -455,11 +403,11 @@ To update the CSE Server, just change the referenced OVA: ```hcl resource "vcd_vapp_vm" "cse_server_vm" { # All values remain the same, except: - vapp_template_id = vcd_catalog_vapp_template.cse_ova_v4_1.id # Reference the v4.1 OVA + vapp_template_id = vcd_catalog_vapp_template.cse_ova_4_2.id # Reference the 4.2 OVA } ``` -This will re-deploy the VM with the new CSE v4.1 Server. +This will re-deploy the VM with the new CSE 4.2 Server. ## Update CSE Server Configuration @@ -501,14 +449,14 @@ In the [step 2 configuration][step2], you can find the `cse_ova` [vApp Template] Then you can create a new `vcd_catalog_vapp_template` and modify `cse_server_vm` to reference it: ```hcl -# Uploads a new CSE Server OVA. In the example below, we upload version 4.1.0 +# Uploads a new CSE Server OVA. In the example below, we upload version 4.2.1 resource "vcd_catalog_vapp_template" "new_cse_ova" { org = vcd_org.solutions_organization.name # References the Solutions Organization catalog_id = vcd_catalog.cse_catalog.id # References the CSE Catalog - name = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - description = "VMware_Cloud_Director_Container_Service_Extension-4.1.0" - ova_path = "/home/bob/cse/VMware_Cloud_Director_Container_Service_Extension-4.1.0.ova" + name = "VMware_Cloud_Director_Container_Service_Extension-4.2.1" + description = "VMware_Cloud_Director_Container_Service_Extension-4.2.1" + ova_path = "/home/bob/cse/VMware_Cloud_Director_Container_Service_Extension-4.2.1.ova" } # ... 
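# A possible continuation of the "# ..." above (sketch only): once the new OVA is uploaded,
# the existing CSE Server VM defined earlier in this guide can reference it, so the next
# apply re-deploys the server with version 4.2.1. The resource names "cse_server_vm" and
# "new_cse_ova" are assumed to match the ones used in the step 2 configuration.
resource "vcd_vapp_vm" "cse_server_vm" {
  # All other values remain the same; only the referenced vApp Template changes
  vapp_template_id = vcd_catalog_vapp_template.new_cse_ova.id
}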
@@ -556,8 +504,8 @@ Once all clusters are removed in the background by CSE Server, you may destroy t [role]: /providers/vmware/vcd/latest/docs/resources/role [routed_network]: /providers/vmware/vcd/latest/docs/resources/network_routed_v2 [sizing]: /providers/vmware/vcd/latest/docs/resources/vm_sizing_policy -[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install/step1 -[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.1/install/step2 +[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install/step1 +[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install/step2 [tkgm_docs]: https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/index.html [user]: /providers/vmware/vcd/latest/docs/resources/org_user [ui_plugin]: /providers/vmware/vcd/latest/docs/resources/ui_plugin From d754f0799c38b30c6b7f77c186f0412670b581b1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 17:23:22 +0100 Subject: [PATCH 071/156] Fix data source Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 65 ++++++++++----- vcd/resource_vcd_cse_kubernetes_cluster.go | 82 +++++++++++-------- .../d/cse_kubernetes_cluster.html.markdown | 31 ++++++- 3 files changed, 123 insertions(+), 55 deletions(-) diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index 6c97b8ddf..3d02ebd22 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -3,44 +3,51 @@ package vcd import ( "context" _ "embed" + semver "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/vmware/go-vcloud-director/v2/govcd" ) func datasourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ ReadContext: datasourceVcdCseKubernetesRead, Schema: map[string]*schema.Schema{ + "org": { + Type: schema.TypeString, + Optional: true, + Description: "The name of organization that owns the Kubernetes cluster, optional if defined at provider " + + "level. Useful when connected as sysadmin working across different organizations", + }, "cluster_id": { - Type: schema.TypeString, - Required: true, - Description: "The ID of the Kubernetes cluster to read", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_id", "name"}, + Description: "The unique ID of the Kubernetes cluster to read that must be present in the Organization", + }, + "name": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{"cluster_id", "name"}, + RequiredWith: []string{"cse_version"}, + Description: "The name of the Kubernetes cluster to read. 
If there is more than one Kubernetes cluster with the same name, searching by name will fail", }, "cse_version": { - Type: schema.TypeString, - Computed: true, - Description: "The CSE version used by the cluster", + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"name"}, + Description: "The CSE version used by the cluster", }, "runtime": { Type: schema.TypeString, Computed: true, Description: "The Kubernetes runtime used by the cluster", }, - "name": { - Type: schema.TypeString, - Computed: true, - Description: "The name of the Kubernetes cluster", - }, "ova_id": { Type: schema.TypeString, Computed: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", }, - "org_id": { - Type: schema.TypeString, - Computed: true, - Description: "The name of organization that owns this Kubernetes cluster", - }, "vdc_id": { Type: schema.TypeString, Computed: true, @@ -251,15 +258,31 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m vcdClient := meta.(*VCDClient) org, err := vcdClient.GetOrgFromResource(d) if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Get("cluster_id"), err) + return diag.Errorf("could not get the target Organization: %s", err) } - cluster, err := org.CseGetKubernetesClusterById(d.Get("cluster_id").(string)) - if err != nil { - return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Get("cluster_id"), err) + var cluster *govcd.CseKubernetesCluster + if id, ok := d.GetOk("cluster_id"); ok { + cluster, err = org.CseGetKubernetesClusterById(id.(string)) + if err != nil { + return diag.FromErr(err) + } + } else if name, ok := d.GetOk("name"); ok { + cseVersion, err := semver.NewVersion(d.Get("cse_version").(string)) + if err != nil { + return diag.Errorf("could not parse cse_version='%s': %s", cseVersion, err) + } + clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, name.(string)) + if err != nil { + return diag.FromErr(err) + } + if len(clusters) != 1 { + return diag.Errorf("expected one Kubernetes cluster with name '%s', got %d. 
Try to use 'cluster_id' instead of 'name'", name, len(clusters)) + } + cluster = clusters[0] } - warns, err := saveClusterDataToState(d, cluster, "") + warns, err := saveClusterDataToState(d, cluster, org.Org.Name) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index a41145bfc..2497cd872 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -28,7 +28,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, // Required, but validated at runtime ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0", "4.2.1"}, false), + ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), Description: "The CSE version to use", DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { // This custom diff function allows to correctly compare versions @@ -163,7 +163,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, }, - "node_pool": { + "worker_pool": { Type: schema.TypeList, Optional: true, // Required, but validated at runtime Description: "Defines a node pool for the cluster", @@ -173,7 +173,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, // Required, but validated at runtime ForceNew: true, - Description: "The name of this node pool", + Description: "The name of this worker pool", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, @@ -181,7 +181,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 1, // As suggested in UI - Description: "The number of nodes that this node pool has. Must be higher than 0", + Description: "The number of nodes that this worker pool has. Must be higher than 0", ValidateDiagFunc: minimumValue(0, "number of nodes must be higher than or equal to 0"), }, "disk_size_gi": { @@ -346,17 +346,17 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { // are marked as Optional in the schema to facilitate the Import operation, but some of them are actually mandatory. func validateCseKubernetesClusterSchema(d *schema.ResourceData) diag.Diagnostics { var diags diag.Diagnostics - for _, arg := range []string{"cse_version", "name", "ova_id", "vdc_id", "network_id", "api_token_file", "control_plane", "node_pool"} { + for _, arg := range []string{"cse_version", "name", "ova_id", "vdc_id", "network_id", "api_token_file", "control_plane", "worker_pool"} { if _, ok := d.GetOk(arg); !ok { diags = append(diags, diag.Errorf("the argument '%s' is required, but no definition was found", arg)...) } } - nodePoolsRaw := d.Get("node_pool").([]interface{}) - for _, nodePoolRaw := range nodePoolsRaw { - nodePool := nodePoolRaw.(map[string]interface{}) + workerPools := d.Get("worker_pool").([]interface{}) + for _, w := range workerPools { + workerPool := w.(map[string]interface{}) for _, arg := range []string{"name"} { - if _, ok := nodePool[arg]; !ok { - diags = append(diags, diag.Errorf("the argument 'node_pool.%s' is required, but no definition was found", arg)...) 
+ if _, ok := workerPool[arg]; !ok { + diags = append(diags, diag.Errorf("the argument 'worker_pool.%s' is required, but no definition was found", arg)...) } } } @@ -391,13 +391,30 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) } + apiTokenFile := d.Get("api_token_file").(string) + apiToken, err := govcd.GetTokenFromFile(apiTokenFile) + if err != nil { + return diag.Errorf("could not read the API token from the file '%s': %s", apiTokenFile, err) + } + owner := d.Get("owner").(string) + if owner == "" { + session, err := vcdClient.Client.GetSessionInfo() + if err != nil { + return diag.Errorf("could not get an Owner for the Kubernetes cluster. 'owner' is not set and cannot get one from the Provider configuration: %s", err) + } + owner = session.User.Name + if owner == "" { + return diag.Errorf("could not get an Owner for the Kubernetes cluster. 'owner' is not set and cannot get one from the Provider configuration") + } + } + creationData := govcd.CseClusterSettings{ + CseVersion: *cseVersion, Name: d.Get("name").(string), OrganizationId: org.Org.ID, VdcId: d.Get("vdc_id").(string), NetworkId: d.Get("network_id").(string), KubernetesTemplateOvaId: d.Get("ova_id").(string), - CseVersion: *cseVersion, ControlPlane: govcd.CseControlPlaneSettings{ MachineCount: d.Get("control_plane.0.machine_count").(int), DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), @@ -406,12 +423,20 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour StorageProfileId: d.Get("control_plane.0.storage_profile_id").(string), Ip: d.Get("control_plane.0.ip").(string), }, + Owner: owner, + ApiToken: apiToken.RefreshToken, + NodeHealthCheck: d.Get("node_health_check").(bool), + PodCidr: d.Get("pods_cidr").(string), + ServiceCidr: d.Get("services_cidr").(string), + SshPublicKey: d.Get("ssh_public_key").(string), + VirtualIpSubnet: d.Get("virtual_ip_subnet").(string), + AutoRepairOnErrors: d.Get("auto_repair_on_errors").(bool), } - workerPoolsAttr := d.Get("worker_pool").(*schema.Set).List() + workerPoolsAttr := d.Get("worker_pool").([]interface{}) workerPools := make([]govcd.CseWorkerPoolSettings, len(workerPoolsAttr)) - for i, workerPoolRaw := range workerPoolsAttr { - workerPool := workerPoolRaw.(map[string]interface{}) + for i, w := range workerPoolsAttr { + workerPool := w.(map[string]interface{}) workerPools[i] = govcd.CseWorkerPoolSettings{ Name: workerPool["name"].(string), MachineCount: workerPool["machine_count"].(int), @@ -510,14 +535,14 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, payload := govcd.CseClusterUpdateInput{} if d.HasChange("worker_pool") { workerPools := map[string]govcd.CseWorkerPoolUpdateInput{} - for _, workerPoolAttr := range d.Get("worker_pool").(*schema.Set).List() { + for _, workerPoolAttr := range d.Get("worker_pool").([]interface{}) { w := workerPoolAttr.(map[string]interface{}) workerPools[w["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: w["machine_count"].(int)} } payload.WorkerPools = &workerPools } - err = cluster.Update(payload, false) + err = cluster.Update(payload, true) if err != nil { if cluster != nil { if cluster.State != "provisioned" { @@ -605,22 +630,15 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - if 
orgName == "" { - // Data source - dSet(d, "org_id", cluster.OrganizationId) - } else { - // Resource - if _, ok := d.GetOk("org"); ok { - // This field is optional, as it can take the value from the VCD client - dSet(d, "org", orgName) - } - if _, ok := d.GetOk("api_token_file"); !ok { - // During imports, this field is impossible to get, so we set an artificial value, as this argument - // is required at runtime - dSet(d, "api_token_file", "******") - } + if _, ok := d.GetOk("org"); ok { + // This field is optional, as it can take the value from the VCD client + dSet(d, "org", orgName) + } + if _, ok := d.GetOk("api_token_file"); !ok { + // During imports, this field is impossible to get, so we set an artificial value, as this argument + // is required at runtime + dSet(d, "api_token_file", "******") } - if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client dSet(d, "owner", cluster.Owner) @@ -637,7 +655,7 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes "machine_count": nodePool.MachineCount, } } - err = d.Set("node_pool", nodePoolBlocks) + err = d.Set("worker_pool", nodePoolBlocks) if err != nil { return nil, err } diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index ebea884ed..0402f0332 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -18,19 +18,46 @@ Supports the following **Container Service Extension** versions: -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) -## Example Usage +## Example Usage with ID + +The cluster ID identifies unequivocally the cluster within VCD, and can be obtained with the CSE UI Plugin. + +This option requires only the target organization where the Kubernetes cluster is located and its ID: ```hcl data "vcd_cse_kubernetes_cluster" "my_cluster" { + org = "tenant_org" cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" } ``` +## Example Usage with Name + +Sometimes using the cluster ID is not convenient, so this data source allows to use the cluster name. +As VCD allows to have multiple clusters with the same name, this option must be used with caution as it will fail +if there is more than one Kubernetes cluster with the same name in the same Organization: + +```hcl +locals { + my_clusters = [ "beta1", "test2", "foo45"] +} + +data "vcd_cse_kubernetes_cluster" "my_cluster" { + for_each = local.my_clusters + org = "tenant_org" + cse_version = "4.2.0" + name = each.key +} +``` + ## Argument Reference The following arguments are supported: -* `cluster_id` - (Required) Unequivocally identifies a cluster in VCD +* `org` - (Optional) The name of the Organization to which the Kubernetes cluster belongs. Optional if defined at provider level. +* `cluster_id` - (Optional) Unequivocally identifies a cluster in VCD. It must belong to the given `org`. Either `cluster_id` or `name` must be set. +* `name` - (Optional) Allows to find a Kubernetes cluster by name inside the given `org`. Either `cluster_id` or `name` must be set. This argument requires `cse_version` to be set. +* `cse_version` - (Optional) Specifies the CSE Version of the cluster to find when `name` is used instead of `cluster_id`. 
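Once read, the attributes documented below can be used like any other data source output. For example, a sketch that exposes the cluster's Kubeconfig, assuming the data source publishes a `kubeconfig` attribute as the equivalent resource does:

```hcl
data "vcd_cse_kubernetes_cluster" "by_id" {
  cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865"
}

output "my_cluster_kubeconfig" {
  value     = data.vcd_cse_kubernetes_cluster.by_id.kubeconfig
  sensitive = true
}
```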
## Attribute Reference From e9536c991093c067f2fbddff3fec5e831f27f314 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 12 Feb 2024 17:37:08 +0100 Subject: [PATCH 072/156] Improvements Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 40 +++++++++++-------- vcd/resource_vcd_cse_kubernetes_cluster.go | 39 ++++-------------- .../d/cse_kubernetes_cluster.html.markdown | 10 ++--- 3 files changed, 34 insertions(+), 55 deletions(-) diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index 3d02ebd22..f18047940 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -13,30 +13,30 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { return &schema.Resource{ ReadContext: datasourceVcdCseKubernetesRead, Schema: map[string]*schema.Schema{ - "org": { - Type: schema.TypeString, - Optional: true, - Description: "The name of organization that owns the Kubernetes cluster, optional if defined at provider " + - "level. Useful when connected as sysadmin working across different organizations", - }, "cluster_id": { Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"cluster_id", "name"}, - Description: "The unique ID of the Kubernetes cluster to read that must be present in the Organization", + Description: "The unique ID of the Kubernetes cluster to read", }, "name": { Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"cluster_id", "name"}, - RequiredWith: []string{"cse_version"}, + RequiredWith: []string{"cse_version", "org"}, Description: "The name of the Kubernetes cluster to read. If there is more than one Kubernetes cluster with the same name, searching by name will fail", }, + "org_id": { + Type: schema.TypeString, + Optional: true, + RequiredWith: []string{"cse_version", "name"}, + Description: "The ID of organization that owns the Kubernetes cluster, only required if 'name' is set", + }, "cse_version": { Type: schema.TypeString, Optional: true, - RequiredWith: []string{"name"}, - Description: "The CSE version used by the cluster", + RequiredWith: []string{"name", "org"}, + Description: "The CSE version used by the cluster, only required if 'name' is set", }, "runtime": { Type: schema.TypeString, @@ -256,14 +256,10 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m var diags diag.Diagnostics vcdClient := meta.(*VCDClient) - org, err := vcdClient.GetOrgFromResource(d) - if err != nil { - return diag.Errorf("could not get the target Organization: %s", err) - } - var cluster *govcd.CseKubernetesCluster + var err error if id, ok := d.GetOk("cluster_id"); ok { - cluster, err = org.CseGetKubernetesClusterById(id.(string)) + cluster, err = vcdClient.CseGetKubernetesClusterById(id.(string)) if err != nil { return diag.FromErr(err) } @@ -272,6 +268,13 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m if err != nil { return diag.Errorf("could not parse cse_version='%s': %s", cseVersion, err) } + + orgId := d.Get("org_id").(string) + org, err := vcdClient.GetOrgById(orgId) + if err != nil { + return diag.Errorf("could not find an Organization with ID '%s': %s", orgId, err) + } + clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, name.(string)) if err != nil { return diag.FromErr(err) @@ -282,7 +285,10 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m cluster = clusters[0] } - warns, err := saveClusterDataToState(d, cluster, 
org.Org.Name) + dSet(d, "org_id", cluster.OrganizationId) + dSet(d, "cluster_id", cluster.ID) + + warns, err := saveClusterDataToState(d, cluster) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 2497cd872..746b77944 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -480,19 +480,14 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met var diags diag.Diagnostics vcdClient := meta.(*VCDClient) - org, err := vcdClient.GetOrgFromResource(d) - if err != nil { - return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) - } - // The ID must be already set for the read to be successful. We can't rely on the name as there can be // many clusters with the same name in the same org. - cluster, err := org.CseGetKubernetesClusterById(d.Id()) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, cluster, org.Org.Name) + warns, err := saveClusterDataToState(d, cluster) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -523,12 +518,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, } vcdClient := meta.(*VCDClient) - org, err := vcdClient.GetOrgFromResource(d) - if err != nil { - return diag.Errorf("could not create a Kubernetes cluster in the target Organization: %s", err) - } - - cluster, err := org.CseGetKubernetesClusterById(d.Id()) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { return diag.Errorf("could not get Kubernetes cluster with ID '%s': %s", d.Id(), err) } @@ -561,13 +551,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, // to be gone. func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { vcdClient := meta.(*VCDClient) - - org, err := vcdClient.GetOrgFromResource(d) - if err != nil { - return diag.Errorf("could not get Organization: %s", err) - } - - cluster, err := org.CseGetKubernetesClusterById(d.Id()) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { return diag.FromErr(err) } @@ -585,17 +569,12 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m } vcdClient := meta.(*VCDClient) - org, err := vcdClient.GetOrgByName(resourceURI[0]) - if err != nil { - return nil, fmt.Errorf("could not get Organization with name '%s': %s", resourceURI[0], err) - } - - cluster, err := org.CseGetKubernetesClusterById(resourceURI[1]) + cluster, err := vcdClient.CseGetKubernetesClusterById(resourceURI[1]) if err != nil { return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", resourceURI[1], err) } - warns, err := saveClusterDataToState(d, cluster, org.Org.Name) + warns, err := saveClusterDataToState(d, cluster) if err != nil { return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) } @@ -609,7 +588,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. 
// Returns a slice of warnings first and an error second. -func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster, orgName string) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster) ([]error, error) { var warnings []error dSet(d, "name", cluster.Name) @@ -630,10 +609,6 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - if _, ok := d.GetOk("org"); ok { - // This field is optional, as it can take the value from the VCD client - dSet(d, "org", orgName) - } if _, ok := d.GetOk("api_token_file"); !ok { // During imports, this field is impossible to get, so we set an artificial value, as this argument // is required at runtime diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index 0402f0332..cbf4f0c9e 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -20,13 +20,11 @@ Supports the following **Container Service Extension** versions: ## Example Usage with ID -The cluster ID identifies unequivocally the cluster within VCD, and can be obtained with the CSE UI Plugin. - -This option requires only the target organization where the Kubernetes cluster is located and its ID: +The cluster ID identifies unequivocally the cluster within VCD, and can be obtained with the CSE Kubernetes Clusters UI Plugin, by selecting +the desired cluster and obtaining the ID from the displayed information. ```hcl data "vcd_cse_kubernetes_cluster" "my_cluster" { - org = "tenant_org" cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" } ``` @@ -54,8 +52,8 @@ data "vcd_cse_kubernetes_cluster" "my_cluster" { The following arguments are supported: -* `org` - (Optional) The name of the Organization to which the Kubernetes cluster belongs. Optional if defined at provider level. -* `cluster_id` - (Optional) Unequivocally identifies a cluster in VCD. It must belong to the given `org`. Either `cluster_id` or `name` must be set. +* `cluster_id` - (Optional) Unequivocally identifies a cluster in VCD. Either `cluster_id` or `name` must be set. +* `org` - (Optional) The name of the Organization to which the Kubernetes cluster belongs. Optional if defined at provider level. Only used if `cluster_id` is not set. * `name` - (Optional) Allows to find a Kubernetes cluster by name inside the given `org`. Either `cluster_id` or `name` must be set. This argument requires `cse_version` to be set. * `cse_version` - (Optional) Specifies the CSE Version of the cluster to find when `name` is used instead of `cluster_id`. 
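For reference, a usage sketch that matches the schema introduced above, where a by-name search requires the organization ID (`org_id`) and `cse_version` instead of the organization name. The `vcd_org` data source is only assumed here to resolve that ID, and the cluster and organization names are illustrative:

```hcl
data "vcd_org" "tenant_org" {
  name = "tenant_org"
}

data "vcd_cse_kubernetes_cluster" "by_name" {
  org_id      = data.vcd_org.tenant_org.id
  cse_version = "4.2.0"
  name        = "my-cluster"
}
```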
From f981dc3ec1cb29681f9f872b363543dda6d46473 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 13:06:30 +0100 Subject: [PATCH 073/156] Add import clarifications Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 4 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 120 ++++++------------ .../r/cse_kubernetes_cluster.html.markdown | 93 +++++++++++++- 3 files changed, 130 insertions(+), 87 deletions(-) diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index f18047940..b714d90a2 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -23,7 +23,7 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Optional: true, ExactlyOneOf: []string{"cluster_id", "name"}, - RequiredWith: []string{"cse_version", "org"}, + RequiredWith: []string{"cse_version", "org_id"}, Description: "The name of the Kubernetes cluster to read. If there is more than one Kubernetes cluster with the same name, searching by name will fail", }, "org_id": { @@ -35,7 +35,7 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { "cse_version": { Type: schema.TypeString, Optional: true, - RequiredWith: []string{"name", "org"}, + RequiredWith: []string{"name", "org_id"}, Description: "The CSE version used by the cluster, only required if 'name' is set", }, "runtime": { diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 746b77944..ebf9d58b8 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" - "strings" "time" ) @@ -26,7 +25,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "cse_version": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), Description: "The CSE version to use", @@ -54,7 +53,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "name": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Description: "The name of the Kubernetes cluster", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ @@ -62,7 +61,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "ova_id": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", }, @@ -75,13 +74,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "vdc_id": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Description: "The ID of the VDC that hosts the Kubernetes cluster", }, "network_id": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Description: "The ID of the network that the Kubernetes cluster will use", }, @@ -93,7 +92,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "api_token_file": { Type: 
schema.TypeString, - Optional: true, // Required, but validated at runtime + Optional: true, + Computed: true, ForceNew: true, Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. Be careful about this file, as it contains sensitive information", }, @@ -106,7 +106,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "control_plane": { Type: schema.TypeList, MaxItems: 1, - Optional: true, // Required, but validated at runtime + Required: true, Description: "Defines the control plane for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -165,13 +165,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "worker_pool": { Type: schema.TypeList, - Optional: true, // Required, but validated at runtime + Required: true, Description: "Defines a node pool for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Description: "The name of this worker pool", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ @@ -227,13 +227,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "storage_profile_id": { - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Type: schema.TypeString, Description: "ID of the storage profile to use for the storage class", }, "name": { - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Type: schema.TypeString, Description: "Name to give to this storage class", @@ -241,14 +241,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "reclaim_policy": { - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", }, "filesystem": { - Optional: true, // Required, but validated at runtime + Required: true, ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"ext4", "xfs"}, false), @@ -277,7 +277,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "auto_repair_on_errors": { Type: schema.TypeBool, Optional: true, - Default: false, + Computed: true, // CSE Server turns this off when the cluster is created Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", }, "node_health_check": { @@ -342,44 +342,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { } } -// validateCseKubernetesClusterSchema validates all the required arguments at runtime. All the required arguments -// are marked as Optional in the schema to facilitate the Import operation, but some of them are actually mandatory. 
-func validateCseKubernetesClusterSchema(d *schema.ResourceData) diag.Diagnostics { - var diags diag.Diagnostics - for _, arg := range []string{"cse_version", "name", "ova_id", "vdc_id", "network_id", "api_token_file", "control_plane", "worker_pool"} { - if _, ok := d.GetOk(arg); !ok { - diags = append(diags, diag.Errorf("the argument '%s' is required, but no definition was found", arg)...) - } - } - workerPools := d.Get("worker_pool").([]interface{}) - for _, w := range workerPools { - workerPool := w.(map[string]interface{}) - for _, arg := range []string{"name"} { - if _, ok := workerPool[arg]; !ok { - diags = append(diags, diag.Errorf("the argument 'worker_pool.%s' is required, but no definition was found", arg)...) - } - } - } - if _, ok := d.GetOk("default_storage_class"); ok { - for _, arg := range []string{"storage_profile_id", "name", "reclaim_policy", "filesystem"} { - if _, ok := d.GetOk("default_storage_class.0." + arg); !ok { - diags = append(diags, diag.Errorf("the argument 'default_storage_class.%s' is required, but no definition was found", arg)...) - } - } - } - - if len(diags) > 0 { - return diags - } - return nil -} - func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - diags := validateCseKubernetesClusterSchema(d) - if diags != nil { - return diags - } - cseVersion, err := semver.NewSemver(d.Get("cse_version").(string)) if err != nil { return diag.Errorf("the introduced 'cse_version=%s' is not valid: %s", d.Get("cse_version"), err) @@ -507,11 +470,6 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met // resourceVcdCseKubernetesUpdate updates the Kubernetes clusters. Note that re-creating the CAPI YAML and sending it // back will break everything, so we must patch the YAML piece by piece. 
func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - diags := validateCseKubernetesClusterSchema(d) - if diags != nil { - return diags - } - // Some arguments don't require changes in the backend if !d.HasChangesExcept("operations_timeout_minutes") { return nil @@ -563,15 +521,10 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m } func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - resourceURI := strings.Split(d.Id(), ImportSeparator) - if len(resourceURI) != 2 { - return nil, fmt.Errorf("resource name must be specified as organization_name.cluster_id, but it was '%s'", d.Id()) - } - vcdClient := meta.(*VCDClient) - cluster, err := vcdClient.CseGetKubernetesClusterById(resourceURI[1]) + cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { - return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", resourceURI[1], err) + return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) } warns, err := saveClusterDataToState(d, cluster) @@ -592,15 +545,15 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes var warnings []error dSet(d, "name", cluster.Name) - dSet(d, "cse_version", cluster.CseVersion) + dSet(d, "cse_version", cluster.CseVersion.String()) dSet(d, "runtime", "tkg") // Only one supported dSet(d, "vdc_id", cluster.VdcId) dSet(d, "network_id", cluster.NetworkId) - dSet(d, "cpi_version", cluster.CpiVersion) - dSet(d, "csi_version", cluster.CsiVersion) - dSet(d, "capvcd_version", cluster.CapvcdVersion) - dSet(d, "kubernetes_version", cluster.KubernetesVersion) - dSet(d, "tkg_product_version", cluster.TkgVersion) + dSet(d, "cpi_version", cluster.CpiVersion.String()) + dSet(d, "csi_version", cluster.CsiVersion.String()) + dSet(d, "capvcd_version", cluster.CapvcdVersion.String()) + dSet(d, "kubernetes_version", cluster.KubernetesVersion.String()) + dSet(d, "tkg_product_version", cluster.TkgVersion.String()) dSet(d, "pods_cidr", cluster.PodCidr) dSet(d, "services_cidr", cluster.ServiceCidr) dSet(d, "ova_id", cluster.KubernetesTemplateOvaId) @@ -624,32 +577,43 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes return nil, err } - nodePoolBlocks := make([]map[string]interface{}, len(cluster.WorkerPools)) - for i, nodePool := range cluster.WorkerPools { - nodePoolBlocks[i] = map[string]interface{}{ - "machine_count": nodePool.MachineCount, + workerPoolBlocks := make([]map[string]interface{}, len(cluster.WorkerPools)) + for i, workerPool := range cluster.WorkerPools { + workerPoolBlocks[i] = map[string]interface{}{ + "machine_count": workerPool.MachineCount, + "name": workerPool.Name, + "vgpu_policy_id": workerPool.VGpuPolicyId, + "sizing_policy_id": workerPool.SizingPolicyId, + "placement_policy_id": workerPool.PlacementPolicyId, + "storage_profile_id": workerPool.StorageProfileId, + "disk_size_gi": workerPool.DiskSizeGi, } } - err = d.Set("worker_pool", nodePoolBlocks) + err = d.Set("worker_pool", workerPoolBlocks) if err != nil { return nil, err } err = d.Set("control_plane", []map[string]interface{}{ { - "machine_count": cluster.ControlPlane.MachineCount, + "machine_count": cluster.ControlPlane.MachineCount, + "ip": cluster.ControlPlane.Ip, + "sizing_policy_id": cluster.ControlPlane.SizingPolicyId, + "placement_policy_id": cluster.ControlPlane.PlacementPolicyId, + 
"storage_profile_id": cluster.ControlPlane.StorageProfileId, + "disk_size_gi": cluster.ControlPlane.DiskSizeGi, }, }) if err != nil { return nil, err } - err = d.Set("default_storage_class", map[string]interface{}{ + err = d.Set("default_storage_class", []map[string]interface{}{{ "storage_profile_id": cluster.DefaultStorageClass.StorageProfileId, "name": cluster.DefaultStorageClass.Name, "reclaim_policy": cluster.DefaultStorageClass.ReclaimPolicy, "filesystem": cluster.DefaultStorageClass.Filesystem, - }) + }}) if err != nil { return nil, err } diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index a44a1c768..aec64b33e 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -262,23 +262,102 @@ The Kubeconfig can now be used with `kubectl` and the Kubernetes cluster can be ## Importing -!!!!!!!!!!! TODO: NOT IMPLEMENTED. HOW TO DEAL WITH REQUIRED IDS? - ~> The current implementation of Terraform import can only import resources into the state. It does not generate configuration. [More information.](https://www.terraform.io/docs/import/) -An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the CSE Version installed in VCD and the Cluster (RDE) ID for it. -An example is below: +An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the **Cluster ID** for it. +The ID can be easily obtained in VCD UI, in the CSE Kubernetes Container Clusters plugin. + +An example is below. During import, none of the mentioned arguments are required, but they will in subsequent Terraform commands +such as `terraform plan`. Each comment in the code gives some context about how to obtain them to have a completely manageable cluster: ```hcl -# This is just a snippet of code that will host the imported cluster from VCD. -# This must not be created with Terraform beforehand +# This is just a snippet of code that will host the imported cluster that already exists in VCD. +# This must NOT be created with Terraform beforehand, it is just a shell that will receive the information resource "vcd_cse_kubernetes_cluster" "imported_cluster" { + name = "test2" # The name of the existing cluster + cse_version = "4.2.0" # The CSE version installed in your VCD + ova_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources + vdc_id = data.vcd_org_vdc.vdc.id # See below data sources + network_id = data.vcd_network_routed_v2.routed.id # See below data sources + node_health_check = true # Whether the existing cluster has Machine Health Check enabled or not, this can be checked in UI + + control_plane { + machine_count = 5 # This is optional, but not setting it to the current value will make subsequent plans to try to scale our existing cluster to the default one + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id # See below data sources + storage_profile_id = data.vcd_storage_profile.sp.id # See below data sources + } + + worker_pool { + name = "node-pool-1" # The name of the existing worker pool of the existing cluster. 
Retrievable from UI + machine_count = 40 # This is optional, but not setting it to the current value will make subsequent plans to try to scale our existing cluster to the default one + sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id # See below data sources + storage_profile_id = data.vcd_storage_profile.sp.id # See below data sources + } + + # While optional, we cannot change the Default Storage Class after an import, so we need + # to set the information of the existing cluster to avoid re-creation. + # The information can be retrieved from UI + default_storage_class { + filesystem = "ext4" + name = "sc-1" + reclaim_policy = "delete" + storage_profile_id = data.vcd_storage_profile.sp.id # See below data sources + } +} + +# The below data sources are needed to retrieve the required IDs. They are not needed +# during the Import phase, but they will be asked when operating it afterwards + +# The VDC and Organization where the existing cluster is located +data "vcd_org_vdc" "vdc" { + org = "tenant_org" + name = "tenant_vdc" +} + +# The OVA that the existing cluster is using. You can obtain the OVA by inspecting +# the existing cluster TKG/Kubernetes version. +data "vcd_catalog_vapp_template" "tkg_ova" { + org = data.vcd_catalog.tkg_catalog.org + catalog_id = data.vcd_catalog.tkg_catalog.id + name = "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" +} + +# The network that the existing cluster is using +data "vcd_network_routed_v2" "routed" { + org = data.vcd_nsxt_edgegateway.egw.org + edge_gateway_id = data.vcd_nsxt_edgegateway.egw.id + name = "tenant_net_routed" +} + +# The VM Sizing Policy of the existing cluster nodes +data "vcd_vm_sizing_policy" "tkg_small" { + name = "TKG small" +} + +# The Storage Profile that the existing cluster uses +data "vcd_storage_profile" "sp" { + org = data.vcd_org_vdc.vdc.org + vdc = data.vcd_org_vdc.vdc.name + name = "*" } + +data "vcd_catalog" "tkg_catalog" { + org = "solutions_org" # The Organization that shares the TKGm OVAs with the tenants + name = "tkgm_catalog" # The Catalog name +} + +data "vcd_nsxt_edgegateway" "egw" { + org = data.vcd_org_vdc.vdc.org + owner_id = data.vcd_org_vdc.vdc.id + name = "tenant_edgegateway" +} + + ``` ```sh -terraform import vcd_cse_kubernetes_cluster.imported_cluster 4.2.urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 +terraform import vcd_cse_kubernetes_cluster.imported_cluster urn:vcloud:entity:vmware:capvcdCluster:1d24af33-6e5a-4d47-a6ea-06d76f3ee5c9 ``` -> The ID is required as it is the only way to unequivocally identify a Kubernetes cluster inside VCD. To obtain the ID From 1706ed9dca61a9466a8a4aa1dd23dc15804da9ce Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 13:08:08 +0100 Subject: [PATCH 074/156] Add import clarifications Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index aec64b33e..8ae5a6483 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -268,12 +268,13 @@ It does not generate configuration. [More information.](https://www.terraform.io An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the **Cluster ID** for it. The ID can be easily obtained in VCD UI, in the CSE Kubernetes Container Clusters plugin. 
-An example is below. During import, none of the mentioned arguments are required, but they will in subsequent Terraform commands +An example is below. During import, none of the mentioned arguments are required, but they will be in subsequent Terraform commands such as `terraform plan`. Each comment in the code gives some context about how to obtain them to have a completely manageable cluster: ```hcl # This is just a snippet of code that will host the imported cluster that already exists in VCD. # This must NOT be created with Terraform beforehand, it is just a shell that will receive the information +# None of the arguments are required during the Import phase, but they will be asked when operating it afterwards resource "vcd_cse_kubernetes_cluster" "imported_cluster" { name = "test2" # The name of the existing cluster cse_version = "4.2.0" # The CSE version installed in your VCD @@ -352,8 +353,6 @@ data "vcd_nsxt_edgegateway" "egw" { owner_id = data.vcd_org_vdc.vdc.id name = "tenant_edgegateway" } - - ``` ```sh From 457021ba11df5fa5a10beb915037663a94da8c8d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 14:28:41 +0100 Subject: [PATCH 075/156] Fixes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 68 ++++++++----- .../r/cse_kubernetes_cluster.html.markdown | 95 ++++++++----------- 2 files changed, 84 insertions(+), 79 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index ebf9d58b8..a2a5fef95 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -59,7 +59,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, - "ova_id": { + "kubernetes_template_id": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -67,7 +67,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "org": { Type: schema.TypeString, - Optional: true, + Optional: true, // Gets the Provider org if not set ForceNew: true, Description: "The name of organization that will own this Kubernetes cluster, optional if defined at provider " + "level. Useful when connected as sysadmin working across different organizations", @@ -86,14 +86,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "owner": { Type: schema.TypeString, - Optional: true, + Optional: true, // Gets the Provider user if not set ForceNew: true, Description: "The user that creates the cluster and owns the API token specified in 'api_token'. It must have the 'Kubernetes Cluster Author' role. If not specified, it assumes it's the user from the provider configuration", }, "api_token_file": { Type: schema.TypeString, Optional: true, - Computed: true, + Computed: true, // It is never used again after creation. Rather, it gets always emptied ForceNew: true, Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. 
Be careful about this file, as it contains sensitive information", }, @@ -105,8 +105,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "control_plane": { Type: schema.TypeList, - MaxItems: 1, Required: true, + MaxItems: 1, Description: "Defines the control plane for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -155,7 +155,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "ip": { Type: schema.TypeString, Optional: true, - Computed: true, + Computed: true, // IP can be auto-assigned if left-empty ForceNew: true, Description: "IP for the control plane. It will be automatically assigned during cluster creation if left empty", ValidateFunc: checkEmptyOrSingleIP(), @@ -221,8 +221,8 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "default_storage_class": { Type: schema.TypeList, - MaxItems: 1, Optional: true, + MaxItems: 1, Description: "Defines the default storage class for the cluster", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -277,7 +277,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "auto_repair_on_errors": { Type: schema.TypeBool, Optional: true, - Computed: true, // CSE Server turns this off when the cluster is created + Computed: true, // CSE Server turns this off after the cluster is successfully provisioned Description: "If errors occur before the Kubernetes cluster becomes available, and this argument is 'true', CSE Server will automatically attempt to repair the cluster", }, "node_health_check": { @@ -338,6 +338,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, + "supported_upgrades": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of vApp Template names that can be used to upgrade the cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, }, } } @@ -377,7 +385,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour OrganizationId: org.Org.ID, VdcId: d.Get("vdc_id").(string), NetworkId: d.Get("network_id").(string), - KubernetesTemplateOvaId: d.Get("ova_id").(string), + KubernetesTemplateOvaId: d.Get("kubernetes_template_id").(string), ControlPlane: govcd.CseControlPlaneSettings{ MachineCount: d.Get("control_plane.0.machine_count").(int), DiskSizeGi: d.Get("control_plane.0.disk_size_gi").(int), @@ -422,20 +430,21 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } cluster, err := org.CseCreateKubernetesCluster(creationData, time.Duration(d.Get("operations_timeout_minutes").(int))*time.Minute) - if err != nil { - if cluster != nil { - if cluster.State != "provisioned" { - return diag.Errorf("Kubernetes cluster creation finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.State, err) - } - } + if err != nil && cluster == nil { return diag.Errorf("Kubernetes cluster creation failed: %s", err) } - // We need to set the ID here to be able to distinguish this cluster from all the others that may have the same name and RDE Type. + + // If we get here, it means we got either a successful created cluster, a timeout or a cluster in "error" state. + // Either way, from this point we should go to the Update logic as the cluster is definitely present in VCD, so we store the ID. + // Also, we need to set the ID to be able to distinguish this cluster from all the others that may have the same name and RDE Type. 
// We could use some other ways of filtering, but ID is the only accurate. - // Also, the RDE is created at this point, so Terraform should trigger an update/delete next. // If the cluster can't be created due to errors, users should delete it and retry, like in UI. d.SetId(cluster.ID) + if cluster.State != "provisioned" { + return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: '%s'", err) + } + return resourceVcdCseKubernetesRead(ctx, d, meta) } @@ -475,6 +484,8 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return nil } + // TODO: If creation ended in error???? Should we check cluster state and auto_recover?? + vcdClient := meta.(*VCDClient) cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { @@ -556,17 +567,13 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "tkg_product_version", cluster.TkgVersion.String()) dSet(d, "pods_cidr", cluster.PodCidr) dSet(d, "services_cidr", cluster.ServiceCidr) - dSet(d, "ova_id", cluster.KubernetesTemplateOvaId) + dSet(d, "kubernetes_template_id", cluster.KubernetesTemplateOvaId) dSet(d, "ssh_public_key", cluster.SshPublicKey) dSet(d, "virtual_ip_subnet", cluster.VirtualIpSubnet) dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) + dSet(d, "api_token_file", "") - if _, ok := d.GetOk("api_token_file"); !ok { - // During imports, this field is impossible to get, so we set an artificial value, as this argument - // is required at runtime - dSet(d, "api_token_file", "******") - } if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client dSet(d, "owner", cluster.Owner) @@ -620,10 +627,23 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "state", cluster.State) + supportedUpgrades, err := cluster.GetSupportedUpgrades(true) + if err != nil { + return nil, fmt.Errorf("could not fetch the supported upgrades for the Kubernetes cluster with ID '%s': %s", cluster.ID, err) + } + supportedUpgradesNames := make([]string, len(supportedUpgrades)) + for i, upgrade := range supportedUpgrades { + supportedUpgradesNames[i] = upgrade.Name + } + err = d.Set("supported_upgrades", supportedUpgradesNames) + if err != nil { + return nil, err + } + if cluster.State == "provisioned" { kubeconfig, err := cluster.GetKubeconfig() if err != nil { - return nil, fmt.Errorf("error getting Kubeconfig for Kubernetes cluster with ID '%s': %s", cluster.ID, err) + return nil, fmt.Errorf("error getting Kubeconfig for the Kubernetes cluster with ID '%s': %s", cluster.ID, err) } dSet(d, "kubeconfig", kubeconfig) } else { diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 8ae5a6483..4d11052a2 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -26,7 +26,7 @@ data "vcd_catalog" "tkg_catalog" { name = "tkgm_catalog" } -# Fetch a valid Kubernetes template OVA +# Fetch a valid Kubernetes template OVA. If it's not valid, cluster creation will fail. 
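+# A valid template is a TKGm Kubernetes OVA, for example "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc"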
data "vcd_catalog_vapp_template" "tkg_ova" { org = data.vcd_catalog.tkg_catalog.org catalog_id = data.vcd_catalog.tkg_catalog.id @@ -70,14 +70,14 @@ resource "vcd_api_token" "token" { } resource "vcd_cse_kubernetes_cluster" "my_cluster" { - cse_version = "4.2" - runtime = "tkg" - name = "my-cluster" - ova_id = data.vcd_catalog_vapp_template.tkg_ova.id - org = data.vcd_org_vdc.vdc.org - vdc_id = data.vcd_org_vdc.vdc.id - network_id = data.vcd_network_routed_v2.routed.id - api_token_file = vcd_api_token.token.file_name + cse_version = "4.2.0" + runtime = "tkg" + name = "test2" + kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id + org = data.vcd_org_vdc.vdc.org + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name control_plane { machine_count = 1 @@ -86,7 +86,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } - node_pool { + worker_pool { name = "node-pool-1" machine_count = 1 disk_size_gi = 20 @@ -94,14 +94,6 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } - node_pool { - name = "node-pool-2" - machine_count = 1 - disk_size_gi = 20 - sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id - storage_profile_id = data.vcd_storage_profile.sp.id - } - default_storage_class { name = "sc-1" storage_profile_id = data.vcd_storage_profile.sp.id @@ -109,11 +101,10 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { filesystem = "ext4" } - pods_cidr = "100.10.0.0/11" - services_cidr = "100.90.0.0/11" + auto_repair_on_errors = true + node_health_check = true - auto_repair_on_errors = false - node_health_check = false + operations_timeout_minutes = 0 } output "kubeconfig" { @@ -125,36 +116,37 @@ output "kubeconfig" { The following arguments are supported: -* `cse_version` - (Required) Specifies the CSE version to use. Only `4.2` is supported +* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1` and `4.2.0` * `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) * `name` - (Required) The name of the Kubernetes cluster. It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters -* `ova_id` - (Required) The ID of the vApp Template that corresponds to a Kubernetes template OVA +* `kubernetes_template_id` - (Required) The ID of the vApp Template that corresponds to a Kubernetes template OVA * `org` - (Optional) The name of organization that will host the Kubernetes cluster, optional if defined in the provider configuration * `vdc_id` - (Required) The ID of the VDC that hosts the Kubernetes cluster * `network_id` - (Required) The ID of the network that the Kubernetes cluster will use * `owner` - (Optional) The user that creates the cluster and owns the API token specified in `api_token`. It must have the `Kubernetes Cluster Author` role that was created during CSE installation. If not specified, it assumes it's the user from the provider configuration -* `api_token_file` - (Required) A file generated by [`vcd_api_token` resource](/providers/vmware/vcd/latest/docs/resources/api_token), - that stores the API token used to create and manage the cluster, owned by the user specified in `owner`. 
- Be careful about this file, as it contains sensitive information -* `ssh_public_key` - (Optional) The SSH public key used to login into the cluster nodes +* `api_token_file` - (Required) Must be a file generated by [`vcd_api_token` resource](/providers/vmware/vcd/latest/docs/resources/api_token), + or a file that follows the same formatting, that stores the API token used to create and manage the cluster, + owned by the user specified in `owner`. Be careful about this file, as it contains sensitive information +* `ssh_public_key` - (Optional) The SSH public key used to log in into the cluster nodes * `control_plane` - (Required) See [**Control Plane**](#control-plane) -* `node_pool` - (Required) See [**Node Pools**](#node-pools) +* `worker_pool` - (Required) See [**Worker Pools**](#worker-pools) * `default_storage_class` - (Optional) See [**Default Storage Class**](#default-storage-class) * `pods_cidr` - (Optional) A CIDR block for the pods to use. Defaults to `100.96.0.0/11` * `services_cidr` - (Optional) A CIDR block for the services to use. Defaults to `100.64.0.0/13` * `virtual_ip_subnet` - (Optional) A virtual IP subnet for the cluster * `auto_repair_on_errors` - (Optional) If errors occur before the Kubernetes cluster becomes available, and this argument is `true`, - CSE Server will automatically attempt to repair the cluster. Defaults to `false` + CSE Server will automatically attempt to repair the cluster. Defaults to `false`. + Since CSE 4.1.1, when the cluster is available/provisioned, this flag is set automatically to false. * `node_health_check` - (Optional) After the Kubernetes cluster becomes available, nodes that become unhealthy will be remediated according to unhealthy node conditions and remediation rules. Defaults to `false` * `operations_timeout_minutes` - (Optional) The time, in minutes, to wait for the cluster operations to be successfully completed. - For example, during cluster creation/update, it should be in `provisioned` state before the timeout is reached, otherwise the + For example, during cluster creation, it should be in `provisioned` state before the timeout is reached, otherwise the operation will return an error. For cluster deletion, this timeout specifies the time to wait until the cluster is completely deleted. - Setting this argument to `0` means to wait indefinitely (not recommended as it could hang Terraform if the cluster can't be created or deleted - due to a configuration error). Defaults to `60` + Setting this argument to `0` means to wait indefinitely (not recommended as it could hang Terraform if the cluster can't be created + due to a configuration error if `auto_repair_on_errors=true`). Defaults to `60` ### Control Plane @@ -170,20 +162,20 @@ This block asks for the following arguments: * `storage_profile_id` - (Optional) Storage profile for the control plane VMs * `ip` - (Optional) IP for the control plane. It will be automatically assigned during cluster creation if left empty -### Node Pools +### Worker Pools -The `node_pool` block is **required**, and every cluster should have **at least one** of them. +The `worker_pool` block is **required**, and every cluster should have **at least one** of them. Each block asks for the following arguments: -* `name` - (Required) The name of the node pool. It must contain only lowercase alphanumeric characters or "-", +* `name` - (Required) The name of the worker pool. 
It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters -* `machine_count` - (Optional) The number of VMs that the node pool has. Must be higher than `0`. Defaults to `1` -* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the node pool VMs. Must be at least `20`. Defaults to `20` +* `machine_count` - (Optional) The number of VMs that the worker pool has. Must be higher than `0`. Defaults to `1` +* `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the worker pool VMs. Must be at least `20`. Defaults to `20` * `sizing_policy_id` - (Optional) VM Sizing policy for the control plane VMs. Must be one of the ones made available during CSE installation -* `placement_policy_id` - (Optional) VM Placement policy for the node pool VMs. If this one is set, `vgpu_policy_id` must be empty -* `vgpu_policy_id` - (Optional) vGPU policy for the node pool VMs. If this one is set, `placement_policy_id` must be empty -* `storage_profile_id` - (Optional) Storage profile for the node pool VMs +* `placement_policy_id` - (Optional) VM Placement policy for the worker pool VMs. If this one is set, `vgpu_policy_id` must be empty +* `vgpu_policy_id` - (Optional) vGPU policy for the worker pool VMs. If this one is set, `placement_policy_id` must be empty +* `storage_profile_id` - (Optional) Storage profile for the worker pool VMs ### Default Storage Class @@ -212,30 +204,23 @@ The following attributes are available for consumption as read-only attributes a created and ready to use, or `error` when an error occurred. `provisioning` can only be obtained when a timeout happens during cluster creation. `error` can only be obtained either with a timeout or when `auto_repair_on_errors=false`. * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned` -* `persistent_volumes` - A set of persistent volumes that are present in the cluster, only available when a `default_storage_class` was provided during - cluster creation: - * `name` - The name of the persistent volume - * `status` - The status of the persistent volume - * `shared` - Whether the persistent volume is shared or not - * `attached_node_count` - How many nodes are consuming the persistent volume - * `iops` - I/O operations per second for the persistent volume - * `size` - Size of the persistent volume - * `storage_profile` - Storage profile name of the persistent volume - * `owner` - Owner of the persistent volume +* `supported_upgrades` - A set of vApp Template names that can be fetched with a + [`vcd_catalog_vapp_template` data source](/providers/vmware/vcd/latest/docs/data-sources/catalog_vapp_template) to upgrade the cluster. ## Updating Only the following arguments can be updated: -* `ova_id`: The cluster must allow upgrading to the new TKG version +* `kubernetes_template_id`: The cluster must allow upgrading to the new TKG version. You can check `supported_upgrades` attribute to know + the available OVAs. * `machine_count` of the `control_plane`: Supports scaling up and down -* `machine_count` of any `node_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. +* `machine_count` of any `worker_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. The cluster must always have at least 1 running node, or else the cluster will enter an unrecoverable error state. 
* `auto_repair_on_errors` * `node_health_check` * `operations_timeout_minutes`: Does not require modifying the existing cluster -You can also add more `node_pool` blocks to add more node pools to the cluster. +You can also add more `worker_pool` blocks to add more worker pools to the cluster. Updating any other argument will delete the existing cluster and create a new one, if the Terraform plan is applied. @@ -278,7 +263,7 @@ such as `terraform plan`. Each comment in the code gives some context about how resource "vcd_cse_kubernetes_cluster" "imported_cluster" { name = "test2" # The name of the existing cluster cse_version = "4.2.0" # The CSE version installed in your VCD - ova_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources + kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources vdc_id = data.vcd_org_vdc.vdc.id # See below data sources network_id = data.vcd_network_routed_v2.routed.id # See below data sources node_health_check = true # Whether the existing cluster has Machine Health Check enabled or not, this can be checked in UI From a4ea880b5a08816217e1fbb981e0a8baf955a631 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 16:19:56 +0100 Subject: [PATCH 076/156] Fixes Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 69 ++++++++++++++++++---- 1 file changed, 56 insertions(+), 13 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index a2a5fef95..8c1203fd6 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" + "sort" "time" ) @@ -164,6 +165,9 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "worker_pool": { + // This is a list because TypeSet tries to replace the whole block when we just change a sub-attribute like "machine_count", + // provoking that the whole cluster is marked to be replaced. On the other hand, with TypeList the updates on sub-attributes + // work as expected but in exchange we need to be careful on reads to guarantee that order is respected. Type: schema.TypeList, Required: true, Description: "Defines a node pool for the cluster", @@ -484,8 +488,6 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, return nil } - // TODO: If creation ended in error???? Should we check cluster state and auto_recover?? 
- vcdClient := meta.(*VCDClient) cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { @@ -493,21 +495,56 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, } payload := govcd.CseClusterUpdateInput{} if d.HasChange("worker_pool") { - workerPools := map[string]govcd.CseWorkerPoolUpdateInput{} - for _, workerPoolAttr := range d.Get("worker_pool").([]interface{}) { - w := workerPoolAttr.(map[string]interface{}) - workerPools[w["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: w["machine_count"].(int)} + oldPools, newPools := d.GetChange("worker_pool") + notNew := map[string]bool{} + + // Fetch the already existing worker pools that have been modified + changePoolsPayload := map[string]govcd.CseWorkerPoolUpdateInput{} + for _, o := range oldPools.([]interface{}) { + oldPool := o.(map[string]interface{}) + for _, n := range newPools.([]interface{}) { + newPool := n.(map[string]interface{}) + if oldPool["name"].(string) == newPool["name"].(string) { + changePoolsPayload[newPool["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: newPool["machine_count"].(int)} + notNew[newPool["name"].(string)] = true // Register this pool as not new + } + } + } + payload.WorkerPools = &changePoolsPayload + + // Fetch the worker pools that are brand new + var addPoolsPayload []govcd.CseWorkerPoolSettings + for _, n := range newPools.([]interface{}) { + newPool := n.(map[string]interface{}) + if _, ok := notNew[newPool["name"].(string)]; !ok { + addPoolsPayload = append(addPoolsPayload, govcd.CseWorkerPoolSettings{ + Name: newPool["name"].(string), + MachineCount: newPool["machine_count"].(int), + DiskSizeGi: newPool["disk_size_gi"].(int), + SizingPolicyId: newPool["sizing_policy_id"].(string), + PlacementPolicyId: newPool["placement_policy_id"].(string), + VGpuPolicyId: newPool["vgpu_policy_id"].(string), + StorageProfileId: newPool["storage_profile_id"].(string), + }) + } + } + payload.NewWorkerPools = &addPoolsPayload + } + if d.HasChange("control_plane") { + controlPlane := govcd.CseControlPlaneUpdateInput{} + for _, controlPlaneAttr := range d.Get("control_plane").([]interface{}) { + c := controlPlaneAttr.(map[string]interface{}) + controlPlane.MachineCount = c["machine_count"].(int) } - payload.WorkerPools = &workerPools + payload.ControlPlane = &controlPlane + } + if d.HasChange("kubernetes_template_id") { + payload.KubernetesTemplateOvaId = addrOf(d.Get("kubernetes_template_id").(string)) } + // If the cluster is not in "provisioned" state, this call should fail err = cluster.Update(payload, true) if err != nil { - if cluster != nil { - if cluster.State != "provisioned" { - return diag.Errorf("Kubernetes cluster update finished, but it is in '%s' state, not 'provisioned': '%s'", cluster.State, err) - } - } return diag.Errorf("Kubernetes cluster update failed: %s", err) } @@ -572,7 +609,7 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "virtual_ip_subnet", cluster.VirtualIpSubnet) dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - dSet(d, "api_token_file", "") + //dSet(d, "api_token_file", "") if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client @@ -596,6 +633,12 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes "disk_size_gi": workerPool.DiskSizeGi, } } + // The "worker_pool" argument is a TypeList, not a TypeSet (check the 
Schema comments for context), + // so we need to guarantee order. We order them by name. + sort.SliceStable(workerPoolBlocks, func(i, j int) bool { + return workerPoolBlocks[i]["name"].(string) < workerPoolBlocks[j]["name"].(string) + }) + err = d.Set("worker_pool", workerPoolBlocks) if err != nil { return nil, err From 083076b9b7380802d120a8afc9a7bbb500e28444 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 16:39:42 +0100 Subject: [PATCH 077/156] Fixes and finish Update Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 8c1203fd6..b977b0197 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -542,7 +542,14 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, payload.KubernetesTemplateOvaId = addrOf(d.Get("kubernetes_template_id").(string)) } - // If the cluster is not in "provisioned" state, this call should fail + if d.HasChange("node_health_check") { + payload.NodeHealthCheck = addrOf(d.Get("node_health_check").(bool)) + } + + if d.HasChanges("auto_repair_on_errors") { + payload.AutoRepairOnErrors = addrOf(d.Get("auto_repair_on_errors").(bool)) + } + err = cluster.Update(payload, true) if err != nil { return diag.Errorf("Kubernetes cluster update failed: %s", err) From 08174526968fed52e0fb05a3bddcf18e471259fd Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 16:45:39 +0100 Subject: [PATCH 078/156] # Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index b977b0197..ca3723301 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -177,7 +177,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The name of this worker pool", + Description: "The name of this worker pool. 
Must be unique", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, From bbb8f4cfbf7b54eb7c1f75a434cfe268705d0731 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 16:54:32 +0100 Subject: [PATCH 079/156] # Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 4d11052a2..f3113bdc7 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -14,7 +14,9 @@ Supported in provider *v3.12+* Supports the following **Container Service Extension** versions: -* 4.2 +* 4.1.0 +* 4.1.1 +* 4.2.0 -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) From a3f2adda88bbf1e02b1fbdf7d26a8b7afd72c7d3 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 16:58:22 +0100 Subject: [PATCH 080/156] # Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index f3113bdc7..643b94737 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -170,7 +170,7 @@ The `worker_pool` block is **required**, and every cluster should have **at leas Each block asks for the following arguments: -* `name` - (Required) The name of the worker pool. It must contain only lowercase alphanumeric characters or "-", +* `name` - (Required) The name of the worker pool. It must be unique per cluster, and must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters * `machine_count` - (Optional) The number of VMs that the worker pool has. Must be higher than `0`. Defaults to `1` * `disk_size_gi` - (Optional) Disk size, in **Gibibytes (Gi)**, for the worker pool VMs. Must be at least `20`. Defaults to `20` @@ -218,8 +218,9 @@ Only the following arguments can be updated: * `machine_count` of the `control_plane`: Supports scaling up and down * `machine_count` of any `worker_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. The cluster must always have at least 1 running node, or else the cluster will enter an unrecoverable error state. -* `auto_repair_on_errors` -* `node_health_check` +* `auto_repair_on_errors`: Can only be updated in CSE 4.1.0, and it is recommended to set it to `false` when the cluster is created. + In versions 4.1.1 and 4.2.0, this is automatically done by the CSE Server, so this flag cannot be updated. +* `node_health_check`: Can be turned on/off. * `operations_timeout_minutes`: Does not require modifying the existing cluster You can also add more `worker_pool` blocks to add more worker pools to the cluster. 
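For instance, a minimal sketch of a day-2 change that scales the worker pool from the example above and upgrades the cluster template could look like the snippet below; the OVA name is an assumption and must be one of the names reported by the `supported_upgrades` attribute, and every other argument keeps its original value:

```hcl
# Fetch one of the vApp Templates reported by the cluster's `supported_upgrades` attribute.
# The name below is only a placeholder that follows the TKGm OVA naming convention.
data "vcd_catalog_vapp_template" "upgrade_ova" {
  org        = data.vcd_catalog.tkg_catalog.org
  catalog_id = data.vcd_catalog.tkg_catalog.id
  name       = "ubuntu-2004-kube-v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8"
}

resource "vcd_cse_kubernetes_cluster" "my_cluster" {
  # ... all other arguments (org, vdc_id, network_id, api_token_file, control_plane,
  # default_storage_class, etc.) remain exactly as they were when the cluster was created ...

  # Upgrade: point the cluster to the newer Kubernetes template
  kubernetes_template_id = data.vcd_catalog_vapp_template.upgrade_ova.id

  # Scale: only `machine_count` can be changed in an already existing worker pool
  worker_pool {
    name               = "node-pool-1"
    machine_count      = 3 # scaled up from 1
    disk_size_gi       = 20
    sizing_policy_id   = data.vcd_vm_sizing_policy.tkg_small.id
    storage_profile_id = data.vcd_storage_profile.sp.id
  }
}
```

A brand new `worker_pool` block can also be appended in the same change to add another worker pool.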
From 8b1d7925c95049a21c3e6e42650a5b5731c1eedb Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 13 Feb 2024 18:11:13 +0100 Subject: [PATCH 081/156] # Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 45 ++++++++++++++++++- .../r/cse_kubernetes_cluster.html.markdown | 5 +++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index ca3723301..7154476fb 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -350,6 +350,35 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, }, }, + "events": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of events that happened during the Kubernetes cluster lifecycle", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the event", + }, + "type": { + Computed: true, + Type: schema.TypeString, + Description: "Type of the event, either 'event' or 'error'", + }, + "occurred_at": { + Computed: true, + Type: schema.TypeString, + Description: "When the event happened", + }, + "details": { + Computed: true, + Type: schema.TypeString, + Description: "Details of the event", + }, + }, + }, + }, }, } } @@ -641,7 +670,7 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes } } // The "worker_pool" argument is a TypeList, not a TypeSet (check the Schema comments for context), - // so we need to guarantee order. We order them by name. + // so we need to guarantee order. We order them by name, which is unique. sort.SliceStable(workerPoolBlocks, func(i, j int) bool { return workerPoolBlocks[i]["name"].(string) < workerPoolBlocks[j]["name"].(string) }) @@ -690,6 +719,20 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes return nil, err } + events := make([]map[string]interface{}, len(cluster.Events)) + for i, event := range cluster.Events { + events[i] = map[string]interface{}{ + "name": event.Name, + "occurred_at": event.OccurredAt.String(), + "details": event.Details, + "type": event.Type, + } + } + err = d.Set("events", events) + if err != nil { + return nil, err + } + if cluster.State == "provisioned" { kubeconfig, err := cluster.GetKubeconfig() if err != nil { diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 643b94737..f8f090745 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -208,6 +208,11 @@ The following attributes are available for consumption as read-only attributes a * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned` * `supported_upgrades` - A set of vApp Template names that can be fetched with a [`vcd_catalog_vapp_template` data source](/providers/vmware/vcd/latest/docs/data-sources/catalog_vapp_template) to upgrade the cluster. +* `events` - A set of events that happened during the Kubernetes cluster lifecycle. 
Each event has: + * `name` - Name of the event + * `type` - Type of the event, either `event` or `error` + * `details` - Details of the event + * `occurred_at` - When the event happened ## Updating From e76d9b94d7b32fb1b23f226eb0a712dd34af15bc Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 14 Feb 2024 11:22:24 +0100 Subject: [PATCH 082/156] Fixes Signed-off-by: abarreiro --- vcd/cse/4.2/capiyaml_cluster.tmpl | 153 --------------------- vcd/cse/4.2/capiyaml_mhc.tmpl | 22 --- vcd/cse/4.2/capiyaml_nodepool.tmpl | 41 ------ vcd/cse/4.2/rde.tmpl | 31 ----- vcd/cse/tkg_versions.json | 92 ------------- vcd/resource_vcd_cse_kubernetes_cluster.go | 3 + 6 files changed, 3 insertions(+), 339 deletions(-) delete mode 100644 vcd/cse/4.2/capiyaml_cluster.tmpl delete mode 100644 vcd/cse/4.2/capiyaml_mhc.tmpl delete mode 100644 vcd/cse/4.2/capiyaml_nodepool.tmpl delete mode 100644 vcd/cse/4.2/rde.tmpl delete mode 100644 vcd/cse/tkg_versions.json diff --git a/vcd/cse/4.2/capiyaml_cluster.tmpl b/vcd/cse/4.2/capiyaml_cluster.tmpl deleted file mode 100644 index 16a676ae1..000000000 --- a/vcd/cse/4.2/capiyaml_cluster.tmpl +++ /dev/null @@ -1,153 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" - labels: - cluster-role.tkg.tanzu.vmware.com/management: "" - tanzuKubernetesRelease: "{{.TkrVersion}}" - tkg.tanzu.vmware.com/cluster-name: "{{.ClusterName}}" - annotations: - osInfo: "ubuntu,20.04,amd64" - TKGVERSION: "{{.TkgVersion}}" -spec: - clusterNetwork: - pods: - cidrBlocks: - - "{{.PodCidr}}" - serviceDomain: cluster.local - services: - cidrBlocks: - - "{{.ServiceCidr}}" - controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 - kind: KubeadmControlPlane - name: "{{.ClusterName}}-control-plane-node-pool" - namespace: "{{.TargetNamespace}}" - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDCluster - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" ---- -apiVersion: v1 -kind: Secret -metadata: - name: capi-user-credentials - namespace: {{.TargetNamespace}} -type: Opaque -data: - username: "{{.UsernameB64}}" - refreshToken: "{{.ApiTokenB64}}" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDCluster -metadata: - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" -spec: - site: "{{.VcdSite}}" - org: "{{.Org}}" - ovdc: "{{.OrgVdc}}" - ovdcNetwork: "{{.OrgVdcNetwork}}" - {{- if .ControlPlaneEndpoint}} - controlPlaneEndpoint: - host: "{{.ControlPlaneEndpoint}}" - port: 6443 - {{- end}} - {{- if .VirtualIpSubnet}} - loadBalancerConfigSpec: - vipSubnet: "{{.VirtualIpSubnet}}" - {{- end}} - useAsManagementCluster: false - userContext: - secretRef: - name: capi-user-credentials - namespace: "{{.TargetNamespace}}" ---- -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDMachineTemplate -metadata: - name: "{{.ClusterName}}-control-plane-node-pool" - namespace: "{{.TargetNamespace}}" -spec: - template: - spec: - catalog: "{{.Catalog}}" - template: "{{.VAppTemplate}}" - sizingPolicy: "{{.ControlPlaneSizingPolicy}}" - placementPolicy: "{{.ControlPlanePlacementPolicy}}" - storageProfile: "{{.ControlPlaneStorageProfile}}" - diskSize: {{.ControlPlaneDiskSize}} ---- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 -kind: KubeadmControlPlane -metadata: - name: "{{.ClusterName}}-control-plane-node-pool" - namespace: "{{.TargetNamespace}}" -spec: - kubeadmConfigSpec: - preKubeadmCommands: - - mv /etc/ssl/certs/custom_certificate_*.crt 
/usr/local/share/ca-certificates && update-ca-certificates - clusterConfiguration: - apiServer: - certSANs: - - localhost - - 127.0.0.1 - controllerManager: - extraArgs: - enable-hostpath-provisioner: "true" - dns: - imageRepository: "{{.ContainerRegistryUrl}}" - imageTag: "{{.DnsVersion}}" - etcd: - local: - imageRepository: "{{.ContainerRegistryUrl}}" - imageTag: "{{.EtcdVersion}}" - imageRepository: "{{.ContainerRegistryUrl}}" - users: - - name: root - sshAuthorizedKeys: - - "{{.SshPublicKey}}" - initConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external - joinConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external - machineTemplate: - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDMachineTemplate - name: "{{.ClusterName}}-control-plane-node-pool" - namespace: "{{.TargetNamespace}}" - replicas: {{.ControlPlaneMachineCount}} - version: "{{.KubernetesVersion}}" ---- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 -kind: KubeadmConfigTemplate -metadata: - name: "{{.ClusterName}}-kct" - namespace: "{{.TargetNamespace}}" -spec: - template: - spec: - users: - - name: root - sshAuthorizedKeys: - - "{{.SshPublicKey}}" - useExperimentalRetryJoin: true - preKubeadmCommands: - - mv /etc/ssl/certs/custom_certificate_*.crt /usr/local/share/ca-certificates && update-ca-certificates - joinConfiguration: - nodeRegistration: - criSocket: /run/containerd/containerd.sock - kubeletExtraArgs: - eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0% - cloud-provider: external \ No newline at end of file diff --git a/vcd/cse/4.2/capiyaml_mhc.tmpl b/vcd/cse/4.2/capiyaml_mhc.tmpl deleted file mode 100644 index d31e4c3ec..000000000 --- a/vcd/cse/4.2/capiyaml_mhc.tmpl +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineHealthCheck -metadata: - name: "{{.ClusterName}}" - namespace: "{{.TargetNamespace}}" - labels: - clusterctl.cluster.x-k8s.io: "" - clusterctl.cluster.x-k8s.io/move: "" -spec: - clusterName: "{{.ClusterName}}" - maxUnhealthy: "{{.MaxUnhealthyNodePercentage}}" - nodeStartupTimeout: "{{.NodeStartupTimeout}}" - selector: - matchLabels: - cluster.x-k8s.io/cluster-name: "{{.ClusterName}}" - unhealthyConditions: - - type: Ready - status: Unknown - timeout: "{{.NodeUnknownTimeout}}" - - type: Ready - status: "False" - timeout: "{{.NodeNotReadyTimeout}}" \ No newline at end of file diff --git a/vcd/cse/4.2/capiyaml_nodepool.tmpl b/vcd/cse/4.2/capiyaml_nodepool.tmpl deleted file mode 100644 index e2292c7d7..000000000 --- a/vcd/cse/4.2/capiyaml_nodepool.tmpl +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 -kind: VCDMachineTemplate -metadata: - name: "{{.NodePoolName}}" - namespace: "{{.TargetNamespace}}" -spec: - template: - spec: - catalog: "{{.Catalog}}" - template: "{{.VAppTemplate}}" - sizingPolicy: "{{.NodePoolSizingPolicy}}" - placementPolicy: "{{.NodePoolPlacementPolicy}}" - storageProfile: "{{.NodePoolStorageProfile}}" - diskSize: "{{.NodePoolDiskSize}}" - enableNvidiaGPU: {{.NodePoolEnableGpu}} ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: MachineDeployment -metadata: - name: "{{.NodePoolName}}" - namespace: "{{.TargetNamespace}}" -spec: - clusterName: "{{.ClusterName}}" - 
replicas: {{.NodePoolMachineCount}} - selector: - matchLabels: null - template: - spec: - bootstrap: - configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 - kind: KubeadmConfigTemplate - name: "{{.ClusterName}}-kct" - namespace: "{{.TargetNamespace}}" - clusterName: "{{.ClusterName}}" - infrastructureRef: - apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 - kind: VCDMachineTemplate - name: "{{.NodePoolName}}" - namespace: "{{.TargetNamespace}}" - version: "{{.KubernetesVersion}}" \ No newline at end of file diff --git a/vcd/cse/4.2/rde.tmpl b/vcd/cse/4.2/rde.tmpl deleted file mode 100644 index e5ea3e2b8..000000000 --- a/vcd/cse/4.2/rde.tmpl +++ /dev/null @@ -1,31 +0,0 @@ -{ - "apiVersion": "capvcd.vmware.com/v1.1", - "kind": "CAPVCDCluster", - "name": "{{.Name}}", - "metadata": { - "name": "{{.Name}}", - "orgName": "{{.Org}}", - "site": "{{.VcdUrl}}", - "virtualDataCenterName": "{{.Vdc}}" - }, - "spec": { - "vcdKe": { - "isVCDKECluster": true, - "markForDelete": {{.Delete}}, - "forceDelete": {{.ForceDelete}}, - "autoRepairOnErrors": {{.AutoRepairOnErrors}}, - {{- if .DefaultStorageClassName }} - "defaultStorageClassOptions": { - "filesystem": "{{.DefaultStorageClassFileSystem}}", - "k8sStorageClassName": "{{.DefaultStorageClassName}}", - "vcdStorageProfileName": "{{.DefaultStorageClassStorageProfile}}", - "useDeleteReclaimPolicy": {{.DefaultStorageClassUseDeleteReclaimPolicy}} - }, - {{- end }} - "secure": { - "apiToken": "{{.ApiToken}}" - } - }, - "capiYaml": "{{.CapiYaml}}" - } -} diff --git a/vcd/cse/tkg_versions.json b/vcd/cse/tkg_versions.json deleted file mode 100644 index 0566126b9..000000000 --- a/vcd/cse/tkg_versions.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "v1.27.5+vmware.1-tkg.1-0eb96d2f9f4f705ac87c40633d4b69st": { - "tkg": "v2.4.0", - "etcd": "v3.5.7_vmware.6", - "coreDns": "v1.10.1_vmware.7" - }, - "v1.26.8+vmware.1-tkg.1-b8c57a6c8c98d227f74e7b1a9eef27st": { - "tkg": "v2.4.0", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.10.1_vmware.7" - }, - "v1.26.8+vmware.1-tkg.1-0edd4dafbefbdb503f64d5472e500cf8": { - "tkg": "v2.3.1", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.9.3_vmware.16" - }, - "v1.25.13+vmware.1-tkg.1-0031669997707d1c644156b8fc31ebst": { - "tkg": "v2.4.0", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.10.1_vmware.7" - }, - "v1.25.13+vmware.1-tkg.1-6f7650434fd3787d751e8fb3c9e2153d": { - "tkg": "v2.3.1", - "etcd": "v3.5.6_vmware.20", - "coreDns": "v1.9.3_vmware.11" - }, - "v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc": { - "tkg": "v2.2.0", - "etcd": "v3.5.6_vmware.9", - "coreDns": "v1.9.3_vmware.8" - }, - "v1.24.17+vmware.1-tkg.1-9f70d901a7d851fb115411e6790fdeae": { - "tkg": "v2.3.1", - "etcd": "v3.5.6_vmware.19", - "coreDns": "v1.8.6_vmware.26" - }, - "v1.24.11+vmware.1-tkg.1-2ccb2a001f8bd8f15f1bfbc811071830": { - "tkg": "v2.2.0", - "etcd": "v3.5.6_vmware.10", - "coreDns": "v1.8.6_vmware.18" - }, - "v1.24.10+vmware.1-tkg.1-765d418b72c247c2310384e640ee075e": { - "tkg": "v2.1.1", - "etcd": "v3.5.6_vmware.6", - "coreDns": "v1.8.6_vmware.17" - }, - "v1.23.17+vmware.1-tkg.1-ee4d95d5d08cd7f31da47d1480571754": { - "tkg": "v2.2.0", - "etcd": "v3.5.6_vmware.11", - "coreDns": "v1.8.6_vmware.19" - }, - "v1.23.16+vmware.1-tkg.1-eb0de9755338b944ea9652e6f758b3ce": { - "tkg": "v2.1.1", - "etcd": "v3.5.6_vmware.5", - "coreDns": "v1.8.6_vmware.16" - }, - "v1.22.17+vmware.1-tkg.1-df08b304658a6cf17f5e74dc0ab7543c": { - "tkg": "v2.1.1", - "etcd": "v3.5.6_vmware.1", - "coreDns": "v1.8.4_vmware.10" - }, - "v1.22.9+vmware.1-tkg.1-2182cbabee08edf480ee9bc5866d6933": 
{ - "tkg": "v1.5.4", - "etcd": "v3.5.4_vmware.2", - "coreDns": "v1.8.4_vmware.9" - }, - "v1.21.11+vmware.1-tkg.2-d788dbbb335710c0a0d1a28670057896": { - "tkg": "v1.5.4", - "etcd": "v3.4.13_vmware.27", - "coreDns": "v1.8.0_vmware.13" - }, - "v1.21.8+vmware.1-tkg.2-ed3c93616a02968be452fe1934a1d37c": { - "tkg": "v1.4.3", - "etcd": "v3.4.13_vmware.25", - "coreDns": "v1.8.0_vmware.11" - }, - "v1.20.15+vmware.1-tkg.2-839faf7d1fa7fa356be22b72170ce1a8": { - "tkg": "v1.5.4", - "etcd": "v3.4.13_vmware.23", - "coreDns": "v1.7.0_vmware.15" - }, - "v1.20.14+vmware.1-tkg.2-5a5027ce2528a6229acb35b38ff8084e": { - "tkg": "v1.4.3", - "etcd": "v3.4.13_vmware.23", - "coreDns": "v1.7.0_vmware.15" - }, - "v1.19.16+vmware.1-tkg.2-fba68db15591c15fcd5f26b512663a42": { - "tkg": "v1.4.3", - "etcd": "v3.4.13_vmware.19", - "coreDns": "v1.7.0_vmware.15" - } -} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 7154476fb..315b0e36a 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -595,6 +595,9 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m vcdClient := meta.(*VCDClient) cluster, err := vcdClient.CseGetKubernetesClusterById(d.Id()) if err != nil { + if govcd.ContainsNotFound(err) { + return nil // The cluster is gone, nothing to do + } return diag.FromErr(err) } err = cluster.Delete(time.Duration(d.Get("operations_timeout_minutes").(int))) From 79f38cba57bcd76ed47fe96c20faac88dc02a015 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 14 Feb 2024 12:45:09 +0100 Subject: [PATCH 083/156] Improve feedback on provisioning/errored cluster Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 26 +++++++++++++++---- ...esource_vcd_cse_kubernetes_cluster_test.go | 1 + 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index b714d90a2..253a1015c 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -288,7 +288,7 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m dSet(d, "org_id", cluster.OrganizationId) dSet(d, "cluster_id", cluster.ID) - warns, err := saveClusterDataToState(d, cluster) + warns, err := saveClusterDataToState(d, nil, cluster) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 315b0e36a..25836fe17 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -396,10 +396,14 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour } apiTokenFile := d.Get("api_token_file").(string) + if apiTokenFile == "" { + return diag.Errorf("the API token file 'is required during Kubernetes cluster creation") + } apiToken, err := govcd.GetTokenFromFile(apiTokenFile) if err != nil { return diag.Errorf("could not read the API token from the file '%s': %s", apiTokenFile, err) } + owner := d.Get("owner").(string) if owner == "" { session, err := vcdClient.Client.GetSessionInfo() @@ -492,7 +496,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := 
saveClusterDataToState(d, cluster) + warns, err := saveClusterDataToState(d, vcdClient, cluster) if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -614,7 +618,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, cluster) + warns, err := saveClusterDataToState(d, nil, cluster) if err != nil { return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) } @@ -628,7 +632,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. // Returns a slice of warnings first and an error second. -func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetesCluster) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseKubernetesCluster) ([]error, error) { var warnings []error dSet(d, "name", cluster.Name) @@ -648,7 +652,17 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes dSet(d, "virtual_ip_subnet", cluster.VirtualIpSubnet) dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - //dSet(d, "api_token_file", "") + + if _, ok := d.GetOk("org"); ok { + // This field is optional, as it can take the value from the VCD client + if cluster.OrganizationId != "" { + org, err := vcdClient.GetOrgById(cluster.OrganizationId) + if err != nil { + return nil, fmt.Errorf("could not set 'org' argument: %s", err) + } + dSet(d, "org", org.Org.Name) + } + } if _, ok := d.GetOk("owner"); ok { // This field is optional, as it can take the value from the VCD client @@ -743,7 +757,9 @@ func saveClusterDataToState(d *schema.ResourceData, cluster *govcd.CseKubernetes } dSet(d, "kubeconfig", kubeconfig) } else { - warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, won't be able to retrieve the Kubeconfig", d.Id(), cluster.State)) + warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, meaning that "+ + "the Kubeconfig cannot be retrieved and "+ + "some attributes could be unavailable", d.Id(), cluster.State)) } d.SetId(cluster.ID) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 4d5034a4b..76787beef 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -112,6 +112,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { // TODO: Test: // Basic (DONE) +// Error with no auto-repair, then set auto-repair // With machine health checks // With machine health checks // Without storage class From 89f0bf91d379389f20b27927347f76d68a402ffa Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 15 Feb 2024 13:00:26 +0100 Subject: [PATCH 084/156] goreplace Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 2 ++ vcd/resource_vcd_cse_kubernetes_cluster_test.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 154ab6b48..5d4c90c3a 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => 
/Users/abarreiro/Documents/Development/go-vcloud-director +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e diff --git a/go.sum b/go.sum index 88b0c8f5b..91f8528a2 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e h1:NJdMq5jjjxaOglZv6wo7urwKOkmYeWf3yWQOkgU+L2U= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 76787beef..ff91577d4 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -113,6 +113,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { // TODO: Test: // Basic (DONE) // Error with no auto-repair, then set auto-repair +// Upgrade v2.2.0-v1.25.7 to v2.4.0-v1.26.8 // With machine health checks // With machine health checks // Without storage class From 2de851e3cb5850189147b9d48037aec9f6498068 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 16 Feb 2024 11:30:49 +0100 Subject: [PATCH 085/156] sdk bump Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5d4c90c3a..5886c6ab6 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240215151014-8bea6c719039 diff --git a/go.sum b/go.sum index 91f8528a2..3890b2256 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e h1:NJdMq5jjjxaOglZv6wo7urwKOkmYeWf3yWQOkgU+L2U= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240214170722-8a03156b615e/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240215151014-8bea6c719039 h1:57w1bDfTbF2E4Q0jkPK2sx74bYpfS6Y5qDPIJpOyJSA= +github.com/adambarreiro/go-vcloud-director/v2 
v2.17.0-alpha.1.0.20240215151014-8bea6c719039/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 1dd8baab12491585b651e0575181656f064f3829 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 16 Feb 2024 13:46:54 +0100 Subject: [PATCH 086/156] bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5886c6ab6..f0a5d335b 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240215151014-8bea6c719039 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc diff --git a/go.sum b/go.sum index 3890b2256..35bf3bc40 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240215151014-8bea6c719039 h1:57w1bDfTbF2E4Q0jkPK2sx74bYpfS6Y5qDPIJpOyJSA= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240215151014-8bea6c719039/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc h1:+vNQWYEExlmFd8Khg5b1DVseJOoXnVL47NTlniFR4WM= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 57a97e19375fd85378efed1ab5ed6a0a94a99737 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 19 Feb 2024 21:20:01 +0100 Subject: [PATCH 087/156] Change again to typeset but with hash func Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 24 ++++++++-------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 25836fe17..4903b7ba9 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" - "sort" "time" ) @@ -165,12 +164,13 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "worker_pool": { - // This is a list because TypeSet tries to replace the whole block when we just change a sub-attribute like "machine_count", - // provoking that the whole cluster is marked to be 
replaced. On the other hand, with TypeList the updates on sub-attributes - // work as expected but in exchange we need to be careful on reads to guarantee that order is respected. - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, Description: "Defines a node pool for the cluster", + Set: func(v interface{}) int { + // Every Worker Pool is defined unequivocally by its unique name. + return hashcodeString(v.(map[string]interface{})["name"].(string)) + }, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -441,7 +441,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour AutoRepairOnErrors: d.Get("auto_repair_on_errors").(bool), } - workerPoolsAttr := d.Get("worker_pool").([]interface{}) + workerPoolsAttr := d.Get("worker_pool").(*schema.Set).List() workerPools := make([]govcd.CseWorkerPoolSettings, len(workerPoolsAttr)) for i, w := range workerPoolsAttr { workerPool := w.(map[string]interface{}) @@ -533,9 +533,9 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, // Fetch the already existing worker pools that have been modified changePoolsPayload := map[string]govcd.CseWorkerPoolUpdateInput{} - for _, o := range oldPools.([]interface{}) { + for _, o := range oldPools.(*schema.Set).List() { oldPool := o.(map[string]interface{}) - for _, n := range newPools.([]interface{}) { + for _, n := range newPools.(*schema.Set).List() { newPool := n.(map[string]interface{}) if oldPool["name"].(string) == newPool["name"].(string) { changePoolsPayload[newPool["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: newPool["machine_count"].(int)} @@ -547,7 +547,7 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, // Fetch the worker pools that are brand new var addPoolsPayload []govcd.CseWorkerPoolSettings - for _, n := range newPools.([]interface{}) { + for _, n := range newPools.(*schema.Set).List() { newPool := n.(map[string]interface{}) if _, ok := notNew[newPool["name"].(string)]; !ok { addPoolsPayload = append(addPoolsPayload, govcd.CseWorkerPoolSettings{ @@ -686,12 +686,6 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste "disk_size_gi": workerPool.DiskSizeGi, } } - // The "worker_pool" argument is a TypeList, not a TypeSet (check the Schema comments for context), - // so we need to guarantee order. We order them by name, which is unique. 
- sort.SliceStable(workerPoolBlocks, func(i, j int) bool { - return workerPoolBlocks[i]["name"].(string) < workerPoolBlocks[j]["name"].(string) - }) - err = d.Set("worker_pool", workerPoolBlocks) if err != nil { return nil, err From 82603a5204f6aac0313cc8899f3978ddb9a20114 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 19 Feb 2024 22:07:16 +0100 Subject: [PATCH 088/156] Fix Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 62 +++++++++++++------ .../r/cse_kubernetes_cluster.html.markdown | 15 ++--- 2 files changed, 52 insertions(+), 25 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 4903b7ba9..2081cd3ee 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/vmware/go-vcloud-director/v2/govcd" + "sort" "time" ) @@ -164,19 +165,20 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "worker_pool": { - Type: schema.TypeSet, + // This is a list because TypeSet tries to replace the whole block when we just change a sub-attribute like "machine_count", + // provoking that the worker pool is marked to be deleted and the re-created, and that cannot be done in CSE. + // On the other hand, with TypeList the updates on sub-attributes work as expected but in exchange + // we need to be careful on reads to guarantee that order is respected. + Type: schema.TypeList, Required: true, Description: "Defines a node pool for the cluster", - Set: func(v interface{}) int { - // Every Worker Pool is defined unequivocally by its unique name. - return hashcodeString(v.(map[string]interface{})["name"].(string)) - }, Elem: &schema.Resource{ + // Ideally, all of these sub-attributes should have ForceNew: true except for "machine_count", as + // they can´t be changed. However, this doesn´t work well so we check this at runtime. Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The name of this worker pool. 
Must be unique", ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), @@ -192,32 +194,27 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 20, // As suggested in UI - ForceNew: true, Description: "Disk size, in Gibibytes (Gi), for the control plane nodes", ValidateDiagFunc: minimumValue(20, "disk size in Gibibytes (Gi) must be at least 20"), }, "sizing_policy_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: "VM Sizing policy for the control plane nodes", }, "placement_policy_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: "VM Placement policy for the control plane nodes", }, "vgpu_policy_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: "vGPU policy for the control plane nodes", }, "storage_profile_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, Description: "Storage profile for the control plane nodes", }, }, @@ -441,7 +438,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour AutoRepairOnErrors: d.Get("auto_repair_on_errors").(bool), } - workerPoolsAttr := d.Get("worker_pool").(*schema.Set).List() + workerPoolsAttr := d.Get("worker_pool").([]interface{}) workerPools := make([]govcd.CseWorkerPoolSettings, len(workerPoolsAttr)) for i, w := range workerPoolsAttr { workerPool := w.(map[string]interface{}) @@ -529,27 +526,50 @@ func resourceVcdCseKubernetesUpdate(ctx context.Context, d *schema.ResourceData, payload := govcd.CseClusterUpdateInput{} if d.HasChange("worker_pool") { oldPools, newPools := d.GetChange("worker_pool") - notNew := map[string]bool{} + existingPools := map[string]bool{} // Fetch the already existing worker pools that have been modified changePoolsPayload := map[string]govcd.CseWorkerPoolUpdateInput{} - for _, o := range oldPools.(*schema.Set).List() { + for _, o := range oldPools.([]interface{}) { oldPool := o.(map[string]interface{}) - for _, n := range newPools.(*schema.Set).List() { + for _, n := range newPools.([]interface{}) { newPool := n.(map[string]interface{}) if oldPool["name"].(string) == newPool["name"].(string) { + if oldPool["disk_size_gi"] != newPool["disk_size_gi"] { + return diag.Errorf("'disk_size_gi' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["sizing_policy_id"] != newPool["sizing_policy_id"] { + return diag.Errorf("'sizing_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["placement_policy_id"] != newPool["placement_policy_id"] { + return diag.Errorf("'placement_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["vgpu_policy_id"] != newPool["vgpu_policy_id"] { + return diag.Errorf("'vgpu_policy_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } + if oldPool["storage_profile_id"] != newPool["storage_profile_id"] { + return diag.Errorf("'storage_profile_id' of Worker Pool '%s' cannot be changed", oldPool["name"]) + } changePoolsPayload[newPool["name"].(string)] = govcd.CseWorkerPoolUpdateInput{MachineCount: newPool["machine_count"].(int)} - notNew[newPool["name"].(string)] = true // Register this pool as not new + existingPools[newPool["name"].(string)] = true // Register this pool as not new } } } payload.WorkerPools = &changePoolsPayload + // Check that no Worker 
Pools are deleted + for _, o := range oldPools.([]interface{}) { + oldPool := o.(map[string]interface{}) + if _, ok := existingPools[oldPool["name"].(string)]; !ok { + return diag.Errorf("the Worker Pool '%s' can't be deleted, but you can scale it to 0", oldPool["name"].(string)) + } + } + // Fetch the worker pools that are brand new var addPoolsPayload []govcd.CseWorkerPoolSettings - for _, n := range newPools.(*schema.Set).List() { + for _, n := range newPools.([]interface{}) { newPool := n.(map[string]interface{}) - if _, ok := notNew[newPool["name"].(string)]; !ok { + if _, ok := existingPools[newPool["name"].(string)]; !ok { addPoolsPayload = append(addPoolsPayload, govcd.CseWorkerPoolSettings{ Name: newPool["name"].(string), MachineCount: newPool["machine_count"].(int), @@ -686,6 +706,12 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste "disk_size_gi": workerPool.DiskSizeGi, } } + // The "worker_pool" argument is a TypeList, not a TypeSet (check the Schema comments for context), + // so we need to guarantee order. We order them by name, which is unique. + sort.SliceStable(workerPoolBlocks, func(i, j int) bool { + return workerPoolBlocks[i]["name"].(string) < workerPoolBlocks[j]["name"].(string) + }) + err = d.Set("worker_pool", workerPoolBlocks) if err != nil { return nil, err diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index f8f090745..7c90f99ae 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -120,7 +120,7 @@ The following arguments are supported: * `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1` and `4.2.0` * `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) -* `name` - (Required) The name of the Kubernetes cluster. It must contain only lowercase alphanumeric characters or "-", +* `name` - (Required) The name of the Kubernetes cluster. It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters * `kubernetes_template_id` - (Required) The ID of the vApp Template that corresponds to a Kubernetes template OVA * `org` - (Optional) The name of organization that will host the Kubernetes cluster, optional if defined in the provider configuration @@ -209,10 +209,10 @@ The following attributes are available for consumption as read-only attributes a * `supported_upgrades` - A set of vApp Template names that can be fetched with a [`vcd_catalog_vapp_template` data source](/providers/vmware/vcd/latest/docs/data-sources/catalog_vapp_template) to upgrade the cluster. * `events` - A set of events that happened during the Kubernetes cluster lifecycle. Each event has: - * `name` - Name of the event - * `type` - Type of the event, either `event` or `error` - * `details` - Details of the event - * `occurred_at` - When the event happened + * `name` - Name of the event + * `type` - Type of the event, either `event` or `error` + * `details` - Details of the event + * `occurred_at` - When the event happened ## Updating @@ -220,7 +220,7 @@ Only the following arguments can be updated: * `kubernetes_template_id`: The cluster must allow upgrading to the new TKG version. You can check `supported_upgrades` attribute to know the available OVAs. 
-* `machine_count` of the `control_plane`: Supports scaling up and down +* `machine_count` of the `control_plane`: Supports scaling up and down. Nothing else can be updated. * `machine_count` of any `worker_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. The cluster must always have at least 1 running node, or else the cluster will enter an unrecoverable error state. * `auto_repair_on_errors`: Can only be updated in CSE 4.1.0, and it is recommended to set it to `false` when the cluster is created. @@ -228,7 +228,8 @@ Only the following arguments can be updated: * `node_health_check`: Can be turned on/off. * `operations_timeout_minutes`: Does not require modifying the existing cluster -You can also add more `worker_pool` blocks to add more worker pools to the cluster. +You can also add more `worker_pool` blocks to add more worker pools to the cluster. **You can't delete Worker Pools**, but they can +be scaled down to zero. Updating any other argument will delete the existing cluster and create a new one, if the Terraform plan is applied. From 9d323818867c0c808863a49a01e4f1335fb37c52 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 09:43:41 +0100 Subject: [PATCH 089/156] Bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f0a5d335b..ea86d4d72 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af diff --git a/go.sum b/go.sum index 35bf3bc40..48925e833 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc h1:+vNQWYEExlmFd8Khg5b1DVseJOoXnVL47NTlniFR4WM= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216124304-08dd63117ffc/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af h1:JxBHaSiTwcJj+fH2RmVekBaRZ84iw5GUDBFDV58TJf4= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 0ba3af84811f810f2bac6f18468c2fed1c73c759 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 11:05:15 +0100 Subject: [PATCH 090/156] Improve events Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go 
b/vcd/resource_vcd_cse_kubernetes_cluster.go index 2081cd3ee..046f9b212 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -358,6 +358,11 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Description: "Name of the event", }, + "resource_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the resource that caused the event", + }, "type": { Computed: true, Type: schema.TypeString, @@ -759,6 +764,7 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste events := make([]map[string]interface{}, len(cluster.Events)) for i, event := range cluster.Events { events[i] = map[string]interface{}{ + "resource_id": event.ResourceId, "name": event.Name, "occurred_at": event.OccurredAt.String(), "details": event.Details, From 3879dcee825a252ebbfb49bc58e0de94823d03d9 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 12:09:01 +0100 Subject: [PATCH 091/156] Fix data source Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 +- vcd/datasource_vcd_cse_kubernetes_cluster.go | 49 ++++++++++++++++++-- vcd/resource_vcd_cse_kubernetes_cluster.go | 34 ++++++++------ 4 files changed, 68 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index ea86d4d72..724b4a33d 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6 diff --git a/go.sum b/go.sum index 48925e833..3ecaf997b 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af h1:JxBHaSiTwcJj+fH2RmVekBaRZ84iw5GUDBFDV58TJf4= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240216170000-7b77a925f3af/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6 h1:fDs2W/cqMQKkOI9pYfp3c5T0pHZ2voTkk/6KryBJ/JE= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index 253a1015c..98c5201ef 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -43,7 +43,7 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The Kubernetes runtime used by the cluster", }, - "ova_id": { + "kubernetes_template_id": { Type: 
schema.TypeString, Computed: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", @@ -107,7 +107,7 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { }, }, }, - "node_pool": { + "worker_pool": { Type: schema.TypeList, Computed: true, Description: "Defines a node pool for the cluster", @@ -248,6 +248,48 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { Computed: true, Description: "The contents of the kubeconfig of the Kubernetes cluster, only available when 'state=provisioned'", }, + "supported_upgrades": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of vApp Template names that could be used to upgrade the existing cluster", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "events": { + Type: schema.TypeSet, + Computed: true, + Description: "A set of events that happened during the Kubernetes cluster lifecycle", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Computed: true, + Type: schema.TypeString, + Description: "Name of the event", + }, + "resource_id": { + Computed: true, + Type: schema.TypeString, + Description: "ID of the resource that caused the event", + }, + "type": { + Computed: true, + Type: schema.TypeString, + Description: "Type of the event, either 'event' or 'error'", + }, + "occurred_at": { + Computed: true, + Type: schema.TypeString, + Description: "When the event happened", + }, + "details": { + Computed: true, + Type: schema.TypeString, + Description: "Details of the event", + }, + }, + }, + }, }, } } @@ -285,10 +327,11 @@ func datasourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, m cluster = clusters[0] } + // These fields are specific to the data source dSet(d, "org_id", cluster.OrganizationId) dSet(d, "cluster_id", cluster.ID) - warns, err := saveClusterDataToState(d, nil, cluster) + warns, err := saveClusterDataToState(d, vcdClient, cluster, "datasource") if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 046f9b212..2ce9505ce 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -31,7 +31,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), Description: "The CSE version to use", DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { - // This custom diff function allows to correctly compare versions + // This custom diff function allows to correctly compare versions. oldVersion, err := semver.NewVersion(oldValue) if err != nil { return false @@ -166,7 +166,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, "worker_pool": { // This is a list because TypeSet tries to replace the whole block when we just change a sub-attribute like "machine_count", - // provoking that the worker pool is marked to be deleted and the re-created, and that cannot be done in CSE. + // that would cause the worker pool to be deleted and then re-created, which is not allowed in CSE. // On the other hand, with TypeList the updates on sub-attributes work as expected but in exchange // we need to be careful on reads to guarantee that order is respected. 
Type: schema.TypeList, @@ -174,7 +174,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Description: "Defines a node pool for the cluster", Elem: &schema.Resource{ // Ideally, all of these sub-attributes should have ForceNew: true except for "machine_count", as - // they can´t be changed. However, this doesn´t work well so we check this at runtime. + // they can't be changed. However, this doesn't work well, so we check this at runtime. Schema: map[string]*schema.Schema{ "name": { Type: schema.TypeString, @@ -498,7 +498,7 @@ func resourceVcdCseKubernetesRead(_ context.Context, d *schema.ResourceData, met return diag.Errorf("could not read Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, vcdClient, cluster) + warns, err := saveClusterDataToState(d, vcdClient, cluster, "resource") if err != nil { return diag.Errorf("could not save Kubernetes cluster data into Terraform state: %s", err) } @@ -643,7 +643,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m return nil, fmt.Errorf("error retrieving Kubernetes cluster with ID '%s': %s", d.Id(), err) } - warns, err := saveClusterDataToState(d, nil, cluster) + warns, err := saveClusterDataToState(d, vcdClient, cluster, "import") if err != nil { return nil, fmt.Errorf("failed importing Kubernetes cluster '%s': %s", cluster.ID, err) } @@ -657,7 +657,7 @@ func resourceVcdCseKubernetesImport(_ context.Context, d *schema.ResourceData, m // saveClusterDataToState reads the received RDE contents and sets the Terraform arguments and attributes. // Returns a slice of warnings first and an error second. -func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseKubernetesCluster) ([]error, error) { +func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluster *govcd.CseKubernetesCluster, origin string) ([]error, error) { var warnings []error dSet(d, "name", cluster.Name) @@ -678,19 +678,23 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste dSet(d, "auto_repair_on_errors", cluster.AutoRepairOnErrors) dSet(d, "node_health_check", cluster.NodeHealthCheck) - if _, ok := d.GetOk("org"); ok { - // This field is optional, as it can take the value from the VCD client - if cluster.OrganizationId != "" { - org, err := vcdClient.GetOrgById(cluster.OrganizationId) - if err != nil { - return nil, fmt.Errorf("could not set 'org' argument: %s", err) + // The data source does not have the attribute "org", so we cannot set it + if origin != "datasource" { + // If the Org was set, it needs to be refreshed (it should not change, though) + if _, ok := d.GetOk("org"); ok { + if cluster.OrganizationId != "" { + org, err := vcdClient.GetOrgById(cluster.OrganizationId) + if err != nil { + return nil, fmt.Errorf("could not set 'org' argument: %s", err) + } + dSet(d, "org", org.Org.Name) } - dSet(d, "org", org.Org.Name) } } - if _, ok := d.GetOk("owner"); ok { - // This field is optional, as it can take the value from the VCD client + // If the Owner was set, it needs to be refreshed (it should not change, though). + // If the origin is a data source, we always need to set this one as it is a purely computed attribute. 
+ if _, ok := d.GetOk("owner"); ok || origin == "datasource" { dSet(d, "owner", cluster.Owner) } From 3b845e9ae494c582750a4b84444c5e11b5e5a14b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 13:18:56 +0100 Subject: [PATCH 092/156] Fix events order Signed-off-by: abarreiro --- vcd/datasource_vcd_cse_kubernetes_cluster.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/vcd/datasource_vcd_cse_kubernetes_cluster.go b/vcd/datasource_vcd_cse_kubernetes_cluster.go index 98c5201ef..483eeaa96 100644 --- a/vcd/datasource_vcd_cse_kubernetes_cluster.go +++ b/vcd/datasource_vcd_cse_kubernetes_cluster.go @@ -257,7 +257,7 @@ func datasourceVcdCseKubernetesCluster() *schema.Resource { }, }, "events": { - Type: schema.TypeSet, + Type: schema.TypeList, // Order matters here, as they're ordered by date Computed: true, Description: "A set of events that happened during the Kubernetes cluster lifecycle", Elem: &schema.Resource{ diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 2ce9505ce..69ccefb68 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -348,9 +348,9 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { }, }, "events": { - Type: schema.TypeSet, + Type: schema.TypeList, // Order matters here, as they're ordered by date Computed: true, - Description: "A set of events that happened during the Kubernetes cluster lifecycle", + Description: "A list of events that happened during the Kubernetes cluster lifecycle", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "name": { @@ -765,7 +765,7 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste return nil, err } - events := make([]map[string]interface{}, len(cluster.Events)) + events := make([]interface{}, len(cluster.Events)) for i, event := range cluster.Events { events[i] = map[string]interface{}{ "resource_id": event.ResourceId, From 21476e2d0f5b126f65931e7e2e748c7ec6a794b6 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 13:24:49 +0100 Subject: [PATCH 093/156] Fix docs Signed-off-by: abarreiro --- .../docs/d/cse_kubernetes_cluster.html.markdown | 14 +++++++------- .../docs/r/cse_kubernetes_cluster.html.markdown | 3 ++- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index cbf4f0c9e..5c84a6230 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -33,16 +33,16 @@ data "vcd_cse_kubernetes_cluster" "my_cluster" { Sometimes using the cluster ID is not convenient, so this data source allows to use the cluster name. 
As VCD allows to have multiple clusters with the same name, this option must be used with caution as it will fail -if there is more than one Kubernetes cluster with the same name in the same Organization: +if there is more than one Kubernetes cluster with the same name in the same Organization:å ```hcl locals { - my_clusters = [ "beta1", "test2", "foo45"] + my_clusters = toset(["my-cluster-1", "my-cluster-2", "my-cluster-3"]) } -data "vcd_cse_kubernetes_cluster" "my_cluster" { - for_each = local.my_clusters - org = "tenant_org" +data "vcd_cse_kubernetes_cluster" "my_clusters" { + for_each = local.my_clusters + org_id = data.vcd_org.org.id cse_version = "4.2.0" name = each.key } @@ -53,8 +53,8 @@ data "vcd_cse_kubernetes_cluster" "my_cluster" { The following arguments are supported: * `cluster_id` - (Optional) Unequivocally identifies a cluster in VCD. Either `cluster_id` or `name` must be set. -* `org` - (Optional) The name of the Organization to which the Kubernetes cluster belongs. Optional if defined at provider level. Only used if `cluster_id` is not set. -* `name` - (Optional) Allows to find a Kubernetes cluster by name inside the given `org`. Either `cluster_id` or `name` must be set. This argument requires `cse_version` to be set. +* `name` - (Optional) Allows to find a Kubernetes cluster by name inside the given Organization with ID `org_id`. Either `cluster_id` or `name` must be set. This argument requires `cse_version` and `org_id` to be set. +* `org_id` - (Optional) The ID of the Organization to which the Kubernetes cluster belongs. Only used if `cluster_id` is not set. Must be present if `name` is used. * `cse_version` - (Optional) Specifies the CSE Version of the cluster to find when `name` is used instead of `cluster_id`. ## Attribute Reference diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 7c90f99ae..ed923c0ce 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -208,8 +208,9 @@ The following attributes are available for consumption as read-only attributes a * `kubeconfig` - The ready-to-use Kubeconfig file **contents** as a raw string. Only available when `state=provisioned` * `supported_upgrades` - A set of vApp Template names that can be fetched with a [`vcd_catalog_vapp_template` data source](/providers/vmware/vcd/latest/docs/data-sources/catalog_vapp_template) to upgrade the cluster. -* `events` - A set of events that happened during the Kubernetes cluster lifecycle. Each event has: +* `events` - A set of events that happened during the Kubernetes cluster lifecycle. They're ordered from most recent to least. 
Each event has: * `name` - Name of the event + * `resource_id` - ID of the resource that caused the event * `type` - Type of the event, either `event` or `error` * `details` - Details of the event * `occurred_at` - When the event happened From 67d01cddc86e7b741c8de324ae8e35f48011ad68 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 13:40:11 +0100 Subject: [PATCH 094/156] Fixes in import Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 69ccefb68..6c7f002b3 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -681,7 +681,8 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste // The data source does not have the attribute "org", so we cannot set it if origin != "datasource" { // If the Org was set, it needs to be refreshed (it should not change, though) - if _, ok := d.GetOk("org"); ok { + // We also set it always during imports. + if _, ok := d.GetOk("org"); ok || origin == "import" { if cluster.OrganizationId != "" { org, err := vcdClient.GetOrgById(cluster.OrganizationId) if err != nil { @@ -693,8 +694,8 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste } // If the Owner was set, it needs to be refreshed (it should not change, though). - // If the origin is a data source, we always need to set this one as it is a purely computed attribute. - if _, ok := d.GetOk("owner"); ok || origin == "datasource" { + // If the origin is a data source or import, we always need to set this one. + if _, ok := d.GetOk("owner"); ok || origin == "datasource" || origin == "import" { dSet(d, "owner", cluster.Owner) } From 5d9cc8506e5f32cbd92a7ccb8c669907cdee3f90 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 20 Feb 2024 13:44:48 +0100 Subject: [PATCH 095/156] Test Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index ff91577d4..307e53010 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -167,14 +167,14 @@ resource "vcd_api_token" "token" { } resource "vcd_cse_kubernetes_cluster" "my_cluster" { - cse_version = "4.2" - runtime = "tkg" - name = "{{.Name}}" - ova_id = data.vcd_catalog_vapp_template.tkg_ova.id - org = data.vcd_org_vdc.vdc.org - vdc_id = data.vcd_org_vdc.vdc.id - network_id = data.vcd_network_routed_v2.routed.id - api_token_file = vcd_api_token.token.file_name + cse_version = "4.2.0" + runtime = "tkg" + name = "{{.Name}}" + kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id + org = data.vcd_org_vdc.vdc.org + vdc_id = data.vcd_org_vdc.vdc.id + network_id = data.vcd_network_routed_v2.routed.id + api_token_file = vcd_api_token.token.file_name control_plane { machine_count = {{.ControlPlaneCount}} @@ -183,8 +183,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } - node_pool { - name = "node-pool-1" + worker_pool { + name = "worker-pool-1" machine_count = {{.NodePoolCount}} disk_size_gi = 20 sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id From 7bdfb249af21e09f8d949afd1d38c8604a859cdf Mon Sep 17 00:00:00 2001 From: 
abarreiro Date: Wed, 21 Feb 2024 09:14:05 +0100 Subject: [PATCH 096/156] sdk bump Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 724b4a33d..4d9a457eb 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240221081145-a9805422ac07 diff --git a/go.sum b/go.sum index 3ecaf997b..db316847b 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6 h1:fDs2W/cqMQKkOI9pYfp3c5T0pHZ2voTkk/6KryBJ/JE= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240220110102-5878f65920c6/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240221081145-a9805422ac07 h1:yxwSfRfd6vwgudFGhQaXnPIDLDAY4MLEyl8wX8MO1d4= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240221081145-a9805422ac07/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From e255b526b999fa7f0922825264e5d56842be0b4a Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 21 Feb 2024 09:20:18 +0100 Subject: [PATCH 097/156] fix docs Signed-off-by: abarreiro --- website/docs/d/cse_kubernetes_cluster.html.markdown | 4 +++- website/docs/r/cse_kubernetes_cluster.html.markdown | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index 5c84a6230..d462e8b0b 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -14,7 +14,9 @@ Supported in provider *v3.12+* Supports the following **Container Service Extension** versions: -* 4.2 +* 4.1.0 +* 4.1.1 +* 4.2.0 -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index ed923c0ce..0f8297b70 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -67,7 +67,7 @@ data "vcd_storage_profile" "sp" { # The token file is required, and it should be safely stored resource "vcd_api_token" "token" { name = "myClusterToken" - file_name = "/home/Bob/vcdTestAccVcdCseKubernetesCluster.json" + file_name = "/home/Bob/safely_stored_token.json" allow_token_file = true } From 
9b18cc97f1e63223dfe95e65514963fafcd1b715 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 21 Feb 2024 09:21:11 +0100 Subject: [PATCH 098/156] fix docs Signed-off-by: abarreiro --- website/docs/d/cse_kubernetes_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index d462e8b0b..cd0007920 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -35,7 +35,7 @@ data "vcd_cse_kubernetes_cluster" "my_cluster" { Sometimes using the cluster ID is not convenient, so this data source allows to use the cluster name. As VCD allows to have multiple clusters with the same name, this option must be used with caution as it will fail -if there is more than one Kubernetes cluster with the same name in the same Organization:å +if there is more than one Kubernetes cluster with the same name in the same Organization: ```hcl locals { From 1e6e702859c65ffe1785f5a3fabe0ad73e745aa1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 21 Feb 2024 13:02:48 +0100 Subject: [PATCH 099/156] Fix upgrade Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 1 - 1 file changed, 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 6c7f002b3..d1c517672 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -63,7 +63,6 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "kubernetes_template_id": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The ID of the vApp Template that corresponds to a Kubernetes template OVA", }, "org": { From 82861856f50aa90f04ec0875611149fc6e701586 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 26 Feb 2024 10:34:09 +0100 Subject: [PATCH 100/156] bump sdk Signed-off-by: abarreiro --- go.mod | 4 ++-- go.sum | 12 ++++++------ vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 4d9a457eb..517b54a26 100644 --- a/go.mod +++ b/go.mod @@ -59,7 +59,7 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/zclconf/go-cty v1.14.1 // indirect golang.org/x/crypto v0.17.0 // indirect - golang.org/x/exp v0.0.0-20221114191408-850992195362 + golang.org/x/exp v0.0.0-20240119083558-1b970713d09a golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.15.0 // indirect @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240221081145-a9805422ac07 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412 diff --git a/go.sum b/go.sum index db316847b..4ec562378 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 
v2.17.0-alpha.1.0.20240221081145-a9805422ac07 h1:yxwSfRfd6vwgudFGhQaXnPIDLDAY4MLEyl8wX8MO1d4= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240221081145-a9805422ac07/go.mod h1:AdJE5Zz0+ivhfLtelNyBFkoBrvRrXvQHoD/Es/9kPqY= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412 h1:9LgI2EFhxjwm5f0Yc6LK0W8h/+bZWKKxlLtFyTaZttg= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412/go.mod h1:OppAD8mlRGZnNsY7FCxnmQXLm47tdzLRDps+SaqCXdU= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -155,8 +155,8 @@ golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2Uz golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/exp v0.0.0-20221114191408-850992195362 h1:NoHlPRbyl1VFI6FjwHtPQCN7wAMXI6cKcqrmXhOOfBQ= -golang.org/x/exp v0.0.0-20221114191408-850992195362/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= +golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= @@ -209,8 +209,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= -golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= +golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index d1c517672..4e99ea6fc 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -781,7 +781,7 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste } if cluster.State == "provisioned" { - kubeconfig, err := cluster.GetKubeconfig() + kubeconfig, err := cluster.GetKubeconfig(false) if err != nil { return nil, fmt.Errorf("error getting Kubeconfig for the Kubernetes cluster with ID '%s': %s", cluster.ID, err) } From b52c20ae4c242ccf2e9d1b65fba88718dd020341 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 28 Feb 2024 10:32:30 
+0100 Subject: [PATCH 101/156] Bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 6 ++---- vcd/datasource_vcd_version_test.go | 9 +++++---- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 517b54a26..2f328f2e9 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee diff --git a/go.sum b/go.sum index 4ec562378..8a03029c5 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412 h1:9LgI2EFhxjwm5f0Yc6LK0W8h/+bZWKKxlLtFyTaZttg= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240226092324-cba5468cd412/go.mod h1:OppAD8mlRGZnNsY7FCxnmQXLm47tdzLRDps+SaqCXdU= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee h1:VafPAOKykBLFWexO62OjtZNpsv/ahh4rHK2wZDEZSCg= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee/go.mod h1:pgUXGxY6077Y8nIc91jURV9Il4D04tZkqhT2gBDWq0w= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -229,8 +229,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index ffb370a1e..08cb2e03d 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -11,8 +11,8 @@ import ( ) func TestAccVcdVersion(t *testing.T) { - //preTestChecks(t) - //skipIfNotSysAdmin(t) + preTestChecks(t) + skipIfNotSysAdmin(t) vcdClient := createSystemTemporaryVCDConnection() currentVersion, err := vcdClient.Client.GetVcdShortVersion() @@ -79,6 +79,7 @@ func TestAccVcdVersion(t *testing.T) { const testAccVcdVersion = ` data "vcd_version" "version" { - name = "{{.Vcenter}}" - } + condition = "{{.Condition}}" + fail_if_not_match = {{.FailIfNotMatch}} +} ` From a435b3efc4716e6ffa52eb49e8e8f2879c6d0f67 Mon Sep 17 00:00:00 2001 From: abarreiro Date: 
Wed, 28 Feb 2024 16:32:43 +0100 Subject: [PATCH 102/156] Improve tests Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 +- vcd/config_test.go | 1 + vcd/resource_vcd_cse_kubernetes_cluster.go | 3 +- ...esource_vcd_cse_kubernetes_cluster_test.go | 123 +++++++++++++++--- 5 files changed, 108 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index 2f328f2e9..40262ba1d 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05 diff --git a/go.sum b/go.sum index 8a03029c5..faeda84e2 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee h1:VafPAOKykBLFWexO62OjtZNpsv/ahh4rHK2wZDEZSCg= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228092419-21284a990aee/go.mod h1:pgUXGxY6077Y8nIc91jURV9Il4D04tZkqhT2gBDWq0w= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05 h1:tdQ1EP5Ce7U/57rInHEFtIzyB21kkoD3XkwYoge7ESo= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05/go.mod h1:pgUXGxY6077Y8nIc91jURV9Il4D04tZkqhT2gBDWq0w= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= diff --git a/vcd/config_test.go b/vcd/config_test.go index f7eb51de9..64c9c6837 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -232,6 +232,7 @@ type TestConfig struct { } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` Cse struct { + CseVersion string `json:"cseVersion,omitempty"` SolutionsOrg string `json:"solutionsOrg,omitempty"` TenantOrg string `json:"tenantOrg,omitempty"` Vdc string `json:"vdc,omitempty"` diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 4e99ea6fc..f99f5d0e1 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -93,8 +93,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { "api_token_file": { Type: schema.TypeString, Optional: true, - Computed: true, // It is never used again after creation. Rather, it gets always emptied - ForceNew: true, + ForceNew: false, // It's only used on creation, so we do not care about updates Description: "A file generated by 'vcd_api_token' resource, that stores the API token used to create and manage the cluster, owned by the user specified in 'owner'. 
Be careful about this file, as it contains sensitive information", }, "ssh_public_key": { diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 307e53010..8aba3d745 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -4,13 +4,12 @@ package vcd import ( "fmt" + semver "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" "strings" "testing" - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func TestAccVcdCseKubernetesCluster(t *testing.T) { @@ -20,6 +19,16 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Skip("CSE tests deactivated, skipping " + t.Name()) } + cseVersion, err := semver.NewVersion(testConfig.Cse.CseVersion) + if err != nil { + t.Fatal(err) + } + + v411, err := semver.NewVersion("4.1.1") + if err != nil { + t.Fatal(err) + } + tokenFilename := getCurrentDir() + t.Name() + ".json" defer func() { // Clean the API Token file @@ -31,27 +40,30 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } }() - now := time.Now() var params = StringMap{ - "Name": strings.ToLower(t.Name()), - "OvaCatalog": testConfig.Cse.OvaCatalog, - "OvaName": testConfig.Cse.OvaName, - "SolutionsOrg": testConfig.Cse.SolutionsOrg, - "TenantOrg": testConfig.Cse.TenantOrg, - "Vdc": testConfig.Cse.Vdc, - "EdgeGateway": testConfig.Cse.EdgeGateway, - "Network": testConfig.Cse.RoutedNetwork, - "TokenName": fmt.Sprintf("%s%d%d%d", strings.ToLower(t.Name()), now.Day(), now.Hour(), now.Minute()), - "TokenFile": tokenFilename, - "ControlPlaneCount": 1, - "NodePoolCount": 1, + "CseVersion": testConfig.Cse.CseVersion, + "Name": strings.ToLower(t.Name()), + "OvaCatalog": testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.Vdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenName": t.Name(), + "TokenFile": tokenFilename, + "ControlPlaneCount": 1, + "NodePoolCount": 1, + "ExtraWorkerPool": " ", + "AutoRepairOnErrors": true, + "NodeHealthCheck": true, } testParamsNotEmpty(t, params) step1 := templateFill(testAccVcdCseKubernetesCluster, params) params["FuncName"] = t.Name() + "Step2" - params["ControlPlaneCount"] = 2 + params["ControlPlaneCount"] = 3 step2 := templateFill(testAccVcdCseKubernetesCluster, params) params["FuncName"] = t.Name() + "Step3" @@ -59,6 +71,25 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["NodePoolCount"] = 2 step3 := templateFill(testAccVcdCseKubernetesCluster, params) + params["FuncName"] = t.Name() + "Step4" + params["ControlPlaneCount"] = 1 + params["NodePoolCount"] = 1 + params["NodeHealthCheck"] = false + step4 := templateFill(testAccVcdCseKubernetesCluster, params) + + extraWorkerPool := " worker_pool {\n" + + " name = \"worker-pool-2\"\n" + + " machine_count = 1\n" + + " disk_size_gi = 20\n" + + " sizing_policy_id = data.vcd_vm_sizing_policy.tkg_small.id\n" + + " storage_profile_id = data.vcd_storage_profile.sp.id\n" + + " }" + + params["FuncName"] = t.Name() + "Step5" + params["NodeHealthCheck"] = true + params["ExtraWorkerPool"] = extraWorkerPool + step5 := templateFill(testAccVcdCseKubernetesCluster, params) + if vcdShortTest { t.Skip(acceptanceTestsSkipped) return @@ -84,7 +115,41 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { 
Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttrSet(clusterName, "id"), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.CseVersion), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckNoResourceAttr(clusterName, "org"), // It is taken from Provider config + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestCheckResourceAttrSet(clusterName, "kubernetes_version"), // TODO: Fine-grain? 
resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), ), @@ -105,6 +170,22 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), ), }, + { + Config: step4, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + ), + }, + { + Config: step5, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + ), + }, }, }) postTestChecks(t) @@ -167,7 +248,7 @@ resource "vcd_api_token" "token" { } resource "vcd_cse_kubernetes_cluster" "my_cluster" { - cse_version = "4.2.0" + cse_version = "{{.CseVersion}}" runtime = "tkg" name = "{{.Name}}" kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id @@ -191,6 +272,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { storage_profile_id = data.vcd_storage_profile.sp.id } + {{.ExtraWorkerPool}} + default_storage_class { name = "sc-1" storage_profile_id = data.vcd_storage_profile.sp.id @@ -198,8 +281,8 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { filesystem = "ext4" } - auto_repair_on_errors = true - node_health_check = true + auto_repair_on_errors = {{.AutoRepairOnErrors}} + node_health_check = {{.NodeHealthCheck}} operations_timeout_minutes = 0 } From fb085e69d4fd8c26820999443ba91aec7dda0a67 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 28 Feb 2024 17:39:19 +0100 Subject: [PATCH 103/156] Improve tests Signed-off-by: abarreiro --- vcd/config_test.go | 3 +- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- ...esource_vcd_cse_kubernetes_cluster_test.go | 30 +++++++++++++------ 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/vcd/config_test.go b/vcd/config_test.go index 64c9c6837..09bb0c2fd 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -232,13 +232,12 @@ type TestConfig struct { } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` Cse struct { - CseVersion string `json:"cseVersion,omitempty"` + Version string `json:"version,omitempty"` SolutionsOrg string `json:"solutionsOrg,omitempty"` TenantOrg string `json:"tenantOrg,omitempty"` Vdc string `json:"vdc,omitempty"` OvaCatalog string `json:"ovaCatalog,omitempty"` OvaName string `json:"ovaName,omitempty"` - CapVcdVersion string `json:"capVcdVersion,omitempty"` RoutedNetwork string `json:"routedNetwork,omitempty"` EdgeGateway string `json:"edgeGateway,omitempty"` } `json:"cse,omitempty"` diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index f99f5d0e1..884d13c26 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -627,7 +627,7 @@ func resourceVcdCseKubernetesDelete(_ context.Context, d *schema.ResourceData, m } return diag.FromErr(err) } - err = cluster.Delete(time.Duration(d.Get("operations_timeout_minutes").(int))) + err = cluster.Delete(time.Duration(d.Get("operations_timeout_minutes").(int)) * time.Minute) if err != nil { return diag.FromErr(err) } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 
8aba3d745..c7a96ca00 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -8,18 +8,30 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" + "reflect" "strings" "testing" ) -func TestAccVcdCseKubernetesCluster(t *testing.T) { - preTestChecks(t) - +func requireCseConfig(t *testing.T, testConfig TestConfig) { + skippedPrefix := fmt.Sprintf("skipped %s because:", t.Name()) if cse := os.Getenv("TEST_VCD_CSE"); cse == "" { - t.Skip("CSE tests deactivated, skipping " + t.Name()) + t.Skipf("%s the environment variable TEST_VCD_CSE is not set", skippedPrefix) + } + cseConfigValues := reflect.ValueOf(testConfig.Cse) + cseConfigType := cseConfigValues.Type() + for i := 0; i < cseConfigValues.NumField(); i++ { + if cseConfigValues.Field(i).String() == "" { + t.Skipf("%s the config value '%s' inside 'cse' object of vcd_test_config.json is not set", skippedPrefix, cseConfigType.Field(i).Name) + } } +} + +func TestAccVcdCseKubernetesCluster(t *testing.T) { + preTestChecks(t) + requireCseConfig(t, testConfig) - cseVersion, err := semver.NewVersion(testConfig.Cse.CseVersion) + cseVersion, err := semver.NewVersion(testConfig.Cse.Version) if err != nil { t.Fatal(err) } @@ -41,7 +53,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { }() var params = StringMap{ - "CseVersion": testConfig.Cse.CseVersion, + "CseVersion": testConfig.Cse.Version, "Name": strings.ToLower(t.Name()), "OvaCatalog": testConfig.Cse.OvaCatalog, "OvaName": testConfig.Cse.OvaName, @@ -50,7 +62,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "Vdc": testConfig.Cse.Vdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, - "TokenName": t.Name(), + "TokenName": t.Name() + "2", // FIXME: Remove suffix "TokenFile": tokenFilename, "ControlPlaneCount": 1, "NodePoolCount": 1, @@ -115,7 +127,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttrSet(clusterName, "id"), - resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.CseVersion), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), @@ -284,6 +296,6 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { auto_repair_on_errors = {{.AutoRepairOnErrors}} node_health_check = {{.NodeHealthCheck}} - operations_timeout_minutes = 0 + operations_timeout_minutes = 150 } ` From 924e279d1f8f30170836b28b84cfacd75549381c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 28 Feb 2024 20:22:12 +0100 Subject: [PATCH 104/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index c7a96ca00..08b5e8682 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -131,10 +131,11 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), 
resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), - resource.TestCheckNoResourceAttr(clusterName, "org"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "org", ""), // It is taken from Provider config resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), - resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "owner", ""), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), @@ -152,6 +153,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), func() resource.TestCheckFunc { // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 if cseVersion.GreaterThanOrEqual(v411) { From 1def8509e49a0aa149bd0cc1344a1b4624e08ec8 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 28 Feb 2024 22:39:01 +0100 Subject: [PATCH 105/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 08b5e8682..cb1360b78 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -131,7 +131,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), - resource.TestCheckResourceAttr(clusterName, "org", ""), // It is taken from Provider config + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), resource.TestCheckResourceAttr(clusterName, "owner", ""), // It is taken from Provider config From 1cd62742632778469ebe0fcc57aa8f7a62187bfe Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 08:59:29 +0100 Subject: [PATCH 106/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index cb1360b78..b2dc90f55 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ 
b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -134,7 +134,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), - resource.TestCheckResourceAttr(clusterName, "owner", ""), // It is taken from Provider config + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), From a62b5eee6419eab79edafed6121b147409d5dc90 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 12:23:58 +0100 Subject: [PATCH 107/156] Fix versions Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 12 ++++++------ vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index 884d13c26..cbf1f781d 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -659,15 +659,15 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste var warnings []error dSet(d, "name", cluster.Name) - dSet(d, "cse_version", cluster.CseVersion.String()) + dSet(d, "cse_version", cluster.CseVersion.Original()) dSet(d, "runtime", "tkg") // Only one supported dSet(d, "vdc_id", cluster.VdcId) dSet(d, "network_id", cluster.NetworkId) - dSet(d, "cpi_version", cluster.CpiVersion.String()) - dSet(d, "csi_version", cluster.CsiVersion.String()) - dSet(d, "capvcd_version", cluster.CapvcdVersion.String()) - dSet(d, "kubernetes_version", cluster.KubernetesVersion.String()) - dSet(d, "tkg_product_version", cluster.TkgVersion.String()) + dSet(d, "cpi_version", cluster.CpiVersion.Original()) + dSet(d, "csi_version", cluster.CsiVersion.Original()) + dSet(d, "capvcd_version", cluster.CapvcdVersion.Original()) + dSet(d, "kubernetes_version", cluster.KubernetesVersion.Original()) + dSet(d, "tkg_product_version", cluster.TkgVersion.Original()) dSet(d, "pods_cidr", cluster.PodCidr) dSet(d, "services_cidr", cluster.ServiceCidr) dSet(d, "kubernetes_template_id", cluster.KubernetesTemplateOvaId) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index b2dc90f55..8801982e8 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -122,6 +122,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { return nil }, Steps: []resource.TestStep{ + // Basic scenario of cluster creation { Config: step1, Check: resource.ComposeAggregateTestCheckFunc( @@ -168,6 +169,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), ), }, + // Basic scenario of cluster creation { Config: step2, Check: resource.ComposeAggregateTestCheckFunc( From d7ec5d38d482ab54317b2187284237ac142e1b2d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 13:04:09 +0100 Subject: [PATCH 108/156] Finish test, needs to pass Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 323 +++++++++++++++++- 1 
file changed, 310 insertions(+), 13 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 8801982e8..9bf3867ca 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -9,6 +9,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "os" "reflect" + "regexp" "strings" "testing" ) @@ -41,6 +42,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Fatal(err) } + vcdClient := createSystemTemporaryVCDConnection() + tokenFilename := getCurrentDir() + t.Name() + ".json" defer func() { // Clean the API Token file @@ -57,6 +60,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "Name": strings.ToLower(t.Name()), "OvaCatalog": testConfig.Cse.OvaCatalog, "OvaName": testConfig.Cse.OvaName, + "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", "SolutionsOrg": testConfig.Cse.SolutionsOrg, "TenantOrg": testConfig.Cse.TenantOrg, "Vdc": testConfig.Cse.Vdc, @@ -102,6 +106,11 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["ExtraWorkerPool"] = extraWorkerPool step5 := templateFill(testAccVcdCseKubernetesCluster, params) + params["FuncName"] = t.Name() + "Step6" + upgradeOvaId := "" + // This one is set dynamically by the Step6 pre-check itself + step6 := "" + if vcdShortTest { t.Skip(acceptanceTestsSkipped) return @@ -127,7 +136,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { Config: step1, Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), - resource.TestCheckResourceAttrSet(clusterName, "id"), + resource.TestMatchResourceAttr(clusterName, "id", regexp.MustCompile(`^urn:vcloud:entity:vmware:capvcdCluster:.+$`)), resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), @@ -164,42 +173,339 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } }(), resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), - resource.TestCheckResourceAttrSet(clusterName, "kubernetes_version"), // TODO: Fine-grain? 
+ resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, - // Basic scenario of cluster creation + // Scale the control plane to 3 replicas { Config: step2, Check: resource.ComposeAggregateTestCheckFunc( + // Control plane should change + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "3"), + + // Other things should remain the same resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + 
resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, + // Scale down the control plane to 1 replica, scale up worker pool to 2 replicas { Config: step3, Check: resource.ComposeAggregateTestCheckFunc( + // Changed settings + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "2"), + + // Other things should remain the same resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + 
resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, + // Scale down the worker pool to 1 replica, disable health check { Config: step4, Check: resource.ComposeAggregateTestCheckFunc( + // Changed settings + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "false"), + + // Other things should remain the same resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", 
"data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + 
resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, + // Enable health check, add a worker pool { Config: step5, Check: resource.ComposeAggregateTestCheckFunc( + // The new worker pool should be present + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.name", "worker-pool-2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + + // Other things should remain the same resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), 
+ func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), + resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + ), + }, + // Enable health check, add a worker pool + { + SkipFunc: func() (bool, error) { + cluster, err := vcdClient.CseGetKubernetesClusterById(cacheId.fieldValue) + if err != nil { + return true, err + } + ovas, err := cluster.GetSupportedUpgrades(true) + if err != nil { + return true, err + } + if len(ovas) == 0 { + fmt.Println("Skipping cluster upgrade step as there are no available OVAs to upgrade") + return true, nil + } + return false, nil + }, + PreConfig: func() { + cluster, err := vcdClient.CseGetKubernetesClusterById(cacheId.fieldValue) + if err != nil { + t.Fatalf("failed pre-config of step 6: %s", err) + } + ovas, err := cluster.GetSupportedUpgrades(true) + if err != nil { + t.Fatalf("failed pre-config of step 6: %s", err) + } + if len(ovas) == 0 { + t.Fatalf("failed pre-config of step 6: there are no upgrade OVAs") + return + } + upgradeOvaId = ovas[0].ID + params["KubernetesOva"] = fmt.Sprintf("\"%s\"", upgradeOvaId) + step6 = templateFill(testAccVcdCseKubernetesCluster, params) + }, + Config: step6, + Check: resource.ComposeAggregateTestCheckFunc( + // The OVA should be updated + resource.TestCheckResourceAttr(clusterName, "kubernetes_template_id", upgradeOvaId), + + // Other things should remain the same + resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), + resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), + resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), + resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), + resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), + resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), + resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), + resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config + resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), + 
resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.machine_count", "1"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.name", "worker-pool-2"), + resource.TestCheckResourceAttr(clusterName, "worker_pool.1.disk_size_gi", "20"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), + resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), + resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), + resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), + resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), + resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), + resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), + func() resource.TestCheckFunc { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 + if cseVersion.GreaterThanOrEqual(v411) { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") + } else { + return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") + } + }(), + resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), + resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", 
regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, }, @@ -207,15 +513,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { postTestChecks(t) } -// TODO: Test: -// Basic (DONE) -// Error with no auto-repair, then set auto-repair -// Upgrade v2.2.0-v1.25.7 to v2.4.0-v1.26.8 -// With machine health checks -// With machine health checks -// Without storage class -// With virtual IP and control plane IPs -// Nodes With vGPU policies const testAccVcdCseKubernetesCluster = ` # skip-binary-test - This one requires a very special setup @@ -267,7 +564,7 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { cse_version = "{{.CseVersion}}" runtime = "tkg" name = "{{.Name}}" - kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id + kubernetes_template_id = {{.KubernetesOva}} org = data.vcd_org_vdc.vdc.org vdc_id = data.vcd_org_vdc.vdc.id network_id = data.vcd_network_routed_v2.routed.id From 86b75bfd1a537f0833974ad3769924a47bc19252 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 13:10:05 +0100 Subject: [PATCH 109/156] Finish test, needs to pass Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 9bf3867ca..54b48df12 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -448,7 +448,9 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["KubernetesOva"] = fmt.Sprintf("\"%s\"", upgradeOvaId) step6 = templateFill(testAccVcdCseKubernetesCluster, params) }, - Config: step6, + Config: func() string { + return step6 + }(), Check: resource.ComposeAggregateTestCheckFunc( // The OVA should be updated resource.TestCheckResourceAttr(clusterName, "kubernetes_template_id", upgradeOvaId), From 423e4c5eb7057083ca2861438ea5773042b66726 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 13:12:09 +0100 Subject: [PATCH 110/156] Reverted test, needs to pass Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 68 ++----------------- 1 file changed, 6 insertions(+), 62 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 54b48df12..6d8055fa2 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -42,7 +42,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Fatal(err) } - vcdClient := createSystemTemporaryVCDConnection() + // vcdClient := createSystemTemporaryVCDConnection() tokenFilename := getCurrentDir() + t.Name() + ".json" defer func() { @@ -107,9 +107,9 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { step5 := templateFill(testAccVcdCseKubernetesCluster, params) params["FuncName"] = t.Name() + "Step6" - upgradeOvaId := "" + //upgradeOvaId := "" // This one is set dynamically by the Step6 pre-check itself - step6 := "" + // step6 := "" if vcdShortTest { t.Skip(acceptanceTestsSkipped) @@ -415,7 +415,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { ), }, // Enable health check, add a worker pool - { + /*{ SkipFunc: func() (bool, error) { cluster, err := 
vcdClient.CseGetKubernetesClusterById(cacheId.fieldValue) if err != nil { @@ -448,68 +448,12 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["KubernetesOva"] = fmt.Sprintf("\"%s\"", upgradeOvaId) step6 = templateFill(testAccVcdCseKubernetesCluster, params) }, - Config: func() string { - return step6 - }(), + Config: step6, Check: resource.ComposeAggregateTestCheckFunc( // The OVA should be updated resource.TestCheckResourceAttr(clusterName, "kubernetes_template_id", upgradeOvaId), - - // Other things should remain the same - resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), - resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), - resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), - resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), - resource.TestCheckResourceAttrPair(clusterName, "kubernetes_template_id", "data.vcd_catalog_vapp_template.tkg_ova", "id"), - resource.TestCheckResourceAttrPair(clusterName, "org", "data.vcd_org_vdc.vdc", "org"), - resource.TestCheckResourceAttrPair(clusterName, "vdc_id", "data.vcd_org_vdc.vdc", "id"), - resource.TestCheckResourceAttrPair(clusterName, "network_id", "data.vcd_network_routed_v2.routed", "id"), - resource.TestCheckNoResourceAttr(clusterName, "owner"), // It is taken from Provider config - resource.TestCheckResourceAttr(clusterName, "ssh_public_key", ""), - resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "1"), - resource.TestCheckResourceAttr(clusterName, "control_plane.0.disk_size_gi", "20"), - resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), - resource.TestCheckResourceAttrPair(clusterName, "control_plane.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), - resource.TestCheckResourceAttrSet(clusterName, "control_plane.0.ip"), // IP should be assigned after creation as it was not set manually in HCL config - resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "1"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.0.name", "worker-pool-1"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.0.disk_size_gi", "20"), - resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), - resource.TestCheckResourceAttrPair(clusterName, "worker_pool.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.#", "2"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.1.machine_count", "1"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.1.name", "worker-pool-2"), - resource.TestCheckResourceAttr(clusterName, "worker_pool.1.disk_size_gi", "20"), - resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.sizing_policy_id", "data.vcd_vm_sizing_policy.tkg_small", "id"), - resource.TestCheckResourceAttrPair(clusterName, "worker_pool.1.storage_profile_id", "data.vcd_storage_profile.sp", "id"), - resource.TestCheckResourceAttrPair(clusterName, "default_storage_class.0.storage_profile_id", "data.vcd_storage_profile.sp", "id"), - resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.name", "sc-1"), - resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.reclaim_policy", "delete"), - resource.TestCheckResourceAttr(clusterName, "default_storage_class.0.filesystem", "ext4"), - resource.TestCheckResourceAttr(clusterName, 
"node_health_check", "true"), - resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), - resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), - resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), - func() resource.TestCheckFunc { - // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 - if cseVersion.GreaterThanOrEqual(v411) { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") - } else { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") - } - }(), - resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), - resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), - resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), - resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), - }, + },*/ }, }) postTestChecks(t) From 3169b78086f2a7b8a4983c800cdfcfba89bab4bc Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 29 Feb 2024 20:57:11 +0100 Subject: [PATCH 111/156] Fix test Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 25 ++++++++----------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 6d8055fa2..4301d7a40 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -177,9 +177,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), @@ -233,9 +232,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { 
resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), @@ -289,9 +287,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), @@ -345,9 +342,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), @@ -406,9 +402,8 @@ func 
TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "cluster_resource_set_bindings.#", regexp.MustCompile(`^[1-9][0-9]*$`)), - resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), - resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "cpi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), + resource.TestMatchResourceAttr(clusterName, "csi_version", regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestCheckResourceAttr(clusterName, "state", "provisioned"), resource.TestCheckResourceAttrSet(clusterName, "kubeconfig"), resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), From 671c9007070369f040d61ce18f3a9d5b5f93cb0f Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 1 Mar 2024 10:29:10 +0100 Subject: [PATCH 112/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 4301d7a40..b3507d8c7 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -192,7 +192,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "control_plane.0.machine_count", "3"), // Other things should remain the same - resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), @@ -248,7 +248,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "worker_pool.0.machine_count", "2"), // Other things should remain the same - resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), @@ -303,7 +303,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "node_health_check", "false"), // Other things should remain the same - resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), @@ -363,7 +363,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), // Other things should 
remain the same - resource.TestCheckResourceAttr(clusterName, "id", cacheId.fieldValue), + cacheId.testCheckCachedResourceFieldValue(clusterName, "id"), resource.TestCheckResourceAttr(clusterName, "cse_version", testConfig.Cse.Version), resource.TestCheckResourceAttr(clusterName, "runtime", "tkg"), resource.TestCheckResourceAttr(clusterName, "name", strings.ToLower(t.Name())), From b6481de1e52655265b93960225007bb6e38b0a57 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 1 Mar 2024 13:42:23 +0100 Subject: [PATCH 113/156] Bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 40262ba1d..181f4ff7e 100644 --- a/go.mod +++ b/go.mod @@ -69,4 +69,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240301112016-5018bed64835 diff --git a/go.sum b/go.sum index faeda84e2..b97f6ede6 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05 h1:tdQ1EP5Ce7U/57rInHEFtIzyB21kkoD3XkwYoge7ESo= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240228145626-02609cc2fb05/go.mod h1:pgUXGxY6077Y8nIc91jURV9Il4D04tZkqhT2gBDWq0w= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240301112016-5018bed64835 h1:p/yhoIBfgediKP8lff1XgmFy3rM+/48keDR+nzdEJV4= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240301112016-5018bed64835/go.mod h1:pgUXGxY6077Y8nIc91jURV9Il4D04tZkqhT2gBDWq0w= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 4df2b6e2e5ff1e279be16566a135072c09913316 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 1 Mar 2024 14:53:19 +0100 Subject: [PATCH 114/156] Add import test Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 53 +++---------------- 1 file changed, 7 insertions(+), 46 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index b3507d8c7..f4c356193 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -42,8 +42,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { t.Fatal(err) } - // vcdClient := createSystemTemporaryVCDConnection() - tokenFilename := getCurrentDir() + t.Name() + ".json" defer func() { // Clean the API Token file @@ -106,11 +104,6 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["ExtraWorkerPool"] = extraWorkerPool step5 := templateFill(testAccVcdCseKubernetesCluster, params) - params["FuncName"] = t.Name() + "Step6" - //upgradeOvaId := "" - // This one is set dynamically 
by the Step6 pre-check itself - // step6 := "" - if vcdShortTest { t.Skip(acceptanceTestsSkipped) return @@ -409,46 +402,14 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, - // Enable health check, add a worker pool - /*{ - SkipFunc: func() (bool, error) { - cluster, err := vcdClient.CseGetKubernetesClusterById(cacheId.fieldValue) - if err != nil { - return true, err - } - ovas, err := cluster.GetSupportedUpgrades(true) - if err != nil { - return true, err - } - if len(ovas) == 0 { - fmt.Println("Skipping cluster upgrade step as there are no available OVAs to upgrade") - return true, nil - } - return false, nil - }, - PreConfig: func() { - cluster, err := vcdClient.CseGetKubernetesClusterById(cacheId.fieldValue) - if err != nil { - t.Fatalf("failed pre-config of step 6: %s", err) - } - ovas, err := cluster.GetSupportedUpgrades(true) - if err != nil { - t.Fatalf("failed pre-config of step 6: %s", err) - } - if len(ovas) == 0 { - t.Fatalf("failed pre-config of step 6: there are no upgrade OVAs") - return - } - upgradeOvaId = ovas[0].ID - params["KubernetesOva"] = fmt.Sprintf("\"%s\"", upgradeOvaId) - step6 = templateFill(testAccVcdCseKubernetesCluster, params) + { + ResourceName: clusterName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: func(state *terraform.State) (string, error) { + return cacheId.fieldValue, nil }, - Config: step6, - Check: resource.ComposeAggregateTestCheckFunc( - // The OVA should be updated - resource.TestCheckResourceAttr(clusterName, "kubernetes_template_id", upgradeOvaId), - ), - },*/ + }, }, }) postTestChecks(t) From a1392ec15e7d5a88fd43bd3307fed6886ff7b9a1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 1 Mar 2024 20:31:25 +0100 Subject: [PATCH 115/156] Add import test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index f4c356193..00b682556 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -409,6 +409,11 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { ImportStateIdFunc: func(state *terraform.State) (string, error) { return cacheId.fieldValue, nil }, + // Ignore api_token_file and operations_timeout_minutes as these are not computed from VCD, so they are missing + // after any successful import. + // Ignore also owner and org as these may not be set in the resource configuration, but they are always + // computed. + ImportStateVerifyIgnore: []string{"api_token_file", "operations_timeout_minutes", "owner", "org"}, }, }, }) From 23bc4464e1087e1a83d49a19dd4ef883fea72247 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 15:13:32 +0100 Subject: [PATCH 116/156] Fix test Signed-off-by: abarreiro --- vcd/datasource_vcd_version.go | 2 +- vcd/datasource_vcd_version_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vcd/datasource_vcd_version.go b/vcd/datasource_vcd_version.go index 19f5df6c8..5b089c3c9 100644 --- a/vcd/datasource_vcd_version.go +++ b/vcd/datasource_vcd_version.go @@ -74,6 +74,6 @@ func datasourceVcdVersionRead(_ context.Context, d *schema.ResourceData, meta in } // The ID is artificial, and we try to identify each data source instance unequivocally through its parameters. 
- d.SetId(fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=%t", vcdVersion, d.Get("condition"), d.Get("fail_if_not_match"))) + d.SetId(fmt.Sprintf("vcd_version='%s',condition='%s',fail_if_not_match='%t'", vcdVersion, d.Get("condition"), d.Get("fail_if_not_match"))) return nil } diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index 08cb2e03d..f0fadfca4 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -33,11 +33,11 @@ func TestAccVcdVersion(t *testing.T) { step1 := templateFill(testAccVcdVersion, params) - params["FuncName"] = params["FuncName"].(string) + "-step2" + params["FuncName"] = t.Name() + "-step2" params["FailIfNotMatch"] = "true" step2 := templateFill(testAccVcdVersion, params) - params["FuncName"] = params["FuncName"].(string) + "-step3" + params["FuncName"] = t.Name() + "-step3" params["Condition"] = "= " + currentVersion step3 := templateFill(testAccVcdVersion, params) @@ -53,7 +53,7 @@ func TestAccVcdVersion(t *testing.T) { { Config: step1, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=false", currentVersion, params["Condition"])), + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='>= 99.99.99',fail_if_not_match='false'", currentVersion)), resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "false"), @@ -66,7 +66,7 @@ func TestAccVcdVersion(t *testing.T) { { Config: step3, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version=%s,condition=%s,fail_if_not_match=true", currentVersion, params["Condition"])), + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='= %s',fail_if_not_match='true'", currentVersion, currentVersion)), resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), From dfe18bc42af02bd3bbab04a7310b4305e5ae7f39 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 15:29:48 +0100 Subject: [PATCH 117/156] Improve test Signed-off-by: abarreiro --- vcd/datasource_vcd_version_test.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index f0fadfca4..67c818797 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -41,11 +41,19 @@ func TestAccVcdVersion(t *testing.T) { params["Condition"] = "= " + currentVersion step3 := templateFill(testAccVcdVersion, params) + params["FuncName"] = t.Name() + "-step4" + params["Condition"] = " " // Not used, but illustrates the point of this check + params["FailIfNotMatch"] = " " + step4 := templateFill(testAccVcdVersionWithoutArguments, params) + if vcdShortTest { t.Skip(acceptanceTestsSkipped) return } debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) + debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) + debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) + debugPrintf("#[DEBUG] 
CONFIGURATION step4: %s", step4) resource.ParallelTest(t, resource.TestCase{ ProviderFactories: testAccProviders, @@ -72,6 +80,15 @@ func TestAccVcdVersion(t *testing.T) { resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), ), }, + { + Config: step4, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='',fail_if_not_match='false'", currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckNoResourceAttr("data.vcd_version.version", "matches_condition"), + ), + }, }, }) postTestChecks(t) @@ -83,3 +100,8 @@ data "vcd_version" "version" { fail_if_not_match = {{.FailIfNotMatch}} } ` + +const testAccVcdVersionWithoutArguments = ` +data "vcd_version" "version" { +} +` From 1ef77274052cb8571d08c2dc824500f8c2375d7b Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 16:16:45 +0100 Subject: [PATCH 118/156] Add new test, still need to pass Signed-off-by: abarreiro --- .../3.11-cse-install-1-provider-config.tf | 2 +- ...esource_vcd_cse_kubernetes_cluster_test.go | 90 ++++++++++++++++++- 2 files changed, 89 insertions(+), 3 deletions(-) diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf index d522b0025..9f7db5632 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf @@ -43,4 +43,4 @@ data "vcd_version" "cse_minimum_supported" { data "vcd_version" "gte_1051" { condition = ">= 10.5.1" fail_if_not_match = false -} \ No newline at end of file +} diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 00b682556..a9e845682 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -64,13 +64,16 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "Vdc": testConfig.Cse.Vdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, - "TokenName": t.Name() + "2", // FIXME: Remove suffix + "TokenName": t.Name(), "TokenFile": tokenFilename, "ControlPlaneCount": 1, "NodePoolCount": 1, "ExtraWorkerPool": " ", + "PodsCidr": "100.96.0.0/11", + "ServicesCidr": "100.64.0.0/13", "AutoRepairOnErrors": true, "NodeHealthCheck": true, + "Timeout": 150, } testParamsNotEmpty(t, params) @@ -420,6 +423,86 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { postTestChecks(t) } +func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { + preTestChecks(t) + requireCseConfig(t, testConfig) + + vcdClient := createSystemTemporaryVCDConnection() + + cseVersion, err := semver.NewVersion(testConfig.Cse.Version) + if err != nil { + t.Fatal(err) + } + + tokenFilename := getCurrentDir() + t.Name() + ".json" + defer func() { + // Clean the API Token file + if fileExists(tokenFilename) { + err := os.Remove(tokenFilename) + if err != nil { + fmt.Printf("could not delete API token file '%s', please delete it manually", tokenFilename) + } + } + }() + + clusterName := strings.ToLower(t.Name()) + + var params = StringMap{ + "CseVersion": testConfig.Cse.Version, + "Name": clusterName, + "OvaCatalog": 
testConfig.Cse.OvaCatalog, + "OvaName": testConfig.Cse.OvaName, + "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", + "SolutionsOrg": testConfig.Cse.SolutionsOrg, + "TenantOrg": testConfig.Cse.TenantOrg, + "Vdc": testConfig.Cse.Vdc, + "EdgeGateway": testConfig.Cse.EdgeGateway, + "Network": testConfig.Cse.RoutedNetwork, + "TokenName": t.Name(), + "TokenFile": tokenFilename, + "ControlPlaneCount": 1, + "NodePoolCount": 1, + "ExtraWorkerPool": " ", + "PodsCidr": "1.2.3.4/24", // This will make the cluster to fail + "ServicesCidr": "5.6.7.8/24", // This will make the cluster to fail + "AutoRepairOnErrors": true, + "NodeHealthCheck": true, + "Timeout": 150, + } + testParamsNotEmpty(t, params) + + step1 := templateFill(testAccVcdCseKubernetesCluster, params) + + if vcdShortTest { + t.Skip(acceptanceTestsSkipped) + return + } + resource.Test(t, resource.TestCase{ + ProviderFactories: testAccProviders, + CheckDestroy: func(state *terraform.State) error { + org, err := vcdClient.GetOrgByName(testConfig.Cse.TenantOrg) + if err != nil { + return fmt.Errorf("could not check cluster deletion: %s", err) + } + clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, clusterName) + if err != nil { + return fmt.Errorf("could not check cluster deletion: %s", err) + } + if len(clusters) != 0 { + return fmt.Errorf("there are still %d clusters with name '%s': %s", len(clusters), clusterName, err) + } + return nil + }, + Steps: []resource.TestStep{ + { + Config: step1, + ExpectError: regexp.MustCompile("asd"), + }, + }, + }) + postTestChecks(t) +} + const testAccVcdCseKubernetesCluster = ` # skip-binary-test - This one requires a very special setup @@ -501,9 +584,12 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { filesystem = "ext4" } + pods_cidr = "{{.PodsCidr}}" + services_cidr = "{{.ServicesCidr}}" + auto_repair_on_errors = {{.AutoRepairOnErrors}} node_health_check = {{.NodeHealthCheck}} - operations_timeout_minutes = 150 + operations_timeout_minutes = {{.Timeout}} } ` From f8b959abbba7fcb5b9251e0daca712e1d31b0613 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 17:26:06 +0100 Subject: [PATCH 119/156] Self-review Signed-off-by: abarreiro --- .../install/step2/3.11-cse-install-4-provider-config.tf | 2 +- .../install/step1/3.11-cse-install-1-provider-config.tf | 2 +- .../install/step2/3.11-cse-install-4-provider-config.tf | 2 +- vcd/datasource_vcd_version_test.go | 8 ++++---- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 8 +++++++- 6 files changed, 15 insertions(+), 9 deletions(-) diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf index 794613fb3..af28b8d44 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf @@ -45,4 +45,4 @@ provider "vcd" { data "vcd_version" "cse_minimum_supported" { condition = ">= 10.4.2" fail_if_not_match = true -} \ No newline at end of file +} diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf index 3df620368..11c4b56bd 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf +++ 
b/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf @@ -43,4 +43,4 @@ data "vcd_version" "cse_minimum_supported" { data "vcd_version" "gte_1051" { condition = ">= 10.5.1" fail_if_not_match = false -} \ No newline at end of file +} diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf index 4fe844023..0184ef03d 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf @@ -45,4 +45,4 @@ provider "vcd" { data "vcd_version" "cse_minimum_supported" { condition = ">= 10.4.2" fail_if_not_match = true -} \ No newline at end of file +} diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index 67c818797..c04b6962a 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -32,28 +32,28 @@ func TestAccVcdVersion(t *testing.T) { testParamsNotEmpty(t, params) step1 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) params["FuncName"] = t.Name() + "-step2" params["FailIfNotMatch"] = "true" step2 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) params["FuncName"] = t.Name() + "-step3" params["Condition"] = "= " + currentVersion step3 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) params["FuncName"] = t.Name() + "-step4" params["Condition"] = " " // Not used, but illustrates the point of this check params["FailIfNotMatch"] = " " step4 := templateFill(testAccVcdVersionWithoutArguments, params) + debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) if vcdShortTest { t.Skip(acceptanceTestsSkipped) return } - debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) - debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) - debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) - debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) resource.ParallelTest(t, resource.TestCase{ ProviderFactories: testAccProviders, diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index cbf1f781d..dd8a52d7f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -479,7 +479,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour d.SetId(cluster.ID) if cluster.State != "provisioned" { - return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: '%s'", err) + return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state (it ended in '%s' state): %s", cluster.State, err) } return resourceVcdCseKubernetesRead(ctx, d, meta) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index a9e845682..61563c3c4 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -78,21 +78,25 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { testParamsNotEmpty(t, params) step1 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) params["FuncName"] = t.Name() + "Step2" params["ControlPlaneCount"] = 3 step2 := 
templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) params["FuncName"] = t.Name() + "Step3" params["ControlPlaneCount"] = 1 params["NodePoolCount"] = 2 step3 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) params["FuncName"] = t.Name() + "Step4" params["ControlPlaneCount"] = 1 params["NodePoolCount"] = 1 params["NodeHealthCheck"] = false step4 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) extraWorkerPool := " worker_pool {\n" + " name = \"worker-pool-2\"\n" + @@ -106,6 +110,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { params["NodeHealthCheck"] = true params["ExtraWorkerPool"] = extraWorkerPool step5 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step5: %s", step5) if vcdShortTest { t.Skip(acceptanceTestsSkipped) @@ -472,6 +477,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { testParamsNotEmpty(t, params) step1 := templateFill(testAccVcdCseKubernetesCluster, params) + debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) if vcdShortTest { t.Skip(acceptanceTestsSkipped) @@ -496,7 +502,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { Steps: []resource.TestStep{ { Config: step1, - ExpectError: regexp.MustCompile("asd"), + ExpectError: regexp.MustCompile(`Kubernetes cluster creation finished, but it is not in 'provisioned' state \(it ended in 'error' state\)`), }, }, }) From 81fac154b7d934f090e76bf89af27ea5d7531b8f Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 17:35:19 +0100 Subject: [PATCH 120/156] Self-review Signed-off-by: abarreiro --- vcd/sample_vcd_test_config.json | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index 56cc737b2..da6bbc0e5 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -226,11 +226,24 @@ "VCD_ADD_PROVIDER": "", "REMOVE_ORG_VDC_FROM_TEMPLATE": "" }, + "cse": { + "version": "4.1.0", + "solutionsOrg": "solutions_org", + "tenantOrg": "tenant_org", + "vdc": "tenant_vdc", + "routedNetwork": "tenant_net_routed", + "edgeGateway": "tenant_edgegateway", + "ovaCatalog": "tkgm_catalog", + "ovaName": "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" + }, + "cse" :{ "//" : "Only needed to test Container Service Extension specific resources", + "version": "4.2.0", "solutionsOrg" : "solutions_org", + "storageProfile": "*", "tenantOrg" : "tenant_org", - "vdc": "tenant_vdc", + "tenantVdc": "tenant_vdc", "routedNetwork": "tenant_net_routed", "edgeGateway": "tenant_edgegateway", "ovaCatalog": "tkgm_catalog", From edb317019af08b7c1188882e601b37664420cf08 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 17:38:03 +0100 Subject: [PATCH 121/156] Self-review Signed-off-by: abarreiro --- vcd/config_test.go | 17 +++++++++-------- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 4 ++-- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/vcd/config_test.go b/vcd/config_test.go index 09bb0c2fd..13cb52e4e 100644 --- a/vcd/config_test.go +++ b/vcd/config_test.go @@ -232,14 +232,15 @@ type TestConfig struct { } `json:"testEnvBuild"` EnvVariables map[string]string `json:"envVariables,omitempty"` Cse struct { - Version string `json:"version,omitempty"` - SolutionsOrg string `json:"solutionsOrg,omitempty"` - TenantOrg string 
`json:"tenantOrg,omitempty"` - Vdc string `json:"vdc,omitempty"` - OvaCatalog string `json:"ovaCatalog,omitempty"` - OvaName string `json:"ovaName,omitempty"` - RoutedNetwork string `json:"routedNetwork,omitempty"` - EdgeGateway string `json:"edgeGateway,omitempty"` + Version string `json:"version,omitempty"` + StorageProfile string `json:"storageProfile,omitempty"` + SolutionsOrg string `json:"solutionsOrg,omitempty"` + TenantOrg string `json:"tenantOrg,omitempty"` + TenantVdc string `json:"tenantVdc,omitempty"` + OvaCatalog string `json:"ovaCatalog,omitempty"` + OvaName string `json:"ovaName,omitempty"` + RoutedNetwork string `json:"routedNetwork,omitempty"` + EdgeGateway string `json:"edgeGateway,omitempty"` } `json:"cse,omitempty"` } diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 61563c3c4..5bc67ee96 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -61,7 +61,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", "SolutionsOrg": testConfig.Cse.SolutionsOrg, "TenantOrg": testConfig.Cse.TenantOrg, - "Vdc": testConfig.Cse.Vdc, + "Vdc": testConfig.Cse.TenantVdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, "TokenName": t.Name(), @@ -460,7 +460,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { "KubernetesOva": "data.vcd_catalog_vapp_template.tkg_ova.id", "SolutionsOrg": testConfig.Cse.SolutionsOrg, "TenantOrg": testConfig.Cse.TenantOrg, - "Vdc": testConfig.Cse.Vdc, + "Vdc": testConfig.Cse.TenantVdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, "TokenName": t.Name(), From dd0bb5f5de1fdc4c9f7bd4b068d039470880aa66 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 17:57:12 +0100 Subject: [PATCH 122/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 5bc67ee96..41abff035 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -450,7 +450,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { } }() - clusterName := strings.ToLower(t.Name()) + clusterName := "cseKubernetesClusterFailure" // We can't use the test name as it is too long var params = StringMap{ "CseVersion": testConfig.Cse.Version, From f7187ddf037bf2c09a5e5c8a65ecce9c7cd020b1 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 17:58:52 +0100 Subject: [PATCH 123/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 41abff035..9a6d9eaab 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -450,7 +450,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { } }() - clusterName := "cseKubernetesClusterFailure" // We can't use the test name as it is too long + clusterName := "cse-k8s-cluster-failure" // We can't use the test name as it is too long var params = StringMap{ "CseVersion": testConfig.Cse.Version, From d03d56477fa1a169650c4093a8a807d88b6648e5 Mon Sep 17 00:00:00 2001 
From: abarreiro Date: Mon, 4 Mar 2024 18:13:39 +0100 Subject: [PATCH 124/156] fmt hcl Signed-off-by: abarreiro --- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ++++++ website/docs/d/cse_kubernetes_cluster.html.markdown | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json new file mode 100644 index 000000000..c362d0eca --- /dev/null +++ b/vcdTestAccVcdCseKubernetesClusterFailure.json @@ -0,0 +1,6 @@ +{ + "token_type": "API Token", + "refresh_token": "BawuQJ551Ips0Ze72jCwsaut8TzkB3oa", + "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", + "updated_on": "2024-03-04T17:59:35+01:00" + } \ No newline at end of file diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index cd0007920..012d85e1d 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -27,7 +27,7 @@ the desired cluster and obtaining the ID from the displayed information. ```hcl data "vcd_cse_kubernetes_cluster" "my_cluster" { - cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" + cluster_id = "urn:vcloud:entity:vmware:capvcdCluster:e8e82bcc-50a1-484f-9dd0-20965ab3e865" } ``` From 775ab2945b4f3259bc5954b6fc9ff9e5caa3f399 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Mon, 4 Mar 2024 18:13:52 +0100 Subject: [PATCH 125/156] fmt hcl Signed-off-by: abarreiro --- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json deleted file mode 100644 index c362d0eca..000000000 --- a/vcdTestAccVcdCseKubernetesClusterFailure.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "token_type": "API Token", - "refresh_token": "BawuQJ551Ips0Ze72jCwsaut8TzkB3oa", - "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-03-04T17:59:35+01:00" - } \ No newline at end of file From 5c6092eaf1db1e9b2d45f0410595ea73f9b87354 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 10:05:15 +0100 Subject: [PATCH 126/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 9a6d9eaab..64590ca0b 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -470,8 +470,8 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { "ExtraWorkerPool": " ", "PodsCidr": "1.2.3.4/24", // This will make the cluster to fail "ServicesCidr": "5.6.7.8/24", // This will make the cluster to fail - "AutoRepairOnErrors": true, - "NodeHealthCheck": true, + "AutoRepairOnErrors": false, + "NodeHealthCheck": false, "Timeout": 150, } testParamsNotEmpty(t, params) From adde707739fe13726e10337862b4b5413b3bbd90 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 10:11:41 +0100 Subject: [PATCH 127/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index dd8a52d7f..a0efec801 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -479,7 +479,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour d.SetId(cluster.ID) if cluster.State != "provisioned" { - return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state (it ended in '%s' state): %s", cluster.State, err) + return diag.Errorf("Kubernetes cluster creation finished, but it is not in 'provisioned' state: %s", err) } return resourceVcdCseKubernetesRead(ctx, d, meta) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 64590ca0b..5e8e32aa4 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -502,7 +502,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { Steps: []resource.TestStep{ { Config: step1, - ExpectError: regexp.MustCompile(`Kubernetes cluster creation finished, but it is not in 'provisioned' state \(it ended in 'error' state\)`), + ExpectError: regexp.MustCompile(`Kubernetes cluster creation finished, but it is not in 'provisioned' state`), }, }, }) From 3812cb2aec1a8da8783a8285b899c7e66bd7f3e0 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 10:49:18 +0100 Subject: [PATCH 128/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 5e8e32aa4..df8b29e76 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -470,7 +470,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { "ExtraWorkerPool": " ", "PodsCidr": "1.2.3.4/24", // This will make the cluster to fail "ServicesCidr": "5.6.7.8/24", // This will make the cluster to fail - "AutoRepairOnErrors": false, + "AutoRepairOnErrors": false, // This must be false "NodeHealthCheck": false, "Timeout": 150, } diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json new file mode 100644 index 000000000..d5da336ec --- /dev/null +++ b/vcdTestAccVcdCseKubernetesClusterFailure.json @@ -0,0 +1,6 @@ +{ + "token_type": "API Token", + "refresh_token": "UWJ7WNTKouqG6LyONo8FfFL1NFlPSbhy", + "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", + "updated_on": "2024-03-05T10:46:01+01:00" + } \ No newline at end of file From 01ecb5032de981d76b2a6291944e7a6a53a80b90 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 11:00:29 +0100 Subject: [PATCH 129/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 9 +++++---- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ------ 2 files changed, 5 insertions(+), 10 deletions(-) delete mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index df8b29e76..c4da5ac74 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -7,6 +7,7 @@ import ( semver 
"github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/vmware/go-vcloud-director/v2/govcd" "os" "reflect" "regexp" @@ -491,13 +492,13 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { return fmt.Errorf("could not check cluster deletion: %s", err) } clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, clusterName) - if err != nil { + if err != nil && !govcd.IsNotFound(err) { return fmt.Errorf("could not check cluster deletion: %s", err) } - if len(clusters) != 0 { - return fmt.Errorf("there are still %d clusters with name '%s': %s", len(clusters), clusterName, err) + if len(clusters) == 0 || govcd.IsNotFound(err) { + return nil } - return nil + return fmt.Errorf("there are still %d clusters with name '%s': %s", len(clusters), clusterName, err) }, Steps: []resource.TestStep{ { diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json deleted file mode 100644 index d5da336ec..000000000 --- a/vcdTestAccVcdCseKubernetesClusterFailure.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "token_type": "API Token", - "refresh_token": "UWJ7WNTKouqG6LyONo8FfFL1NFlPSbhy", - "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-03-05T10:46:01+01:00" - } \ No newline at end of file From 193bbf400c239dc6022a9c858baef50d332b0167 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 11:46:27 +0100 Subject: [PATCH 130/156] Add DS tests Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 35 +++++++++++++++++-- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ++++ 2 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index c4da5ac74..df7524ddf 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -113,6 +113,10 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { step5 := templateFill(testAccVcdCseKubernetesCluster, params) debugPrintf("#[DEBUG] CONFIGURATION step5: %s", step5) + params["FuncName"] = t.Name() + "Step6" + step6 := templateFill(testAccVcdCseKubernetesCluster+testAccVcdCseKubernetesClusterDS, params) + debugPrintf("#[DEBUG] CONFIGURATION step6: %s", step5) + if vcdShortTest { t.Skip(acceptanceTestsSkipped) return @@ -411,6 +415,15 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, + // Test data sources + { + Config: step6, + Check: resource.ComposeAggregateTestCheckFunc( + resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_id_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file"}), + resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file"}), + resourceFieldsEqual("data.vcd_cse_kubernetes_cluster.with_id_ds", "data.vcd_cse_kubernetes_cluster.with_name_ds", nil), + ), + }, { ResourceName: clusterName, ImportState: true, @@ -421,7 +434,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { // Ignore api_token_file and operations_timeout_minutes as these are not computed from VCD, so they are missing // after any successful import. 
// Ignore also owner and org as these may not be set in the resource configuration, but they are always - // computed. + // set on imports. ImportStateVerifyIgnore: []string{"api_token_file", "operations_timeout_minutes", "owner", "org"}, }, }, @@ -464,7 +477,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { "Vdc": testConfig.Cse.TenantVdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, - "TokenName": t.Name(), + "TokenName": t.Name() + "3", "TokenFile": tokenFilename, "ControlPlaneCount": 1, "NodePoolCount": 1, @@ -600,3 +613,21 @@ resource "vcd_cse_kubernetes_cluster" "my_cluster" { operations_timeout_minutes = {{.Timeout}} } ` + +const testAccVcdCseKubernetesClusterDS = ` +# skip-binary-test - This one requires a very special setup + +data "vcd_org" "tenant_org" { + name = "tenant_org" +} + +data "vcd_cse_kubernetes_cluster" "with_id_ds" { + cluster_id = vcd_cse_kubernetes_cluster.my_cluster.id +} + +data "vcd_cse_kubernetes_cluster" "with_name_ds" { + org_id = data.vcd_org.tenant_org.id + cse_version = vcd_cse_kubernetes_cluster.my_cluster.cse_version + name = vcd_cse_kubernetes_cluster.my_cluster.name +} +` diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json new file mode 100644 index 000000000..c750d11f9 --- /dev/null +++ b/vcdTestAccVcdCseKubernetesClusterFailure.json @@ -0,0 +1,6 @@ +{ + "token_type": "API Token", + "refresh_token": "twVlMN5TTYeoqWtS2yl6E5lFCx3yMDAp", + "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", + "updated_on": "2024-03-05T11:01:34+01:00" + } \ No newline at end of file From 8d4ac4140e21b8efe8a5d157da8c479bfe3b573e Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 11:51:00 +0100 Subject: [PATCH 131/156] # Signed-off-by: abarreiro --- vcdTestAccVcdCseKubernetesClusterFailure.json | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 vcdTestAccVcdCseKubernetesClusterFailure.json diff --git a/vcdTestAccVcdCseKubernetesClusterFailure.json b/vcdTestAccVcdCseKubernetesClusterFailure.json deleted file mode 100644 index c750d11f9..000000000 --- a/vcdTestAccVcdCseKubernetesClusterFailure.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "token_type": "API Token", - "refresh_token": "twVlMN5TTYeoqWtS2yl6E5lFCx3yMDAp", - "updated_by": "terraform-provider-vcd/test (darwin/amd64; isProvider:true)", - "updated_on": "2024-03-05T11:01:34+01:00" - } \ No newline at end of file From 5b40e8d9fd92d387d0a6df0cafcccebd203e400a Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 11:56:58 +0100 Subject: [PATCH 132/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index df7524ddf..13fcddc3e 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -505,10 +505,10 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { return fmt.Errorf("could not check cluster deletion: %s", err) } clusters, err := org.CseGetKubernetesClustersByName(*cseVersion, clusterName) - if err != nil && !govcd.IsNotFound(err) { + if err != nil && !govcd.ContainsNotFound(err) { return fmt.Errorf("could not check cluster deletion: %s", err) } - if len(clusters) == 0 || govcd.IsNotFound(err) { + if len(clusters) == 0 || govcd.ContainsNotFound(err) { return nil } return fmt.Errorf("there are 
still %d clusters with name '%s': %s", len(clusters), clusterName, err) From 56106b8ca3af9fc00d0c7f0ac2239a6480c636ad Mon Sep 17 00:00:00 2001 From: abarreiro Date: Tue, 5 Mar 2024 15:30:16 +0100 Subject: [PATCH 133/156] fix docs Signed-off-by: abarreiro --- website/docs/r/cse_kubernetes_cluster.html.markdown | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 0f8297b70..cc2a02cb5 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -271,12 +271,12 @@ such as `terraform plan`. Each comment in the code gives some context about how # This must NOT be created with Terraform beforehand, it is just a shell that will receive the information # None of the arguments are required during the Import phase, but they will be asked when operating it afterwards resource "vcd_cse_kubernetes_cluster" "imported_cluster" { - name = "test2" # The name of the existing cluster - cse_version = "4.2.0" # The CSE version installed in your VCD - kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources - vdc_id = data.vcd_org_vdc.vdc.id # See below data sources - network_id = data.vcd_network_routed_v2.routed.id # See below data sources - node_health_check = true # Whether the existing cluster has Machine Health Check enabled or not, this can be checked in UI + name = "test2" # The name of the existing cluster + cse_version = "4.2.0" # The CSE version installed in your VCD + kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources + vdc_id = data.vcd_org_vdc.vdc.id # See below data sources + network_id = data.vcd_network_routed_v2.routed.id # See below data sources + node_health_check = true # Whether the existing cluster has Machine Health Check enabled or not, this can be checked in UI control_plane { machine_count = 5 # This is optional, but not setting it to the current value will make subsequent plans to try to scale our existing cluster to the default one From 62a0738c6d3c1dbd3da9611f055eaf832b5cb43c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 09:11:52 +0100 Subject: [PATCH 134/156] Fix changelog Signed-off-by: abarreiro --- .changes/v3.12.0/1195-features.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md index 86c0c8771..5824a25f1 100644 --- a/.changes/v3.12.0/1195-features.md +++ b/.changes/v3.12.0/1195-features.md @@ -1,5 +1,5 @@ -* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension 4.2 - installed and running [GH-1195] -* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension 4.2 - installed and running [GH-1195] +* **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension + 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] +* **New Data Source:** `vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension + 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] * **New Data Source:** `vcd_version` to get the VCD version and perform additional checks with version constraints [GH-1195] \ No newline at end of file From 3f20c6dab7e0b76bbaa1ead34c34123a217d8b4c 
Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 09:17:27 +0100 Subject: [PATCH 135/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 13fcddc3e..2b1f8dd6f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -87,6 +87,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) params["FuncName"] = t.Name() + "Step3" + params["AutoRepairOnErrors"] = "false" // Deactivate it to avoid non-empty plans. Also, it is recommended after cluster creation params["ControlPlaneCount"] = 1 params["NodePoolCount"] = 2 step3 := templateFill(testAccVcdCseKubernetesCluster, params) @@ -140,6 +141,15 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { // Basic scenario of cluster creation { Config: step1, + ExpectNonEmptyPlan: func() bool { + // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1, + // so it will return a non-empty plan + if cseVersion.GreaterThanOrEqual(v411) { + return true + } else { + return false + } + }(), Check: resource.ComposeAggregateTestCheckFunc( cacheId.cacheTestResourceFieldValue(clusterName, "id"), resource.TestMatchResourceAttr(clusterName, "id", regexp.MustCompile(`^urn:vcloud:entity:vmware:capvcdCluster:.+$`)), From d97995befd616b8b93cef80578cf036cd3774c77 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 11:24:27 +0100 Subject: [PATCH 136/156] Bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 36024c6b9..3841616e4 100644 --- a/go.mod +++ b/go.mod @@ -67,4 +67,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240304135714-39b8d8535d91 +replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe diff --git a/go.sum b/go.sum index f66b5b9f6..4146048d3 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240304135714-39b8d8535d91 h1:jI+ISHhCgskDWp9xTNzQDNM+1juikN6ChmFLfDEiVIc= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240304135714-39b8d8535d91/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe h1:92YhSbwwR+wTyyLo/a67Bo2XqTu/Knb4/HC8DF8ozHQ= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 
v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 11ebdedd22a675615d6b45885eb107aacf6324c2 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 12:58:32 +0100 Subject: [PATCH 137/156] Add CSE v4.2.1 Signed-off-by: abarreiro --- .changes/v3.12.0/1195-features.md | 4 ++-- .../entities/vcdkeconfig.json.template | 0 .../step1/3.11-cse-install-1-provider-config.tf | 2 +- .../3.11-cse-install-2-cse-server-prerequisites.tf | 6 +++--- .../step1/3.11-cse-install-3-cse-server-settings.tf | 2 +- .../install/step1/terraform.tfvars.example | 2 +- .../{v4.2.0 => v4.2}/install/step1/variables.tf | 0 .../step2/3.11-cse-install-4-provider-config.tf | 2 +- .../step2/3.11-cse-install-5-infrastructure.tf | 2 +- .../install/step2/3.11-cse-install-6-ovas.tf | 2 +- .../step2/3.11-cse-install-7-cse-server-init.tf | 2 +- .../install/step2/3.11-cse-install-8-optionals.tf | 2 +- .../install/step2/terraform.tfvars.example | 0 .../{v4.2.0 => v4.2}/install/step2/variables.tf | 0 .../schemas/capvcd-type-schema-v1.3.0.json | 0 .../schemas/vcdkeconfig-type-schema-v1.1.0.json | 0 vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- website/docs/d/cse_kubernetes_cluster.html.markdown | 3 ++- ...ainer_service_extension_4_x_install.html.markdown | 12 ++++++------ website/docs/r/cse_kubernetes_cluster.html.markdown | 9 +++++---- 20 files changed, 27 insertions(+), 25 deletions(-) rename examples/container-service-extension/{v4.2.0 => v4.2}/entities/vcdkeconfig.json.template (100%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step1/3.11-cse-install-1-provider-config.tf (98%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf (98%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step1/3.11-cse-install-3-cse-server-settings.tf (98%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step1/terraform.tfvars.example (98%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step1/variables.tf (100%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/3.11-cse-install-4-provider-config.tf (98%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/3.11-cse-install-5-infrastructure.tf (99%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/3.11-cse-install-6-ovas.tf (99%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/3.11-cse-install-7-cse-server-init.tf (99%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/3.11-cse-install-8-optionals.tf (97%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/terraform.tfvars.example (100%) rename examples/container-service-extension/{v4.2.0 => v4.2}/install/step2/variables.tf (100%) rename examples/container-service-extension/{v4.2.0 => v4.2}/schemas/capvcd-type-schema-v1.3.0.json (100%) rename examples/container-service-extension/{v4.2.0 => v4.2}/schemas/vcdkeconfig-type-schema-v1.1.0.json (100%) diff --git a/.changes/v3.12.0/1195-features.md b/.changes/v3.12.0/1195-features.md index 5824a25f1..39903b4c7 100644 --- a/.changes/v3.12.0/1195-features.md +++ b/.changes/v3.12.0/1195-features.md @@ -1,5 +1,5 @@ * **New Resource:** `vcd_cse_kubernetes_cluster` to create and manage Kubernetes clusters in a VCD with Container Service Extension - 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] + 4.2.1, 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] * **New Data Source:** 
`vcd_cse_kubernetes_cluster` to read Kubernetes clusters from a VCD with Container Service Extension - 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] + 4.2.1, 4.2.0, 4.1.1 or 4.1.0 installed and running [GH-1195] * **New Data Source:** `vcd_version` to get the VCD version and perform additional checks with version constraints [GH-1195] \ No newline at end of file diff --git a/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template b/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template similarity index 100% rename from examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template rename to examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf similarity index 98% rename from examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf rename to examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf index 11c4b56bd..82b3c07fc 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation, step 1: +# CSE 4.2 installation, step 1: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf similarity index 98% rename from examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf rename to examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index f87775322..23dadbb19 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation, step 1: +# CSE 4.2 installation, step 1: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
@@ -32,7 +32,7 @@ resource "vcd_rde_type" "vcdkeconfig_type" { nss = "VCDKEConfig" version = "1.1.0" name = "VCD-KE RDE Schema" - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json" interface_ids = [vcd_rde_interface.vcdkeconfig_interface.id] } @@ -73,7 +73,7 @@ resource "vcd_rde_type" "capvcdcluster_type" { nss = "capvcdCluster" version = "1.3.0" name = "CAPVCD Cluster" - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json" interface_ids = [data.vcd_rde_interface.kubernetes_interface.id] depends_on = [vcd_rde_interface_behavior.capvcd_behavior] # Interface Behaviors must be created before any RDE Type diff --git a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf similarity index 98% rename from examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf rename to examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf index 270000e5d..78efdaf2e 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/3.11-cse-install-3-cse-server-settings.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation, step 1: +# CSE 4.2 installation, step 1: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. diff --git a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example b/examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example similarity index 98% rename from examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example rename to examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example index b4a5f86f3..2d1d3f7ec 100644 --- a/examples/container-service-extension/v4.2.0/install/step1/terraform.tfvars.example +++ b/examples/container-service-extension/v4.2/install/step1/terraform.tfvars.example @@ -28,7 +28,7 @@ cse_admin_password = "change-me" # These are required to create the Runtime Defined Entity that will contain the CSE Server configuration (vcdKeConfig) # To know more about the specific versions, please refer to the CSE documentation. 
-# The values set here correspond to CSE 4.2.0: +# The values set here correspond to CSE 4.2: vcdkeconfig_template_filepath = "../../entities/vcdkeconfig.json.template" capvcd_version = "1.2.0" cpi_version = "1.5.0" diff --git a/examples/container-service-extension/v4.2.0/install/step1/variables.tf b/examples/container-service-extension/v4.2/install/step1/variables.tf similarity index 100% rename from examples/container-service-extension/v4.2.0/install/step1/variables.tf rename to examples/container-service-extension/v4.2/install/step1/variables.tf diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf similarity index 98% rename from examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf rename to examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf index 0184ef03d..a7e452607 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation, step 2: +# CSE 4.2 installation, step 2: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf similarity index 99% rename from examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf rename to examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf index 7d6961cad..14b9b53e8 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-5-infrastructure.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation: +# CSE 4.2 installation: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf similarity index 99% rename from examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf rename to examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf index 5b2dfa919..9c7202174 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-6-ovas.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation: +# CSE 4.2 installation: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf similarity index 99% rename from examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf rename to examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf index 1d7db0eda..3463e2214 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-7-cse-server-init.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation: +# CSE 4.2 installation: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. diff --git a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf similarity index 97% rename from examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf rename to examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf index fe2c0c0d6..635d2717b 100644 --- a/examples/container-service-extension/v4.2.0/install/step2/3.11-cse-install-8-optionals.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf @@ -1,5 +1,5 @@ # ------------------------------------------------------------------------------------------------------------ -# CSE 4.2.0 installation: +# CSE 4.2 installation: # # * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
diff --git a/examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example b/examples/container-service-extension/v4.2/install/step2/terraform.tfvars.example similarity index 100% rename from examples/container-service-extension/v4.2.0/install/step2/terraform.tfvars.example rename to examples/container-service-extension/v4.2/install/step2/terraform.tfvars.example diff --git a/examples/container-service-extension/v4.2.0/install/step2/variables.tf b/examples/container-service-extension/v4.2/install/step2/variables.tf similarity index 100% rename from examples/container-service-extension/v4.2.0/install/step2/variables.tf rename to examples/container-service-extension/v4.2/install/step2/variables.tf diff --git a/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json b/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json similarity index 100% rename from examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json rename to examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json diff --git a/examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json b/examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json similarity index 100% rename from examples/container-service-extension/v4.2.0/schemas/vcdkeconfig-type-schema-v1.1.0.json rename to examples/container-service-extension/v4.2/schemas/vcdkeconfig-type-schema-v1.1.0.json diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index a0efec801..fcd0a2def 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -28,7 +28,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0"}, false), + ValidateFunc: validation.StringInSlice([]string{"4.1.0", "4.1.1", "4.2.0", "4.2.1"}, false), Description: "The CSE version to use", DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { // This custom diff function allows to correctly compare versions. 
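With `4.2.1` added to the accepted values in the validator above, a configuration can pin the new patch release directly. A minimal sketch, using the same argument set as the existing 4.2.0 example (the organization and cluster names are placeholders):

```hcl
data "vcd_org" "org" {
  name = "tenant_org" # placeholder organization
}

# Reads an existing cluster managed by CSE 4.2.1; only the version string
# changes with respect to the 4.2.0 example.
data "vcd_cse_kubernetes_cluster" "cluster" {
  org_id      = data.vcd_org.org.id
  cse_version = "4.2.1"
  name        = "my-cluster" # placeholder cluster name
}
```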
diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index 012d85e1d..78752907a 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -17,6 +17,7 @@ Supports the following **Container Service Extension** versions: * 4.1.0 * 4.1.1 * 4.2.0 +* 4.2.1 -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) @@ -45,7 +46,7 @@ locals { data "vcd_cse_kubernetes_cluster" "my_clusters" { for_each = local.my_clusters org_id = data.vcd_org.org.id - cse_version = "4.2.0" + cse_version = "4.2.1" name = each.key } ``` diff --git a/website/docs/guides/container_service_extension_4_x_install.html.markdown b/website/docs/guides/container_service_extension_4_x_install.html.markdown index 81a5d799d..d87d4c58c 100644 --- a/website/docs/guides/container_service_extension_4_x_install.html.markdown +++ b/website/docs/guides/container_service_extension_4_x_install.html.markdown @@ -29,7 +29,7 @@ In order to complete the steps described in this guide, please be aware: ## Installation process --> To install CSE 4.2, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install). +-> To install CSE 4.2, this guide will make use of the example Terraform configuration located [here](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install). You can check it, customise it to your needs and apply. However, reading this guide first is recommended to understand what it does and how to use it. The installation process is split in two independent steps that must be run one after the other: @@ -76,7 +76,7 @@ To customise it, the [step 1 configuration][step1] asks for the following variab * `vcdkeconfig_template_filepath` references a local file that defines the `VCDKEConfig` [RDE][rde] contents. It should be a JSON file with template variables that Terraform can interpret, like - [the RDE template file for CSE 4.2](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/entities/vcdkeconfig.json.template) + [the RDE template file for CSE 4.2](https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/entities/vcdkeconfig.json.template) used in the step 1 configuration, that can be rendered correctly with the Terraform built-in function `templatefile`. (Note: In `terraform.tfvars.example` the path for the CSE 4.2 RDE contents is already provided). * `capvcd_version`: The version for CAPVCD. The default value is **"1.1.0"** for CSE 4.2. @@ -172,7 +172,7 @@ Then it will upload the required OVAs to them. The OVAs can be specified in `ter * `tkgm_ova_folder`: This will reference the path to the TKGm OVA, as an absolute or relative path. It should **not** end with a trailing `/`. * `tkgm_ova_files`: This will reference the file names of the TKGm OVAs, like `[ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc.ova, ubuntu-2004-kube-v1.24.11+vmware.1-tkg.1-2ccb2a001f8bd8f15f1bfbc811071830.ova]`. * `cse_ova_folder`: This will reference the path to the CSE OVA, as an absolute or relative path. It should **not** end with a trailing `/`. 
-* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.2.0.ova`. +* `cse_ova_file`: This will reference the file name of the CSE OVA, like `VMware_Cloud_Director_Container_Service_Extension-4.2.1.ova`. -> To download the required OVAs, please refer to the [CSE documentation][cse_docs]. You can also check the [Product Interoperability Matrix][product_matrix] to confirm the appropriate version of TKGm. @@ -355,7 +355,7 @@ resource "vcd_rde_type" "capvcdcluster_type_v130" { # Same attributes as 4.1, except for: version = "1.3.0" # New version # New schema: - schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2.0/schemas/capvcd-type-schema-v1.3.0.json" + schema_url = "https://raw.githubusercontent.com/vmware/terraform-provider-vcd/main/examples/container-service-extension/v4.2/schemas/capvcd-type-schema-v1.3.0.json" # Behaviors need to be created before any RDE Type depends_on = [vcd_rde_interface_behavior.capvcd_behavior] } @@ -504,8 +504,8 @@ Once all clusters are removed in the background by CSE Server, you may destroy t [role]: /providers/vmware/vcd/latest/docs/resources/role [routed_network]: /providers/vmware/vcd/latest/docs/resources/network_routed_v2 [sizing]: /providers/vmware/vcd/latest/docs/resources/vm_sizing_policy -[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install/step1 -[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2.0/install/step2 +[step1]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install/step1 +[step2]: https://github.com/vmware/terraform-provider-vcd/tree/main/examples/container-service-extension/v4.2/install/step2 [tkgm_docs]: https://docs.vmware.com/en/VMware-Tanzu-Kubernetes-Grid/index.html [user]: /providers/vmware/vcd/latest/docs/resources/org_user [ui_plugin]: /providers/vmware/vcd/latest/docs/resources/ui_plugin diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index cc2a02cb5..ed8242970 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -17,6 +17,7 @@ Supports the following **Container Service Extension** versions: * 4.1.0 * 4.1.1 * 4.2.0 +* 4.2.1 -> To install CSE in VMware Cloud Director, please follow [this guide](/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install) @@ -72,7 +73,7 @@ resource "vcd_api_token" "token" { } resource "vcd_cse_kubernetes_cluster" "my_cluster" { - cse_version = "4.2.0" + cse_version = "4.2.1" runtime = "tkg" name = "test2" kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id @@ -118,7 +119,7 @@ output "kubeconfig" { The following arguments are supported: -* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1` and `4.2.0` +* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1`, `4.2.0` and `4.2.1` * `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) * `name` - (Required) The name of the Kubernetes cluster. 
It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters @@ -225,7 +226,7 @@ Only the following arguments can be updated: * `machine_count` of any `worker_pool`: Supports scaling up and down. Use caution when resizing down to 0 nodes. The cluster must always have at least 1 running node, or else the cluster will enter an unrecoverable error state. * `auto_repair_on_errors`: Can only be updated in CSE 4.1.0, and it is recommended to set it to `false` when the cluster is created. - In versions 4.1.1 and 4.2.0, this is automatically done by the CSE Server, so this flag cannot be updated. + In versions higher than 4.1.0, this is automatically done by the CSE Server, so this flag cannot be updated. * `node_health_check`: Can be turned on/off. * `operations_timeout_minutes`: Does not require modifying the existing cluster @@ -272,7 +273,7 @@ such as `terraform plan`. Each comment in the code gives some context about how # None of the arguments are required during the Import phase, but they will be asked when operating it afterwards resource "vcd_cse_kubernetes_cluster" "imported_cluster" { name = "test2" # The name of the existing cluster - cse_version = "4.2.0" # The CSE version installed in your VCD + cse_version = "4.2.1" # The CSE version installed in your VCD kubernetes_template_id = data.vcd_catalog_vapp_template.tkg_ova.id # See below data sources vdc_id = data.vcd_org_vdc.vdc.id # See below data sources network_id = data.vcd_network_routed_v2.routed.id # See below data sources From 9527702410278c3b148c2f2b6ae1af5a45d48a4d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 13:40:26 +0100 Subject: [PATCH 138/156] Fix test, attempt Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 2b1f8dd6f..d15d49322 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -82,12 +82,12 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { debugPrintf("#[DEBUG] CONFIGURATION step1: %s", step1) params["FuncName"] = t.Name() + "Step2" + params["AutoRepairOnErrors"] = "false" // Deactivate it to avoid non-empty plans. Also, it is recommended after cluster creation params["ControlPlaneCount"] = 3 step2 := templateFill(testAccVcdCseKubernetesCluster, params) debugPrintf("#[DEBUG] CONFIGURATION step2: %s", step2) params["FuncName"] = t.Name() + "Step3" - params["AutoRepairOnErrors"] = "false" // Deactivate it to avoid non-empty plans. 
Also, it is recommended after cluster creation params["ControlPlaneCount"] = 1 params["NodePoolCount"] = 2 step3 := templateFill(testAccVcdCseKubernetesCluster, params) From b9d1535fad9c17e5503f33b429cc80890d2010fe Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 15:33:07 +0100 Subject: [PATCH 139/156] Fix test, attempt Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 6 +++--- website/docs/r/cse_kubernetes_cluster.html.markdown | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index d15d49322..59709ee74 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -429,9 +429,9 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { { Config: step6, Check: resource.ComposeAggregateTestCheckFunc( - resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_id_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file"}), - resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file"}), - resourceFieldsEqual("data.vcd_cse_kubernetes_cluster.with_id_ds", "data.vcd_cse_kubernetes_cluster.with_name_ds", nil), + resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_id_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file", "events"}), + resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file", "events"}), + resourceFieldsEqual("data.vcd_cse_kubernetes_cluster.with_id_ds", "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"events"}), ), }, { diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index ed8242970..f34c214ce 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -251,7 +251,7 @@ output "kubeconfig" { Then, creating a file turns out to be trivial: ```shell -terraform output kubeconfig > $HOME/kubeconfig +terraform output -raw kubeconfig > $HOME/kubeconfig ``` The Kubeconfig can now be used with `kubectl` and the Kubernetes cluster can be used. 
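The switch from `terraform output kubeconfig` to `terraform output -raw kubeconfig` matters because, without `-raw`, Terraform prints string outputs as a quoted value with escaped newlines, which is not a usable kubeconfig file; `-raw` writes the literal string. A minimal sketch of the output that the shell one-liner reads from, assuming the `my_cluster` resource from the guide's own example; whether the `sensitive` flag is strictly required depends on the provider schema, so treat it as an assumption:

```hcl
# Exposes the cluster's kubeconfig so it can be written to disk with
# 'terraform output -raw kubeconfig > $HOME/kubeconfig'.
output "kubeconfig" {
  value     = vcd_cse_kubernetes_cluster.my_cluster.kubeconfig
  sensitive = true # assumption: keeps the credential out of plain plan/apply output
}
```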
From cf1a5e839c9d9035abc4648f2c863281a3b1510c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 15:40:46 +0100 Subject: [PATCH 140/156] Fix nil pointer Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index fcd0a2def..f1d20bb0f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -739,14 +739,16 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste return nil, err } - err = d.Set("default_storage_class", []map[string]interface{}{{ - "storage_profile_id": cluster.DefaultStorageClass.StorageProfileId, - "name": cluster.DefaultStorageClass.Name, - "reclaim_policy": cluster.DefaultStorageClass.ReclaimPolicy, - "filesystem": cluster.DefaultStorageClass.Filesystem, - }}) - if err != nil { - return nil, err + if cluster.DefaultStorageClass != nil { + err = d.Set("default_storage_class", []map[string]interface{}{{ + "storage_profile_id": cluster.DefaultStorageClass.StorageProfileId, + "name": cluster.DefaultStorageClass.Name, + "reclaim_policy": cluster.DefaultStorageClass.ReclaimPolicy, + "filesystem": cluster.DefaultStorageClass.Filesystem, + }}) + if err != nil { + return nil, err + } } dSet(d, "state", cluster.State) From 6df2fc4cb97962bded95ebb7b96cea017d301f56 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 15:42:36 +0100 Subject: [PATCH 141/156] Fix warning msg Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index f1d20bb0f..c36dda6fc 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -790,7 +790,7 @@ func saveClusterDataToState(d *schema.ResourceData, vcdClient *VCDClient, cluste } else { warnings = append(warnings, fmt.Errorf("the Kubernetes cluster with ID '%s' is in '%s' state, meaning that "+ "the Kubeconfig cannot be retrieved and "+ - "some attributes could be unavailable", d.Id(), cluster.State)) + "some attributes could be unavailable", cluster.ID, cluster.State)) } d.SetId(cluster.ID) From e800b6599ed665ee8dee40be6429e9b2bafaa0b9 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 16:46:01 +0100 Subject: [PATCH 142/156] Fix test, attempt Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 86 ++++++++++++++++++- 1 file changed, 82 insertions(+), 4 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 59709ee74..9954e7b5f 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -124,6 +124,8 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { } cacheId := testCachedFieldValue{} clusterName := "vcd_cse_kubernetes_cluster.my_cluster" + dataWithName := "data.vcd_cse_kubernetes_cluster.with_name_ds" + dataWithId := "data.vcd_cse_kubernetes_cluster.with_id_ds" resource.Test(t, resource.TestCase{ ProviderFactories: testAccProviders, CheckDestroy: func(state *terraform.State) error { @@ -425,13 +427,89 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(clusterName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, - // Test data sources + // 
Test data sources. Can't use resourceFieldsEqual function as we need to ignore the "events" TypeList which has an unknown size { Config: step6, Check: resource.ComposeAggregateTestCheckFunc( - resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_id_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file", "events"}), - resourceFieldsEqual(clusterName, "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"org_id", "org", "operations_timeout_minutes", "api_token_file", "events"}), - resourceFieldsEqual("data.vcd_cse_kubernetes_cluster.with_id_ds", "data.vcd_cse_kubernetes_cluster.with_name_ds", []string{"events"}), + // Data source with name + resource.TestCheckResourceAttrPair(dataWithName, "id", clusterName, "id"), + resource.TestCheckResourceAttrPair(dataWithName, "cse_version", clusterName, "cse_version"), + resource.TestCheckResourceAttrPair(dataWithName, "runtime", clusterName, "runtime"), + resource.TestCheckResourceAttrPair(dataWithName, "name", clusterName, "name"), + resource.TestCheckResourceAttrPair(dataWithName, "kubernetes_template_id", clusterName, "kubernetes_template_id"), + resource.TestMatchResourceAttr(dataWithName, "org_id", regexp.MustCompile(`^urn:vcloud:org:.+$`)), + resource.TestCheckResourceAttrPair(dataWithName, "vdc_id", clusterName, "vdc_id"), + resource.TestCheckResourceAttrPair(dataWithName, "network_id", clusterName, "network_id"), + resource.TestCheckResourceAttrPair(dataWithName, "owner", clusterName, "owner"), + resource.TestCheckResourceAttrPair(dataWithName, "ssh_public_key", clusterName, "ssh_public_key"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.disk_size_gi", clusterName, "control_plane.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.sizing_policy_id", clusterName, "control_plane.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.storage_profile_id", clusterName, "control_plane.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.ip", clusterName, "control_plane.0.ip"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.#", clusterName, "worker_pool.#"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.name", clusterName, "worker_pool.0.name"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.machine_count", clusterName, "worker_pool.0.machine_count"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.disk_size_gi", clusterName, "worker_pool.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.sizing_policy_id", clusterName, "worker_pool.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithName, "worker_pool.0.storage_profile_id", clusterName, "worker_pool.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.storage_profile_id", clusterName, "default_storage_class.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.name", clusterName, "default_storage_class.0.name"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.reclaim_policy", clusterName, "default_storage_class.0.reclaim_policy"), + resource.TestCheckResourceAttrPair(dataWithName, "default_storage_class.0.filesystem", clusterName, "default_storage_class.0.filesystem"), + resource.TestCheckResourceAttrPair(dataWithName, "pods_cidr", clusterName, "pods_cidr"), + 
resource.TestCheckResourceAttrPair(dataWithName, "services_cidr", clusterName, "services_cidr"), + resource.TestCheckResourceAttrPair(dataWithName, "virtual_ip_subnet", clusterName, "virtual_ip_subnet"), + resource.TestCheckResourceAttrPair(dataWithName, "auto_repair_on_errors", clusterName, "auto_repair_on_errors"), + resource.TestCheckResourceAttrPair(dataWithName, "node_health_check", clusterName, "node_health_check"), + resource.TestCheckResourceAttrPair(dataWithName, "kubernetes_version", clusterName, "kubernetes_version"), + resource.TestCheckResourceAttrPair(dataWithName, "tkg_product_version", clusterName, "tkg_product_version"), + resource.TestCheckResourceAttrPair(dataWithName, "capvcd_version", clusterName, "capvcd_version"), + resource.TestCheckResourceAttrPair(dataWithName, "cluster_resource_set_bindings.#", clusterName, "cluster_resource_set_bindings.#"), + resource.TestCheckResourceAttrPair(dataWithName, "cpi_version", clusterName, "cpi_version"), + resource.TestCheckResourceAttrPair(dataWithName, "csi_version", clusterName, "csi_version"), + resource.TestCheckResourceAttrPair(dataWithName, "state", clusterName, "state"), + resource.TestCheckResourceAttrPair(dataWithName, "kubeconfig", clusterName, "kubeconfig"), + resource.TestMatchResourceAttr(dataWithName, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), + + // Data source with ID + resource.TestCheckResourceAttrPair(dataWithId, "id", dataWithName, "id"), + resource.TestCheckResourceAttrPair(dataWithId, "cse_version", dataWithName, "cse_version"), + resource.TestCheckResourceAttrPair(dataWithId, "runtime", dataWithName, "runtime"), + resource.TestCheckResourceAttrPair(dataWithId, "name", dataWithName, "name"), + resource.TestCheckResourceAttrPair(dataWithId, "kubernetes_template_id", dataWithName, "kubernetes_template_id"), + resource.TestCheckResourceAttrPair(dataWithId, "org_id", dataWithName, "org_id"), + resource.TestCheckResourceAttrPair(dataWithId, "vdc_id", dataWithName, "vdc_id"), + resource.TestCheckResourceAttrPair(dataWithId, "network_id", dataWithName, "network_id"), + resource.TestCheckResourceAttrPair(dataWithId, "owner", dataWithName, "owner"), + resource.TestCheckResourceAttrPair(dataWithId, "ssh_public_key", dataWithName, "ssh_public_key"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.disk_size_gi", dataWithName, "control_plane.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.sizing_policy_id", dataWithName, "control_plane.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.storage_profile_id", dataWithName, "control_plane.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithId, "control_plane.0.ip", dataWithName, "control_plane.0.ip"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.#", dataWithName, "worker_pool.#"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.name", dataWithName, "worker_pool.0.name"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.machine_count", dataWithName, "worker_pool.0.machine_count"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.disk_size_gi", dataWithName, "worker_pool.0.disk_size_gi"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.sizing_policy_id", dataWithName, "worker_pool.0.sizing_policy_id"), + resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.storage_profile_id", dataWithName, "worker_pool.0.storage_profile_id"), + resource.TestCheckResourceAttrPair(dataWithId, 
"default_storage_class.0.storage_profile_id", dataWithName, "id"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.name", dataWithName, "default_storage_class.0.name"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.reclaim_policy", dataWithName, "default_storage_class.0.reclaim_policy"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.filesystem", dataWithName, "default_storage_class.0.filesystem"), + resource.TestCheckResourceAttrPair(dataWithId, "pods_cidr", dataWithName, "pods_cidr"), + resource.TestCheckResourceAttrPair(dataWithId, "services_cidr", dataWithName, "services_cidr"), + resource.TestCheckResourceAttrPair(dataWithId, "virtual_ip_subnet", dataWithName, "virtual_ip_subnet"), + resource.TestCheckResourceAttrPair(dataWithId, "auto_repair_on_errors", dataWithName, "auto_repair_on_errors"), + resource.TestCheckResourceAttrPair(dataWithId, "node_health_check", dataWithName, "node_health_check"), + resource.TestCheckResourceAttrPair(dataWithId, "kubernetes_version", dataWithName, "kubernetes_version"), + resource.TestCheckResourceAttrPair(dataWithId, "tkg_product_version", dataWithName, "tkg_product_version"), + resource.TestCheckResourceAttrPair(dataWithId, "capvcd_version", dataWithName, "capvcd_version"), + resource.TestCheckResourceAttrPair(dataWithId, "cluster_resource_set_bindings.#", dataWithName, "cluster_resource_set_bindings.#"), + resource.TestCheckResourceAttrPair(dataWithId, "cpi_version", dataWithName, "cpi_version"), + resource.TestCheckResourceAttrPair(dataWithId, "csi_version", dataWithName, "csi_version"), + resource.TestCheckResourceAttrPair(dataWithId, "state", dataWithName, "state"), + resource.TestCheckResourceAttrPair(dataWithId, "kubeconfig", dataWithName, "kubeconfig"), + resource.TestMatchResourceAttr(dataWithId, "events.#", regexp.MustCompile(`^[1-9][0-9]*$`)), ), }, { From 129525c22c89c3b8957759a0c3d3f1283738abbf Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 17:06:28 +0100 Subject: [PATCH 143/156] Add more tests to vcd_version DS Signed-off-by: abarreiro --- vcd/datasource_vcd_version_test.go | 34 ++++++++++++++++++++++++++-- website/docs/d/version.html.markdown | 34 +++++++++++++++++++++++++--- 2 files changed, 63 insertions(+), 5 deletions(-) diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index c04b6962a..b3a925a96 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -5,6 +5,7 @@ package vcd import ( "fmt" "regexp" + "strings" "testing" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" @@ -45,10 +46,21 @@ func TestAccVcdVersion(t *testing.T) { debugPrintf("#[DEBUG] CONFIGURATION step3: %s", step3) params["FuncName"] = t.Name() + "-step4" + versionTokens := strings.Split(currentVersion, ".") + params["Condition"] = fmt.Sprintf("~> %s.%s", versionTokens[0], versionTokens[1]) + step4 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) + + params["FuncName"] = t.Name() + "-step5" + params["Condition"] = "!= 10.3.0" + step5 := templateFill(testAccVcdVersion, params) + debugPrintf("#[DEBUG] CONFIGURATION step5: %s", step5) + + params["FuncName"] = t.Name() + "-step6" params["Condition"] = " " // Not used, but illustrates the point of this check params["FailIfNotMatch"] = " " - step4 := templateFill(testAccVcdVersionWithoutArguments, params) - debugPrintf("#[DEBUG] CONFIGURATION step4: %s", step4) + step6 := 
templateFill(testAccVcdVersionWithoutArguments, params) + debugPrintf("#[DEBUG] CONFIGURATION step6: %s", step6) if vcdShortTest { t.Skip(acceptanceTestsSkipped) @@ -82,6 +94,24 @@ func TestAccVcdVersion(t *testing.T) { }, { Config: step4, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='~> %s.%s',fail_if_not_match='true'", currentVersion, versionTokens[0], versionTokens[1])), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), + ), + }, + { + Config: step5, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='!= 10.3.0',fail_if_not_match='true'", currentVersion)), + resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "api_version", apiVersion), + resource.TestCheckResourceAttr("data.vcd_version.version", "matches_condition", "true"), + ), + }, + { + Config: step6, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("data.vcd_version.version", "id", fmt.Sprintf("vcd_version='%s',condition='',fail_if_not_match='false'", currentVersion)), resource.TestCheckResourceAttr("data.vcd_version.version", "vcd_version", currentVersion), diff --git a/website/docs/d/version.html.markdown b/website/docs/d/version.html.markdown index ca57589ca..46c0fc504 100644 --- a/website/docs/d/version.html.markdown +++ b/website/docs/d/version.html.markdown @@ -8,8 +8,8 @@ description: |- # vcd\_version -Provides a VMware Cloud Director version data source to fetch the VCD version, the maximum API version and perform some optional -checks with version constraints. +Provides a VMware Cloud Director version data source to fetch the VCD version, the maximum supported API version and +perform some optional checks with version constraints. Supported in provider *v3.12+*. Requires System Administrator privileges. @@ -17,10 +17,38 @@ Supported in provider *v3.12+*. Requires System Administrator privileges. 
```hcl # This data source will assert that the VCD version is exactly 10.5.1, otherwise it will fail -data "vcd_version" "gte_1051" { +data "vcd_version" "eq_1051" { condition = "= 10.5.1" fail_if_not_match = true } + +# This data source will assert that the VCD version is greater than or equal to 10.4.2, but it won't fail if it is not +data "vcd_version" "gte_1042" { + condition = ">= 10.4.2" + fail_if_not_match = false +} + +output "is_gte_1042" { + value = data.vcd_version.gte_1042.matches_condition # Will show false if we're using a VCD version < 10.4.2 +} + +# This data source will assert that the VCD version is less than 10.5.0 +data "vcd_version" "lt_1050" { + condition = "< 10.5.0" + fail_if_not_match = true +} + +# This data source will assert that the VCD version is 10.5.X +data "vcd_version" "is_105" { + condition = "~> 10.5" + fail_if_not_match = true +} + +# This data source will assert that the VCD version is not 10.5.1 +data "vcd_version" "not_1051" { + condition = "!= 10.5.1" + fail_if_not_match = true +} ``` ## Argument Reference From 057303041a6cd41a16e74166ce18bbd01a57be7d Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 17:11:59 +0100 Subject: [PATCH 144/156] Fix sample config Signed-off-by: abarreiro --- vcd/sample_vcd_test_config.json | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/vcd/sample_vcd_test_config.json b/vcd/sample_vcd_test_config.json index da6bbc0e5..b85b0fa96 100644 --- a/vcd/sample_vcd_test_config.json +++ b/vcd/sample_vcd_test_config.json @@ -226,17 +226,6 @@ "VCD_ADD_PROVIDER": "", "REMOVE_ORG_VDC_FROM_TEMPLATE": "" }, - "cse": { - "version": "4.1.0", - "solutionsOrg": "solutions_org", - "tenantOrg": "tenant_org", - "vdc": "tenant_vdc", - "routedNetwork": "tenant_net_routed", - "edgeGateway": "tenant_edgegateway", - "ovaCatalog": "tkgm_catalog", - "ovaName": "ubuntu-2004-kube-v1.25.7+vmware.2-tkg.1-8a74b9f12e488c54605b3537acb683bc" - }, - "cse" :{ "//" : "Only needed to test Container Service Extension specific resources", "version": "4.2.0", From 8401ce90bf63c9e0387f3d1acb4790cd862d19e9 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Wed, 6 Mar 2024 20:59:03 +0100 Subject: [PATCH 145/156] Fix sample config Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 9954e7b5f..998c60df2 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -492,7 +492,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.disk_size_gi", dataWithName, "worker_pool.0.disk_size_gi"), resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.sizing_policy_id", dataWithName, "worker_pool.0.sizing_policy_id"), resource.TestCheckResourceAttrPair(dataWithId, "worker_pool.0.storage_profile_id", dataWithName, "worker_pool.0.storage_profile_id"), - resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.storage_profile_id", dataWithName, "id"), + resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.storage_profile_id", dataWithName, "default_storage_class.0.storage_profile_id"), resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.name", dataWithName, "default_storage_class.0.name"), resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.reclaim_policy", dataWithName, 
"default_storage_class.0.reclaim_policy"), resource.TestCheckResourceAttrPair(dataWithId, "default_storage_class.0.filesystem", dataWithName, "default_storage_class.0.filesystem"), From 0f2c86e0ca9581ac6098ac47dff2dce335d700d3 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 7 Mar 2024 09:10:39 +0100 Subject: [PATCH 146/156] Fix test Signed-off-by: abarreiro --- ...esource_vcd_cse_kubernetes_cluster_test.go | 36 +++---------------- 1 file changed, 4 insertions(+), 32 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 998c60df2..7334ee0fa 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -237,14 +237,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), - func() resource.TestCheckFunc { - // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 - if cseVersion.GreaterThanOrEqual(v411) { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") - } else { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") - } - }(), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), @@ -292,14 +285,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), - func() resource.TestCheckFunc { - // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 - if cseVersion.GreaterThanOrEqual(v411) { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") - } else { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") - } - }(), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), resource.TestCheckResourceAttr(clusterName, "node_health_check", "true"), resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), @@ -348,14 +334,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), - func() resource.TestCheckFunc { - // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 - if cseVersion.GreaterThanOrEqual(v411) { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") - } else { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") - } - }(), + 
resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), @@ -408,14 +387,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestCheckResourceAttr(clusterName, "pods_cidr", "100.96.0.0/11"), resource.TestCheckResourceAttr(clusterName, "services_cidr", "100.64.0.0/13"), resource.TestCheckResourceAttr(clusterName, "virtual_ip_subnet", ""), - func() resource.TestCheckFunc { - // Auto Repair on Errors gets automatically deactivated after cluster creation since CSE 4.1.1 - if cseVersion.GreaterThanOrEqual(v411) { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false") - } else { - return resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "true") - } - }(), + resource.TestCheckResourceAttr(clusterName, "auto_repair_on_errors", "false"), resource.TestMatchResourceAttr(clusterName, "kubernetes_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+\+vmware\.[0-9]$`)), resource.TestMatchResourceAttr(clusterName, "tkg_product_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), resource.TestMatchResourceAttr(clusterName, "capvcd_version", regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)), From 8c837d32d1f3c601024d33ebb055c328ec81fa11 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 7 Mar 2024 09:16:08 +0100 Subject: [PATCH 147/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 7334ee0fa..04c20b595 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -412,7 +412,7 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { resource.TestMatchResourceAttr(dataWithName, "org_id", regexp.MustCompile(`^urn:vcloud:org:.+$`)), resource.TestCheckResourceAttrPair(dataWithName, "vdc_id", clusterName, "vdc_id"), resource.TestCheckResourceAttrPair(dataWithName, "network_id", clusterName, "network_id"), - resource.TestCheckResourceAttrPair(dataWithName, "owner", clusterName, "owner"), + resource.TestCheckResourceAttrSet(dataWithName, "owner"), // This time the owner can be obtained resource.TestCheckResourceAttrPair(dataWithName, "ssh_public_key", clusterName, "ssh_public_key"), resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.disk_size_gi", clusterName, "control_plane.0.disk_size_gi"), resource.TestCheckResourceAttrPair(dataWithName, "control_plane.0.sizing_policy_id", clusterName, "control_plane.0.sizing_policy_id"), From 7043744fb80b9e23364576c626dedbc4fef8b839 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 7 Mar 2024 09:46:40 +0100 Subject: [PATCH 148/156] bump sdk Signed-off-by: abarreiro --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3841616e4..57615b34b 100644 --- a/go.mod +++ b/go.mod @@ -67,4 +67,4 @@ require ( google.golang.org/protobuf v1.31.0 // indirect ) -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe 
+replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d diff --git a/go.sum b/go.sum index 4146048d3..d8b6cf68b 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,8 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe h1:92YhSbwwR+wTyyLo/a67Bo2XqTu/Knb4/HC8DF8ozHQ= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240306102333-7b022788a3fe/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d h1:jwUhWCy9oL+dGts0jene38fwcs3UHtNuO3WEHMq4DUA= +github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= From 53d45300ca10dab56f451f0dc5930f2ffbcbcf08 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 7 Mar 2024 09:48:39 +0100 Subject: [PATCH 149/156] nit Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index 04c20b595..b56b92775 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -537,7 +537,7 @@ func TestAccVcdCseKubernetesClusterFailure(t *testing.T) { "Vdc": testConfig.Cse.TenantVdc, "EdgeGateway": testConfig.Cse.EdgeGateway, "Network": testConfig.Cse.RoutedNetwork, - "TokenName": t.Name() + "3", + "TokenName": t.Name(), "TokenFile": tokenFilename, "ControlPlaneCount": 1, "NodePoolCount": 1, From 834850b77849b4029da3e6cf94a7ad6cbfdefa7c Mon Sep 17 00:00:00 2001 From: abarreiro Date: Thu, 7 Mar 2024 11:07:14 +0100 Subject: [PATCH 150/156] Fix test Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster_test.go b/vcd/resource_vcd_cse_kubernetes_cluster_test.go index b56b92775..7070393f6 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster_test.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster_test.go @@ -491,11 +491,12 @@ func TestAccVcdCseKubernetesCluster(t *testing.T) { ImportStateIdFunc: func(state *terraform.State) (string, error) { return cacheId.fieldValue, nil }, - // Ignore api_token_file and operations_timeout_minutes as these are not computed from VCD, so they are missing + // Ignore 'api_token_file' and 'operations_timeout_minutes' as these are not computed from VCD, so they are missing // after any successful import. 
- // Ignore also owner and org as these may not be set in the resource configuration, but they are always + // Ignore also 'owner' and 'org' as these may not be set in the resource configuration, but they are always // set on imports. - ImportStateVerifyIgnore: []string{"api_token_file", "operations_timeout_minutes", "owner", "org"}, + // 'events' is ignored as the list may differ between runs. + ImportStateVerifyIgnore: []string{"api_token_file", "operations_timeout_minutes", "owner", "org", "events"}, }, }, }) From 4fa6923de14990309a52a80d056d1517a63b5940 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 09:40:09 +0100 Subject: [PATCH 151/156] Apply suggestions Signed-off-by: abarreiro --- .../v4.1/cluster/3.11-cluster-creation.tf | 2 +- .../v4.1/install/step1/3.11-cse-install-1-provider-config.tf | 2 +- .../step1/3.11-cse-install-2-cse-server-prerequisites.tf | 2 +- .../install/step1/3.11-cse-install-3-cse-server-settings.tf | 2 +- .../v4.1/install/step2/3.11-cse-install-4-provider-config.tf | 2 +- .../v4.1/install/step2/3.11-cse-install-5-infrastructure.tf | 2 +- .../v4.1/install/step2/3.11-cse-install-6-ovas.tf | 2 +- .../v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf | 2 +- .../v4.1/install/step2/3.11-cse-install-8-optionals.tf | 2 +- .../v4.2/install/step1/3.11-cse-install-1-provider-config.tf | 2 +- .../step1/3.11-cse-install-2-cse-server-prerequisites.tf | 2 +- .../install/step1/3.11-cse-install-3-cse-server-settings.tf | 2 +- .../v4.2/install/step2/3.11-cse-install-4-provider-config.tf | 2 +- .../v4.2/install/step2/3.11-cse-install-5-infrastructure.tf | 2 +- .../v4.2/install/step2/3.11-cse-install-6-ovas.tf | 2 +- .../v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf | 2 +- .../v4.2/install/step2/3.11-cse-install-8-optionals.tf | 2 +- vcd/datasource_vcd_version.go | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf b/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf index a449fd426..adb0f3c9f 100644 --- a/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf +++ b/examples/container-service-extension/v4.1/cluster/3.11-cluster-creation.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.1 TKGm cluster creation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_cluster_management +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_cluster_management # before applying this configuration. # # * Please make sure to have CSE v4.1 installed in your VCD appliance and the CSE Server is correctly running. 
diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf index 9f7db5632..6d7a6ac1d 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-1-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 3484fc6e0..2d1d6adca 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf index 306284e13..da8184288 100644 --- a/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf +++ b/examples/container-service-extension/v4.1/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
# # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf index af28b8d44..74ca0790b 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-4-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation, step 2: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf index a5e2d6cde..8bf1614e0 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-5-infrastructure.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf index 6e4112601..d9526f3cc 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-6-ovas.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. 
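The `3.11-cse-install-*-provider-config.tf` files touched above boil down to a provider block along these lines. The literal values below are placeholders (the real examples read them from variables), so treat this purely as an illustrative sketch of the System administrator connection the installation requires:

```hcl
provider "vcd" {
  url                  = "https://vcd.example.com/api" # placeholder VCD API endpoint
  user                 = "administrator"               # placeholder System administrator
  password             = "change-me"
  org                  = "System" # the installation is performed as a System administrator
  allow_unverified_ssl = true     # placeholder; set according to your certificate setup
}
```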
diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf index 41622cefe..a845605d5 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf index 79fe239b2..48530284f 100644 --- a/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf +++ b/examples/container-service-extension/v4.1/install/step2/3.11-cse-install-8-optionals.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE v4.1 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf index 82b3c07fc..a76207bc3 100644 --- a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-1-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
# # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf index 23dadbb19..1d79a488a 100644 --- a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-2-cse-server-prerequisites.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf index 78efdaf2e..11ddb0e56 100644 --- a/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf +++ b/examples/container-service-extension/v4.2/install/step1/3.11-cse-install-3-cse-server-settings.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation, step 1: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf index a7e452607..0decbddfa 100644 --- a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-4-provider-config.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation, step 2: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. 
# # * The installation process is split into two steps as the first one creates a CSE admin user that needs to be diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf index 14b9b53e8..102bd7f02 100644 --- a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-5-infrastructure.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf index 9c7202174..75b5015c1 100644 --- a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-6-ovas.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf index 3463e2214..69a232905 100644 --- a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-7-cse-server-init.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. 
diff --git a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf index 635d2717b..ed0354665 100644 --- a/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf +++ b/examples/container-service-extension/v4.2/install/step2/3.11-cse-install-8-optionals.tf @@ -1,7 +1,7 @@ # ------------------------------------------------------------------------------------------------------------ # CSE 4.2 installation: # -# * Please read the guide present at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install +# * Please read the guide at https://registry.terraform.io/providers/vmware/vcd/latest/docs/guides/container_service_extension_4_x_install # before applying this configuration. # # * Rename "terraform.tfvars.example" to "terraform.tfvars" and adapt the values to your needs. diff --git a/vcd/datasource_vcd_version.go b/vcd/datasource_vcd_version.go index 5b089c3c9..c4dec5f14 100644 --- a/vcd/datasource_vcd_version.go +++ b/vcd/datasource_vcd_version.go @@ -69,7 +69,7 @@ func datasourceVcdVersionRead(_ context.Context, d *schema.ResourceData, meta in matchesCondition := constraints.Check(checkVer) dSet(d, "matches_condition", matchesCondition) if !matchesCondition && d.Get("fail_if_not_match").(bool) { - return diag.Errorf("the VCD version doesn't match the version constraint '%s'", condition) + return diag.Errorf("the VCD version '%s' doesn't match the version constraint '%s'", vcdVersion, condition) } } From 89049136a09562ab587768c0857c50b24a78e500 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 09:49:16 +0100 Subject: [PATCH 152/156] Apply suggestions Signed-off-by: abarreiro --- vcd/resource_vcd_cse_kubernetes_cluster.go | 19 ++++++++++++------- .../d/cse_kubernetes_cluster.html.markdown | 4 ++-- .../r/cse_kubernetes_cluster.html.markdown | 4 ++-- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/vcd/resource_vcd_cse_kubernetes_cluster.go b/vcd/resource_vcd_cse_kubernetes_cluster.go index c36dda6fc..2c0059b30 100644 --- a/vcd/resource_vcd_cse_kubernetes_cluster.go +++ b/vcd/resource_vcd_cse_kubernetes_cluster.go @@ -15,6 +15,11 @@ import ( ) func resourceVcdCseKubernetesCluster() *schema.Resource { + // This regular expression matches strings with at most 31 characters, composed only by lowercase alphanumeric characters or '-', + // that must start with an alphabetic character, and end with an alphanumeric. + // This is used for any "name" property in CSE, like cluster name, worker pool name or storage class name. 
+ const kubernetesNameRegex = `^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$` + return &schema.Resource{ CreateContext: resourceVcdCseKubernetesClusterCreate, ReadContext: resourceVcdCseKubernetesRead, @@ -57,7 +62,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Required: true, ForceNew: true, Description: "The name of the Kubernetes cluster", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "kubernetes_template_id": { @@ -169,7 +174,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { // we need to be careful on reads to guarantee that order is respected. Type: schema.TypeList, Required: true, - Description: "Defines a node pool for the cluster", + Description: "Defines a worker pool for the cluster", Elem: &schema.Resource{ // Ideally, all of these sub-attributes should have ForceNew: true except for "machine_count", as // they can't be changed. However, this doesn't work well, so we check this at runtime. @@ -178,14 +183,14 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { Type: schema.TypeString, Required: true, Description: "The name of this worker pool. Must be unique", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "machine_count": { Type: schema.TypeInt, Optional: true, Default: 1, // As suggested in UI - Description: "The number of nodes that this worker pool has. Must be higher than 0", + Description: "The number of nodes that this worker pool has. Must be higher than or equal to 0", ValidateDiagFunc: minimumValue(0, "number of nodes must be higher than or equal to 0"), }, "disk_size_gi": { @@ -236,7 +241,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Type: schema.TypeString, Description: "Name to give to this storage class", - ValidateDiagFunc: matchRegex(`^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`, "name must contain only lowercase alphanumeric characters or '-',"+ + ValidateDiagFunc: matchRegex(kubernetesNameRegex, "name must contain only lowercase alphanumeric characters or '-',"+ "start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters"), }, "reclaim_policy": { @@ -244,7 +249,7 @@ func resourceVcdCseKubernetesCluster() *schema.Resource { ForceNew: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "retain"}, false), - Description: "'delete' deletes the volume when the PersistentVolumeClaim is deleted. 'retain' does not, and the volume can be manually reclaimed", + Description: "Reclaim policy. Possible values are: `delete` deletes the volume when the `PersistentVolumeClaim` is deleted; `retain` does not delete, and the volume can be manually reclaimed", }, "filesystem": { Required: true, @@ -474,7 +479,7 @@ func resourceVcdCseKubernetesClusterCreate(ctx context.Context, d *schema.Resour // If we get here, it means we got either a successful created cluster, a timeout or a cluster in "error" state. 
// Either way, from this point we should go to the Update logic as the cluster is definitely present in VCD, so we store the ID. // Also, we need to set the ID to be able to distinguish this cluster from all the others that may have the same name and RDE Type. - // We could use some other ways of filtering, but ID is the only accurate. + // We could use some other ways of filtering, but ID is the only accurate one. // If the cluster can't be created due to errors, users should delete it and retry, like in UI. d.SetId(cluster.ID) diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index 78752907a..ef4afeb7b 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -3,7 +3,7 @@ layout: "vcd" page_title: "VMware Cloud Director: vcd_cse_kubernetes_cluster" sidebar_current: "docs-vcd-data-source-cse-kubernetes-cluster" description: |- - Provides a resource to read Kubernetes clusters from VMware Cloud Director with Container Service Extension installed and running. + Provides a data source to read Kubernetes clusters from VMware Cloud Director with Container Service Extension installed and running. --- # vcd\_cse\_kubernetes\_cluster @@ -34,7 +34,7 @@ data "vcd_cse_kubernetes_cluster" "my_cluster" { ## Example Usage with Name -Sometimes using the cluster ID is not convenient, so this data source allows to use the cluster name. +Sometimes using the cluster ID is not convenient, so this data source allows using the cluster name. As VCD allows to have multiple clusters with the same name, this option must be used with caution as it will fail if there is more than one Kubernetes cluster with the same name in the same Organization: diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index f34c214ce..2c29174c3 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -230,10 +230,10 @@ Only the following arguments can be updated: * `node_health_check`: Can be turned on/off. * `operations_timeout_minutes`: Does not require modifying the existing cluster -You can also add more `worker_pool` blocks to add more worker pools to the cluster. **You can't delete Worker Pools**, but they can +You can also add more `worker_pool` blocks to add more Worker Pools to the cluster. **You can't delete Worker Pools**, but they can be scaled down to zero. -Updating any other argument will delete the existing cluster and create a new one, if the Terraform plan is applied. +Updating any other argument will delete the existing cluster and create a new one, when the Terraform plan is applied. Upgrading CSE version with `cse_version` is not supported as this operation would require human intervention, as stated [in the official documentation](https://docs.vmware.com/en/VMware-Cloud-Director-Container-Service-Extension/4.1/VMware-Cloud-Director-Container-Service-Extension-Using-Tenant-4.1/GUID-092C40B4-D0BA-4B90-813F-D36929F2F395.html). 
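
As an illustration of the `kubernetesNameRegex` pattern introduced in the commit above, here is a minimal standalone sketch (not part of the patch; it only assumes Go's standard `regexp` package, and the sample names are invented):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the kubernetesNameRegex constant in the patch: a lowercase letter first,
// lowercase alphanumerics or '-' in the middle, a lowercase alphanumeric at the end,
// and at most 31 characters in total.
const kubernetesNameRegex = `^[a-z](?:[a-z0-9-]{0,29}[a-z0-9])?$`

func main() {
	re := regexp.MustCompile(kubernetesNameRegex)
	// Sample names are invented for illustration.
	for _, name := range []string{"my-cluster", "worker-pool-1", "a", "My-Cluster", "1cluster", "ends-with-dash-"} {
		fmt.Printf("%-16s valid: %t\n", name, re.MatchString(name))
	}
}
```

Names such as `My-Cluster`, `1cluster` or `ends-with-dash-` are rejected, which is consistent with the validation message passed to `matchRegex` in the schema.
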
From 05bf78fd74b378010812f8b2a35b713779f58a12 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 11:57:56 +0100 Subject: [PATCH 153/156] Add automatic import section Signed-off-by: abarreiro --- .../r/cse_kubernetes_cluster.html.markdown | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index 2c29174c3..e3e188342 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -258,9 +258,6 @@ The Kubeconfig can now be used with `kubectl` and the Kubernetes cluster can be ## Importing -~> The current implementation of Terraform import can only import resources into the state. -It does not generate configuration. [More information.](https://www.terraform.io/docs/import/) - An existing Kubernetes cluster can be [imported][docs-import] into this resource via supplying the **Cluster ID** for it. The ID can be easily obtained in VCD UI, in the CSE Kubernetes Container Clusters plugin. @@ -361,4 +358,24 @@ you can check the Kubernetes Container Clusters UI plugin, where all the availab After that, you can expand the configuration file and either update or delete the Kubernetes cluster. Running `terraform plan` at this stage will show the difference between the minimal configuration file and the Kubernetes cluster stored properties. +### Importing with Import blocks (Terraform v1.5+) + +~> Terraform warns that this procedure is considered **experimental**. Read more [here](/providers/vmware/vcd/latest/docs/guides/importing_resources) + +Given a Cluster ID, like `urn:vcloud:entity:vmware:capvcdCluster:f2d88194-3745-47ef-a6e1-5ee0bbce38f6`, you can write +the following HCL block in your Terraform configuration: + +```hcl +import { + to = vcd_cse_kubernetes_cluster.imported_cluster + id = "urn:vcloud:entity:vmware:capvcdCluster:f2d88194-3745-47ef-a6e1-5ee0bbce38f6" +} +``` + +Instead of using the suggested snippet in the section above, executing the command +`terraform plan -generate-config-out=generated_resources.tf` will generate a similar code, automatically. + +Once the code is validated, running `terraform apply` will perform the import operation and save the Kubernetes cluster +into the Terraform state. The Kubernetes cluster can now be operated with Terraform. 
+ [docs-import]:https://www.terraform.io/docs/import/ From 1c21f8a5d7163900aba1d4b99f47c96b98525866 Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 12:07:57 +0100 Subject: [PATCH 154/156] Explicitly put 4.1.1a Signed-off-by: abarreiro --- website/docs/d/cse_kubernetes_cluster.html.markdown | 2 +- website/docs/r/cse_kubernetes_cluster.html.markdown | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/d/cse_kubernetes_cluster.html.markdown b/website/docs/d/cse_kubernetes_cluster.html.markdown index ef4afeb7b..bd4d76e40 100644 --- a/website/docs/d/cse_kubernetes_cluster.html.markdown +++ b/website/docs/d/cse_kubernetes_cluster.html.markdown @@ -15,7 +15,7 @@ Supported in provider *v3.12+* Supports the following **Container Service Extension** versions: * 4.1.0 -* 4.1.1 +* 4.1.1 / 4.1.1a * 4.2.0 * 4.2.1 diff --git a/website/docs/r/cse_kubernetes_cluster.html.markdown b/website/docs/r/cse_kubernetes_cluster.html.markdown index e3e188342..66934d7d0 100644 --- a/website/docs/r/cse_kubernetes_cluster.html.markdown +++ b/website/docs/r/cse_kubernetes_cluster.html.markdown @@ -15,7 +15,7 @@ Supported in provider *v3.12+* Supports the following **Container Service Extension** versions: * 4.1.0 -* 4.1.1 +* 4.1.1 / 4.1.1a * 4.2.0 * 4.2.1 @@ -119,7 +119,7 @@ output "kubeconfig" { The following arguments are supported: -* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1`, `4.2.0` and `4.2.1` +* `cse_version` - (Required) Specifies the CSE version to use. Accepted versions: `4.1.0`, `4.1.1` (also for *4.1.1a*), `4.2.0` and `4.2.1` * `runtime` - (Optional) Specifies the Kubernetes runtime to use. Defaults to `tkg` (Tanzu Kubernetes Grid) * `name` - (Required) The name of the Kubernetes cluster. 
It must contain only lowercase alphanumeric characters or "-", start with an alphabetic character, end with an alphanumeric, and contain at most 31 characters From f554b841ce1315c0cc7417a2f3ed97c033cb35cf Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 12:28:12 +0100 Subject: [PATCH 155/156] Bump SDK Signed-off-by: abarreiro --- go.mod | 4 +--- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 57615b34b..4296e215d 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/kr/pretty v0.2.1 - github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.3 + github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5 ) require ( @@ -66,5 +66,3 @@ require ( google.golang.org/grpc v1.60.0 // indirect google.golang.org/protobuf v1.31.0 // indirect ) - -replace github.com/vmware/go-vcloud-director/v2 => github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d diff --git a/go.sum b/go.sum index d8b6cf68b..0d1e0e9cb 100644 --- a/go.sum +++ b/go.sum @@ -4,8 +4,6 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d h1:jwUhWCy9oL+dGts0jene38fwcs3UHtNuO3WEHMq4DUA= -github.com/adambarreiro/go-vcloud-director/v2 v2.17.0-alpha.1.0.20240307084107-45b7864a5d0d/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -144,6 +142,8 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5 h1:HL3T2EGsR8zaTbBbAPIzqQON7x9GQ+USKuz88I1N3cY= +github.com/vmware/go-vcloud-director/v2 v2.23.0-alpha.5/go.mod h1:NyNcb2ymhrzwv4GyYXyYOm1NbqRwGNxDWn90AtWniXc= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= From 1d6302f93a6f67f9be01e71f5b21c68f471615af Mon Sep 17 00:00:00 2001 From: abarreiro Date: Fri, 8 Mar 2024 12:33:41 +0100 Subject: [PATCH 156/156] Fix test Signed-off-by: abarreiro --- vcd/datasource_vcd_version_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vcd/datasource_vcd_version_test.go b/vcd/datasource_vcd_version_test.go index b3a925a96..dcc411624 100644 --- a/vcd/datasource_vcd_version_test.go +++ b/vcd/datasource_vcd_version_test.go @@ -81,7 +81,7 @@ func TestAccVcdVersion(t *testing.T) { }, { Config: step2, 
-				ExpectError: regexp.MustCompile(`the VCD version doesn't match the version constraint '>= 99.99.99'`),
+				ExpectError: regexp.MustCompile(fmt.Sprintf(`the VCD version '%s' doesn't match the version constraint '>= 99.99.99'`, currentVersion)),
 			},
 			{
 				Config: step3,