move wait_till logic into function, integrate it into cluster datasource (IBM-Cloud#5540)

* move wait_till logic into function, integrate it into cluster datasource

* add state to doc

* review comments

---------

Co-authored-by: Zoltan Illes <[email protected]>
z0za and Zoltan Illes authored Sep 4, 2024
1 parent f78fb64 commit 35addcb
Showing 6 changed files with 168 additions and 111 deletions.
43 changes: 43 additions & 0 deletions ibm/service/kubernetes/data_source_ibm_container_cluster.go
@@ -7,11 +7,13 @@ import (
"fmt"
"log"
"strings"
"time"

"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

func DataSourceIBMContainerCluster() *schema.Resource {
@@ -35,6 +37,23 @@ func DataSourceIBMContainerCluster() *schema.Resource {
"ibm_container_cluster",
"name"),
},
"wait_till": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{oneWorkerNodeReady, clusterNormal}, true),
Description:  "wait_till can be configured for OneWorkerNodeReady or normal",
},
"wait_till_timeout": {
Type: schema.TypeInt,
Optional: true,
Default:      20,
Description: "timeout for wait_till in minutes",
RequiredWith: []string{"wait_till"},
},
"state": {
Type: schema.TypeString,
Computed: true,
},
"worker_count": {
Description: "Number of workers",
Type: schema.TypeInt,
@@ -390,6 +409,16 @@ func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
if v, ok := d.GetOk("name"); ok {
name = v.(string)
}

// timeoutStage selects the readiness stage to wait for; the timeout defaults to 20 minutes
var timeoutStage string
var timeout time.Duration = 20 * time.Minute
if v, ok := d.GetOk("wait_till"); ok {
timeoutStage = strings.ToLower(v.(string))
timeoutInt := d.Get("wait_till_timeout").(int)
timeout = time.Duration(timeoutInt) * time.Minute
}

clusterFields, err := csAPI.Find(name, targetEnv)
if err != nil {
return fmt.Errorf("[ERROR] Error retrieving cluster: %s", err)
@@ -434,6 +463,20 @@ func dataSourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
filteredAlbs := flex.FlattenAlbs(albs, filterType)

d.SetId(clusterFields.ID)

if timeoutStage != "" {
err = waitForCluster(d, timeoutStage, timeout, meta)
if err != nil {
return err
}

clusterFields, err = csAPI.Find(name, targetEnv)
if err != nil {
return fmt.Errorf("[ERROR] Error retrieving cluster after waitForCluster: %s", err)
}
}

d.Set("state", clusterFields.State)
d.Set("worker_count", clusterFields.WorkerCount)
d.Set("workers", workers)
d.Set("region", clusterFields.Region)
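The new arguments are exercised like this (a sketch in the style of the acceptance-test helpers below; the helper name and the 30-minute timeout are illustrative, not part of this commit):

// Hypothetical helper modeled on data_source_ibm_container_cluster_test.go:
// read the cluster and block until it reports the "normal" state, waiting up
// to 30 minutes instead of the 20-minute default.
func testAccCheckIBMContainerClusterDataSourceWaitTillTimeout(clusterName string) string {
	return testAccCheckIBMContainerClusterBasic(clusterName, "IngressReady") + `
	data "ibm_container_cluster" "testacc_ds_cluster" {
	  cluster_name_id   = ibm_container_cluster.testacc_cluster.id
	  wait_till         = "normal"
	  wait_till_timeout = 30
	}
	`
}

Because the computed state attribute is set only after the wait completes, a follow-up check can assert that it equals "normal".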
54 changes: 51 additions & 3 deletions ibm/service/kubernetes/data_source_ibm_container_cluster_test.go
@@ -16,14 +16,42 @@ import (
)

func TestAccIBMContainerClusterDataSource_basic(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100))
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckIBMContainerClusterDataSourceBasic(clusterName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(
"data.ibm_container_cluster.testacc_ds_cluster", "id"),
resource.TestCheckResourceAttr(
"data.ibm_container_cluster.testacc_ds_cluster", "state", "deploying"),
),
},
{
Config: testAccCheckIBMContainerClusterDataSourceBasic_update(clusterName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(
"data.ibm_container_cluster.testacc_ds_cluster", "id"),
resource.TestCheckResourceAttr(
"data.ibm_container_cluster.testacc_ds_cluster", "state", "normal"),
),
},
},
})
}

func TestAccIBMContainerClusterDataSourceBindServiceBasic(t *testing.T) {
clusterName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100))
serviceName := fmt.Sprintf("tf-cluster-%d", acctest.RandIntRange(10, 100))
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
Steps: []resource.TestStep{
{
Config: testAccCheckIBMContainerClusterDataSource(clusterName, serviceName),
Config: testAccCheckIBMContainerClusterDataSourceBindServiceBasic(clusterName, serviceName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttrSet(
"data.ibm_container_cluster.testacc_ds_cluster", "id"),
@@ -56,14 +84,34 @@ func testAccIBMClusterVlansCheck(n string) resource.TestCheckFunc {
return nil
}
}
func testAccCheckIBMContainerClusterDataSource(clusterName, serviceName string) string {

func testAccCheckIBMContainerClusterDataSourceBasic(clusterName string) string {
return testAccCheckIBMContainerClusterBasic(clusterName, "IngressReady") + `
data "ibm_container_cluster" "testacc_ds_cluster" {
cluster_name_id = ibm_container_cluster.testacc_cluster.id
list_bounded_services = "false"
}
`
}

func testAccCheckIBMContainerClusterDataSourceBasic_update(clusterName string) string {
return testAccCheckIBMContainerClusterBasic(clusterName, "IngressReady") + `
data "ibm_container_cluster" "testacc_ds_cluster" {
cluster_name_id = ibm_container_cluster.testacc_cluster.id
list_bounded_services = "false"
wait_till = "normal"
}
`
}

func testAccCheckIBMContainerClusterDataSourceBindServiceBasic(clusterName, serviceName string) string {
return testAccCheckIBMContainerBindServiceBasic(clusterName, serviceName) + `
data "ibm_container_cluster" "testacc_ds_cluster" {
cluster_name_id = ibm_container_cluster.testacc_cluster.id
}
data "ibm_container_bind_service" "bind_service" {
cluster_name_id = ibm_container_bind_service.bind_service.cluster_name_id
service_instance_id = ibm_container_bind_service.bind_service.service_instance_id
namespace_id = "default"
}
`
131 changes: 36 additions & 95 deletions ibm/service/kubernetes/resource_ibm_container_cluster.go
Expand Up @@ -701,26 +701,15 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{})
}
}

_, err = waitForClusterMasterAvailable(d, meta)
_, err = waitForClusterMasterAvailable(d, meta, d.Timeout(schema.TimeoutCreate))
if err != nil {
return err
}
waitForState := strings.ToLower(d.Get("wait_till").(string))

switch waitForState {
case strings.ToLower(oneWorkerNodeReady):
_, err = waitForClusterOneWorkerAvailable(d, meta)
if err != nil {
return err
}

case strings.ToLower(clusterNormal):
pendingStates := []string{clusterDeploying, clusterRequested, clusterPending, clusterDeployed, clusterCritical, clusterWarning}
_, err = waitForClusterState(d, meta, waitForState, pendingStates)
if err != nil {
return err
}

timeoutStage := strings.ToLower(d.Get("wait_till").(string))
err = waitForCluster(d, timeoutStage, d.Timeout(schema.TimeoutCreate), meta)
if err != nil {
return err
}

d.Set("force_delete_storage", d.Get("force_delete_storage").(bool))
@@ -759,6 +748,31 @@ func resourceIBMContainerClusterCreate(d *schema.ResourceData, meta interface{}) error {
return resourceIBMContainerClusterUpdate(d, meta)
}

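// waitForCluster blocks until the cluster reaches the readiness stage named by
// timeoutStage (master ready, one worker ready, or the "normal" cluster state),
// or returns an error when timeout elapses first.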
func waitForCluster(d *schema.ResourceData, timeoutStage string, timeout time.Duration, meta interface{}) error {
switch timeoutStage {
case strings.ToLower(masterNodeReady):
_, err := waitForClusterMasterAvailable(d, meta, timeout)
if err != nil {
return err
}

case strings.ToLower(oneWorkerNodeReady):
_, err := waitForClusterOneWorkerAvailable(d, meta, timeout)
if err != nil {
return err
}

case clusterNormal:
pendingStates := []string{clusterDeploying, clusterRequested, clusterPending, clusterDeployed, clusterCritical, clusterWarning}
_, err := waitForClusterState(d, meta, clusterNormal, pendingStates, timeout)
if err != nil {
return err
}
}

return nil
}

func resourceIBMContainerClusterRead(d *schema.ResourceData, meta interface{}) error {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {
@@ -1275,46 +1289,8 @@ func waitForClusterDelete(d *schema.ResourceData, meta interface{}) (interface{}, error) {
return stateConf.WaitForState()
}

// WaitForClusterAvailable Waits for cluster creation
func WaitForClusterAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {
return nil, err
}
log.Printf("Waiting for cluster (%s) to be available.", d.Id())
id := d.Id()

stateConf := &resource.StateChangeConf{
Pending: []string{"retry", clusterProvisioning},
Target: []string{clusterNormal},
Refresh: clusterStateRefreshFunc(csClient.Clusters(), id, target),
Timeout: d.Timeout(schema.TimeoutCreate),
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}

return stateConf.WaitForState()
}

func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target)
if err != nil {
return nil, "", fmt.Errorf("[ERROR] clusterStateRefreshFunc Error retrieving cluster: %s", err)
}
// Check active transactions
log.Println("Checking cluster")
//Check for cluster state to be normal
log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal))
if strings.Compare(clusterFields.State, clusterNormal) != 0 {
return clusterFields, clusterProvisioning, nil
}
return clusterFields, clusterNormal, nil
}
}

// waitForClusterMasterAvailable waits for the cluster master to be ready/deployed
func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}, timeout time.Duration) (interface{}, error) {
targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
return nil, err
@@ -1339,15 +1315,15 @@ func waitForClusterMasterAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
}
return clusterFields, deployInProgress, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}

return stateConf.WaitForState()
}
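The waiters in this file all share the plugin SDK's resource.StateChangeConf polling pattern: poll Refresh until it reports a Target state, treating Pending states as "keep waiting". A self-contained sketch of that pattern with a toy refresh function (the states and values are illustrative, not the provider's real lookup):

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)

func main() {
	polls := 0
	stateConf := &resource.StateChangeConf{
		Pending: []string{"deploying"}, // states that mean "keep polling"
		Target:  []string{"normal"},    // states that end the wait successfully
		Refresh: func() (interface{}, string, error) {
			// Toy stand-in for a cluster lookup: report "normal" on the third poll.
			polls++
			if polls < 3 {
				return polls, "deploying", nil
			}
			return polls, "normal", nil
		},
		Timeout:    5 * time.Minute,  // give up after this long
		Delay:      10 * time.Second, // wait before the first poll
		MinTimeout: 10 * time.Second, // minimum interval between polls
	}
	result, err := stateConf.WaitForState()
	fmt.Println(result, err)
}

Passing the stage-specific timeout into this struct, instead of always reading d.Timeout(schema.TimeoutCreate), is what lets the commit reuse the same waiters from both the resource and the data source.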

func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState string, pendingState []string) (interface{}, error) {
func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState string, pendingState []string, timeout time.Duration) (interface{}, error) {
targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
return nil, err
@@ -1376,7 +1352,7 @@ func waitForClusterState(d *schema.ResourceData, meta interface{}, waitForState string, pendingState []string) (interface{}, error) {

return cls, cls.State, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}
@@ -1385,7 +1361,7 @@
}

// waitForClusterOneWorkerAvailable waits for at least one worker to reach the normal state
func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}, timeout time.Duration) (interface{}, error) {
targetEnv, err := getClusterTargetHeader(d, meta)
if err != nil {
return nil, err
@@ -1435,7 +1411,7 @@ func waitForClusterOneWorkerAvailable(d *schema.ResourceData, meta interface{}) (interface{}, error) {
}
return nil, normal, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}
@@ -1483,41 +1459,6 @@ func workerStateRefreshFunc(client v1.Workers, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc {
}
}

func WaitForClusterCreation(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {
return nil, err
}
log.Printf("Waiting for cluster (%s) to be available.", d.Id())
ClusterID := d.Id()

stateConf := &resource.StateChangeConf{
Pending: []string{"retry", clusterProvisioning},
Target: []string{clusterNormal},
Refresh: func() (interface{}, string, error) {
workerFields, err := csClient.Workers().List(ClusterID, target)
log.Println("Total workers: ", len(workerFields))
if err != nil {
return nil, "", fmt.Errorf("[ERROR] Error retrieving workers for cluster: %s", err)
}
log.Println("Checking workers...")
// verify that at least a single node is in the normal state
for _, e := range workerFields {
log.Println("Worker node status: ", e.State)
if e.State == workerNormal {
return workerFields, workerNormal, nil
}
}
return workerFields, workerProvisioning, nil
},
Timeout: d.Timeout(schema.TimeoutCreate),
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}

return stateConf.WaitForState()
}

func WaitForSubnetAvailable(d *schema.ResourceData, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {
20 changes: 19 additions & 1 deletion ibm/service/kubernetes/resource_ibm_container_cluster_feature.go
@@ -6,6 +6,7 @@ package kubernetes
import (
"fmt"
"log"
"strings"
"time"

"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
@@ -326,7 +327,7 @@ func resourceIBMContainerClusterFeatureUpdate(d *schema.ResourceData, meta interface{}) error {
return resourceIBMContainerClusterFeatureRead(d, meta)
}

// WaitForClusterAvailable Waits for cluster creation
// WaitForClusterAvailableForFeatureUpdate Waits for cluster creation
func WaitForClusterAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {
@@ -347,6 +348,23 @@ func WaitForClusterAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
return stateConf.WaitForState()
}

func clusterStateRefreshFunc(client v1.Clusters, instanceID string, target v1.ClusterTargetHeader) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
clusterFields, err := client.FindWithOutShowResourcesCompatible(instanceID, target)
if err != nil {
return nil, "", fmt.Errorf("[ERROR] clusterStateRefreshFunc Error retrieving cluster: %s", err)
}
// Check active transactions
log.Println("Checking cluster")
// Check whether the cluster state is normal
log.Println("Checking cluster state", strings.Compare(clusterFields.State, clusterNormal))
if strings.Compare(clusterFields.State, clusterNormal) != 0 {
return clusterFields, clusterProvisioning, nil
}
return clusterFields, clusterNormal, nil
}
}
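For reference, a sketch of how the moved refresh function is wired into a waiter, mirroring the StateChangeConf shape of WaitForClusterAvailableForFeatureUpdate above (the function name is illustrative; clusterProvisioning and clusterNormal are the package constants already used here):

// Illustrative waiter showing clusterStateRefreshFunc in context.
func waitForClusterNormalSketch(cluster string, timeout time.Duration, client v1.Clusters, target v1.ClusterTargetHeader) (interface{}, error) {
	stateConf := &resource.StateChangeConf{
		Pending:    []string{"retry", clusterProvisioning}, // keep polling through these
		Target:     []string{clusterNormal},                // stop once the cluster is "normal"
		Refresh:    clusterStateRefreshFunc(client, cluster, target),
		Timeout:    timeout,
		Delay:      10 * time.Second,
		MinTimeout: 10 * time.Second,
	}
	return stateConf.WaitForState()
}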

func WaitForWorkerAvailableForFeatureUpdate(cluster string, timeout time.Duration, meta interface{}, target v1.ClusterTargetHeader) (interface{}, error) {
csClient, err := meta.(conns.ClientSession).ContainerAPI()
if err != nil {