diff --git a/internal/common/constant/cloud_region.go b/internal/common/constant/cloud_region.go
new file mode 100644
index 0000000000..08b0756869
--- /dev/null
+++ b/internal/common/constant/cloud_region.go
@@ -0,0 +1,5 @@
+package constant
+
+const (
+	UsWest2 = "US_WEST_2"
+)
diff --git a/internal/common/constant/instance_size.go b/internal/common/constant/instance_size.go
new file mode 100644
index 0000000000..e24415386b
--- /dev/null
+++ b/internal/common/constant/instance_size.go
@@ -0,0 +1,5 @@
+package constant
+
+const (
+	M10 = "M10"
+)
diff --git a/internal/service/advancedcluster/resource_advanced_cluster.go b/internal/service/advancedcluster/resource_advanced_cluster.go
index 5d18a609f5..0087c712b1 100644
--- a/internal/service/advancedcluster/resource_advanced_cluster.go
+++ b/internal/service/advancedcluster/resource_advanced_cluster.go
@@ -424,15 +424,7 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 	}
 
 	timeout := d.Timeout(schema.TimeoutCreate)
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
-		Target:     []string{"IDLE"},
-		Refresh:    resourceRefreshFunc(ctx, d.Get("name").(string), projectID, connV2),
-		Timeout:    timeout,
-		MinTimeout: 1 * time.Minute,
-		Delay:      3 * time.Minute,
-	}
-
+	stateConf := CreateStateChangeConfig(ctx, connV2, projectID, d.Get("name").(string), timeout)
 	_, err = stateConf.WaitForStateContext(ctx)
 	if err != nil {
 		return diag.FromErr(fmt.Errorf(errorCreate, err))
@@ -467,6 +459,17 @@ func resourceCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.
 	return resourceRead(ctx, d, meta)
 }
 
+func CreateStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf {
+	return retry.StateChangeConf{
+		Pending:    []string{"CREATING", "UPDATING", "REPAIRING", "REPEATING", "PENDING"},
+		Target:     []string{"IDLE"},
+		Refresh:    resourceRefreshFunc(ctx, name, projectID, connV2),
+		Timeout:    timeout,
+		MinTimeout: 1 * time.Minute,
+		Delay:      3 * time.Minute,
+	}
+}
+
 func resourceRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics {
 	connV2 := meta.(*config.MongoDBClient).AtlasV2
 	ids := conversion.DecodeStateID(d.Id())
@@ -763,15 +766,7 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.
 
 	log.Println("[INFO] Waiting for MongoDB ClusterAdvanced to be destroyed")
 
-	stateConf := &retry.StateChangeConf{
-		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
-		Target:     []string{"DELETED"},
-		Refresh:    resourceRefreshFunc(ctx, clusterName, projectID, connV2),
-		Timeout:    d.Timeout(schema.TimeoutDelete),
-		MinTimeout: 30 * time.Second,
-		Delay:      1 * time.Minute, // Wait 30 secs before starting
-	}
-
+	stateConf := DeleteStateChangeConfig(ctx, connV2, projectID, clusterName, d.Timeout(schema.TimeoutDelete))
 	// Wait, catching any errors
 	_, err = stateConf.WaitForStateContext(ctx)
 	if err != nil {
@@ -781,6 +776,17 @@ func resourceDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.
 	return nil
 }
 
+func DeleteStateChangeConfig(ctx context.Context, connV2 *admin.APIClient, projectID, name string, timeout time.Duration) retry.StateChangeConf {
+	return retry.StateChangeConf{
+		Pending:    []string{"IDLE", "CREATING", "UPDATING", "REPAIRING", "DELETING"},
+		Target:     []string{"DELETED"},
+		Refresh:    resourceRefreshFunc(ctx, name, projectID, connV2),
+		Timeout:    timeout,
+		MinTimeout: 30 * time.Second,
+		Delay:      1 * time.Minute,
+	}
+}
+
 func resourceImport(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) {
 	connV2 := meta.(*config.MongoDBClient).AtlasV2
 
diff --git a/internal/service/searchindex/resource_search_index_test.go b/internal/service/searchindex/resource_search_index_test.go
index 230c059075..3de5f30d99 100644
--- a/internal/service/searchindex/resource_search_index_test.go
+++ b/internal/service/searchindex/resource_search_index_test.go
@@ -17,20 +17,20 @@ func TestAccSearchIndex_basic(t *testing.T) {
 
 func TestAccSearchIndex_withSearchType(t *testing.T) {
 	var (
-		clusterInfo     = acc.GetClusterInfo(t, nil)
-		indexName       = acc.RandomName()
-		databaseName    = acc.RandomName()
-		indexType       = "search"
-		mappingsDynamic = "true"
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
+		indexType              = "search"
+		mappingsDynamic        = "true"
 	)
-	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterInfo)
+	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
 		ProtoV6ProviderFactories: acc.TestAccProviderV6Factories,
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configBasic(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, true),
+				Config: configBasic(projectID, indexName, databaseName, clusterName, true),
 				Check:  resource.ComposeTestCheckFunc(checks...),
 			},
 		},
@@ -39,13 +39,13 @@ func TestAccSearchIndex_withSearchType(t *testing.T) {
 
 func TestAccSearchIndex_withMapping(t *testing.T) {
 	var (
-		clusterInfo     = acc.GetClusterInfo(t, nil)
-		indexName       = acc.RandomName()
-		databaseName    = acc.RandomName()
-		indexType       = ""
-		mappingsDynamic = "false"
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
+		indexType              = ""
+		mappingsDynamic        = "false"
 	)
-	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterInfo)
+	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName)
 	checks = addAttrSetChecks(checks, "mappings_fields", "analyzers")
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
@@ -53,7 +53,7 @@ func TestAccSearchIndex_withMapping(t *testing.T) {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configWithMapping(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr),
+				Config: configWithMapping(projectID, indexName, databaseName, clusterName),
 				Check:  resource.ComposeTestCheckFunc(checks...),
 			},
 		},
@@ -62,19 +62,19 @@ func TestAccSearchIndex_withMapping(t *testing.T) {
 
 func TestAccSearchIndex_withSynonyms(t *testing.T) {
 	var (
-		clusterInfo     = acc.GetClusterInfo(t, nil)
-		indexName       = acc.RandomName()
-		databaseName    = acc.RandomName()
-		indexType       = ""
-		mappingsDynamic = "true"
-		mapChecks       = map[string]string{
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
+		indexType              = ""
+		mappingsDynamic        = "true"
+		mapChecks              = map[string]string{
 			"synonyms.#":                   "1",
 			"synonyms.0.analyzer":          "lucene.simple",
 			"synonyms.0.name":              "synonym_test",
 			"synonyms.0.source_collection": collectionName,
 		}
 	)
-	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterInfo)
+	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName)
 	checks = addAttrChecks(checks, mapChecks)
 
 	resource.ParallelTest(t, resource.TestCase{
@@ -83,7 +83,7 @@ func TestAccSearchIndex_withSynonyms(t *testing.T) {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configWithSynonyms(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, with),
+				Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with),
 				Check:  resource.ComposeTestCheckFunc(checks...),
 			},
 		},
@@ -92,13 +92,13 @@ func TestAccSearchIndex_withSynonyms(t *testing.T) {
 
 func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) {
 	var (
-		clusterInfo     = acc.GetClusterInfo(t, nil)
-		indexName       = acc.RandomName()
-		databaseName    = acc.RandomName()
-		indexType       = ""
-		mappingsDynamic = "true"
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
+		indexType              = ""
+		mappingsDynamic        = "true"
 	)
-	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterInfo)
+	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName)
 	checks1 := addAttrChecks(checks, map[string]string{
 		"synonyms.#":                   "1",
 		"synonyms.0.analyzer":          "lucene.simple",
@@ -112,11 +112,11 @@ func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configWithSynonyms(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, with),
+				Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, with),
 				Check:  resource.ComposeTestCheckFunc(checks1...),
 			},
 			{
-				Config: configWithSynonyms(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, without),
+				Config: configWithSynonyms(projectID, indexName, databaseName, clusterName, without),
 				Check:  resource.ComposeTestCheckFunc(checks2...),
 			},
 		},
@@ -125,9 +125,9 @@ func TestAccSearchIndex_updatedToEmptySynonyms(t *testing.T) {
 
 func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) {
 	var (
-		clusterInfo  = acc.GetClusterInfo(t, nil)
-		indexName    = acc.RandomName()
-		databaseName = acc.RandomName()
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
 	)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
@@ -135,14 +135,14 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configAdditional(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, analyzersTF),
+				Config: configAdditional(projectID, indexName, databaseName, clusterName, analyzersTF),
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttrWith(resourceName, "analyzers", acc.JSONEquals(analyzersJSON)),
 				),
 			},
 			{
-				Config: configAdditional(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, ""),
+				Config: configAdditional(projectID, indexName, databaseName, clusterName, ""),
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "analyzers", ""),
@@ -154,9 +154,9 @@ func TestAccSearchIndex_updatedToEmptyAnalyzers(t *testing.T) {
 
 func TestAccSearchIndex_updatedToEmptyMappingsFields(t *testing.T) {
 	var (
-		clusterInfo  = acc.GetClusterInfo(t, nil)
-		indexName    = acc.RandomName()
-		databaseName = acc.RandomName()
+		projectID, clusterName = acc.ClusterNameExecution(t)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
 	)
 	resource.ParallelTest(t, resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(t) },
@@ -164,14 +164,14 @@ func TestAccSearchIndex_updatedToEmptyMappingsFields(t *testing.T) {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configAdditional(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, mappingsFieldsTF),
+				Config: configAdditional(projectID, indexName, databaseName, clusterName, mappingsFieldsTF),
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttrWith(resourceName, "mappings_fields", acc.JSONEquals(mappingsFieldsJSON)),
 				),
 			},
 			{
-				Config: configAdditional(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, ""),
+				Config: configAdditional(projectID, indexName, databaseName, clusterName, ""),
 				Check: resource.ComposeTestCheckFunc(
 					checkExists(resourceName),
 					resource.TestCheckResourceAttr(resourceName, "mappings_fields", ""),
@@ -188,13 +188,13 @@ func TestAccSearchIndex_withVector(t *testing.T) {
 
 func basicTestCase(tb testing.TB) *resource.TestCase {
 	tb.Helper()
 	var (
-		clusterInfo     = acc.GetClusterInfo(tb, nil)
-		indexName       = acc.RandomName()
-		databaseName    = acc.RandomName()
-		indexType       = ""
-		mappingsDynamic = "true"
+		projectID, clusterName = acc.ClusterNameExecution(tb)
+		indexName              = acc.RandomName()
+		databaseName           = acc.RandomName()
+		indexType              = ""
+		mappingsDynamic        = "true"
 	)
-	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterInfo)
+	checks := commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName)
 
 	return &resource.TestCase{
 		PreCheck:                 func() { acc.PreCheckBasic(tb) },
@@ -202,11 +202,11 @@ func basicTestCase(tb testing.TB) *resource.TestCase {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configBasic(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, false),
+				Config: configBasic(projectID, indexName, databaseName, clusterName, false),
 				Check:  resource.ComposeTestCheckFunc(checks...),
 			},
 			{
-				Config:            configBasic(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr, false),
+				Config:            configBasic(projectID, indexName, databaseName, clusterName, false),
 				ResourceName:      resourceName,
 				ImportStateIdFunc: importStateIDFunc(resourceName),
 				ImportState:       true,
@@ -219,13 +219,13 @@ func basicTestCase(tb testing.TB) *resource.TestCase {
 
 func basicVectorTestCase(tb testing.TB) *resource.TestCase {
 	tb.Helper()
 	var (
-		clusterInfo  = acc.GetClusterInfo(tb, nil)
-		indexName    = acc.RandomName()
-		indexType    = "vectorSearch"
-		databaseName = acc.RandomName()
-		attributes   = map[string]string{
+		projectID, clusterName = acc.ClusterNameExecution(tb)
+		indexName              = acc.RandomName()
+		indexType              = "vectorSearch"
+		databaseName           = acc.RandomName()
+		attributes             = map[string]string{
 			"name":            indexName,
-			"cluster_name":    clusterInfo.ClusterName,
+			"cluster_name":    clusterName,
 			"database":        databaseName,
 			"collection_name": collectionName,
 			"type":            indexType,
@@ -242,17 +242,17 @@ func basicVectorTestCase(tb testing.TB) *resource.TestCase {
 		CheckDestroy:             acc.CheckDestroySearchIndex,
 		Steps: []resource.TestStep{
 			{
-				Config: configVector(clusterInfo.ProjectIDStr, indexName, databaseName, clusterInfo.ClusterNameStr, clusterInfo.ClusterTerraformStr),
+				Config: configVector(projectID, indexName, databaseName, clusterName),
 				Check:  resource.ComposeTestCheckFunc(checks...),
 			},
 		},
 	}
 }
 
-func commonChecks(indexName, indexType, mappingsDynamic, databaseName string, clusterInfo acc.ClusterInfo) []resource.TestCheckFunc {
+func commonChecks(indexName, indexType, mappingsDynamic, databaseName, clusterName string) []resource.TestCheckFunc {
 	attributes := map[string]string{
 		"name":            indexName,
-		"cluster_name":    clusterInfo.ClusterName,
+		"cluster_name":    clusterName,
 		"database":        databaseName,
 		"collection_name": collectionName,
 		"type":            indexType,
@@ -291,15 +291,15 @@ func checkExists(resourceName string) resource.TestCheckFunc {
 	}
 }
 
-func configBasic(projectIDStr, indexName, databaseName, clusterNameStr, clusterTerraformStr string, explicitType bool) string {
+func configBasic(projectID, indexName, databaseName, clusterName string, explicitType bool) string {
 	var indexType string
 	if explicitType {
 		indexType = `type="search"`
 	}
-	return clusterTerraformStr + fmt.Sprintf(`
+	return fmt.Sprintf(`
 		resource "mongodbatlas_search_index" "test" {
-			cluster_name    = %[1]s
-			project_id      = %[2]s
+			cluster_name    = %[1]q
+			project_id      = %[2]q
 			name            = %[3]q
 			database        = %[4]q
 			collection_name = %[5]q
@@ -309,18 +309,18 @@ func configBasic(projectIDStr, indexName, databaseName, clusterNameStr, clusterT
 		}
 
 		data "mongodbatlas_search_index" "data_index" {
-			cluster_name = %[1]s
-			project_id   = %[2]s
+			cluster_name = %[1]q
+			project_id   = %[2]q
 			index_id     = mongodbatlas_search_index.test.index_id
 		}
-	`, clusterNameStr, projectIDStr, indexName, databaseName, collectionName, searchAnalyzer, indexType)
+	`, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, indexType)
 }
 
-func configWithMapping(projectIDStr, indexName, databaseName, clusterNameStr, clusterTerraformStr string) string {
-	return clusterTerraformStr + fmt.Sprintf(`
+func configWithMapping(projectID, indexName, databaseName, clusterName string) string {
+	return fmt.Sprintf(`
 		resource "mongodbatlas_search_index" "test" {
-			cluster_name     = %[1]s
-			project_id       = %[2]s
+			cluster_name     = %[1]q
+			project_id       = %[2]q
 			name             = %[3]q
 			database         = %[4]q
 			collection_name  = %[5]q
@@ -331,14 +331,14 @@ func configWithMapping(projectIDStr, indexName, databaseName, clusterNameStr, cl
 		}
 
 		data "mongodbatlas_search_index" "data_index" {
-			cluster_name = %[1]s
-			project_id   = %[2]s
+			cluster_name = %[1]q
+			project_id   = %[2]q
 			index_id     = mongodbatlas_search_index.test.index_id
 		}
-	`, clusterNameStr, projectIDStr, indexName, databaseName, collectionName, searchAnalyzer, analyzersTF, mappingsFieldsTF)
+	`, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, analyzersTF, mappingsFieldsTF)
 }
 
-func configWithSynonyms(projectIDStr, indexName, databaseName, clusterNameStr, clusterTerraformStr string, has bool) string {
+func configWithSynonyms(projectID, indexName, databaseName, clusterName string, has bool) string {
 	var synonymsStr string
 	if has {
 		synonymsStr = fmt.Sprintf(`
@@ -350,10 +350,10 @@ func configWithSynonyms(projectIDStr, indexName, databaseName, clusterNameStr, c
 		`, collectionName)
 	}
 
-	return clusterTerraformStr + fmt.Sprintf(`
+	return fmt.Sprintf(`
 		resource "mongodbatlas_search_index" "test" {
-			cluster_name     = %[1]s
-			project_id       = %[2]s
+			cluster_name     = %[1]q
+			project_id       = %[2]q
 			name             = %[3]q
 			database         = %[4]q
 			collection_name  = %[5]q
@@ -363,18 +363,18 @@ func configWithSynonyms(projectIDStr, indexName, databaseName, clusterNameStr, c
 		}
 
 		data "mongodbatlas_search_index" "data_index" {
-			cluster_name = %[1]s
-			project_id   = %[2]s
+			cluster_name = %[1]q
+			project_id   = %[2]q
 			index_id     = mongodbatlas_search_index.test.index_id
 		}
-	`, clusterNameStr, projectIDStr, indexName, databaseName, collectionName, searchAnalyzer, synonymsStr)
+	`, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, synonymsStr)
 }
 
-func configAdditional(projectIDStr, indexName, databaseName, clusterNameStr, clusterTerraformStr, additional string) string {
-	return clusterTerraformStr + fmt.Sprintf(`
+func configAdditional(projectID, indexName, databaseName, clusterName, additional string) string {
+	return fmt.Sprintf(`
 		resource "mongodbatlas_search_index" "test" {
-			cluster_name     = %[1]s
-			project_id       = %[2]s
+			cluster_name     = %[1]q
+			project_id       = %[2]q
 			name             = %[3]q
 			database         = %[4]q
 			collection_name  = %[5]q
@@ -382,14 +382,14 @@ func configAdditional(projectIDStr, indexName, databaseName, clusterNameStr, clu
 			mappings_dynamic = true
 			%[7]s
 		}
-	`, clusterNameStr, projectIDStr, indexName, databaseName, collectionName, searchAnalyzer, additional)
+	`, clusterName, projectID, indexName, databaseName, collectionName, searchAnalyzer, additional)
 }
 
-func configVector(projectIDStr, indexName, databaseName, clusterNameStr, clusterTerraformStr string) string {
-	return clusterTerraformStr + fmt.Sprintf(`
+func configVector(projectID, indexName, databaseName, clusterName string) string {
+	return fmt.Sprintf(`
 		resource "mongodbatlas_search_index" "test" {
-			cluster_name    = %[1]s
-			project_id      = %[2]s
+			cluster_name    = %[1]q
+			project_id      = %[2]q
 			name            = %[3]q
 			database        = %[4]q
 			collection_name = %[5]q
@@ -402,11 +402,11 @@ func configVector(projectIDStr, indexName, databaseName, clusterNameStr, cluster
 		}
 
 		data "mongodbatlas_search_index" "data_index" {
-			cluster_name = %[1]s
-			project_id   = %[2]s
+			cluster_name = %[1]q
+			project_id   = %[2]q
 			index_id     = mongodbatlas_search_index.test.index_id
 		}
-	`, clusterNameStr, projectIDStr, indexName, databaseName, collectionName, fieldsJSON)
+	`, clusterName, projectID, indexName, databaseName, collectionName, fieldsJSON)
 }
 
 func importStateIDFunc(resourceName string) resource.ImportStateIdFunc {
diff --git a/internal/testutil/acc/atlas.go b/internal/testutil/acc/atlas.go
index 6fc2976c89..5cca362bae 100644
--- a/internal/testutil/acc/atlas.go
+++ b/internal/testutil/acc/atlas.go
@@ -5,7 +5,10 @@ import (
 	"fmt"
 	"os"
 	"testing"
+	"time"
 
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/common/constant"
+	"github.com/mongodb/terraform-provider-mongodbatlas/internal/service/advancedcluster"
 	"github.com/stretchr/testify/require"
 	"go.mongodb.org/atlas-sdk/v20231115008/admin"
 )
@@ -29,6 +32,54 @@ func deleteProject(id string) {
 	}
 }
 
+func createCluster(tb testing.TB, projectID, name string) string {
+	tb.Helper()
+	req := clusterReq(name, projectID)
+	_, _, err := ConnV2().ClustersApi.CreateCluster(context.Background(), projectID, &req).Execute()
+	require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err)
+
+	stateConf := advancedcluster.CreateStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour)
+	_, err = stateConf.WaitForStateContext(context.Background())
+	require.NoError(tb, err, "Cluster creation failed: %s, err: %s", name, err)
+
+	return name
+}
+
+func deleteCluster(projectID, name string) {
+	_, err := ConnV2().ClustersApi.DeleteCluster(context.Background(), projectID, name).Execute()
+	if err != nil {
+		fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err)
+	}
+	stateConf := advancedcluster.DeleteStateChangeConfig(context.Background(), ConnV2(), projectID, name, 1*time.Hour)
+	_, err = stateConf.WaitForStateContext(context.Background())
+	if err != nil {
+		fmt.Printf("Cluster deletion failed: %s %s, error: %s", projectID, name, err)
+	}
+}
+
+func clusterReq(name, projectID string) admin.AdvancedClusterDescription {
+	return admin.AdvancedClusterDescription{
+		Name:        admin.PtrString(name),
+		GroupId:     admin.PtrString(projectID),
+		ClusterType: admin.PtrString("REPLICASET"),
+		ReplicationSpecs: &[]admin.ReplicationSpec{
+			{
+				RegionConfigs: &[]admin.CloudRegionConfig{
+					{
+						ProviderName: admin.PtrString(constant.AWS),
+						RegionName:   admin.PtrString(constant.UsWest2),
+						Priority:     admin.PtrInt(7),
+						ElectableSpecs: &admin.HardwareSpec{
+							InstanceSize: admin.PtrString(constant.M10),
+							NodeCount:    admin.PtrInt(3),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 // ProjectID returns the id for a project name.
 // When `MONGODB_ATLAS_PROJECT_ID` is defined, it is used instead of creating a project. This is useful for local execution but not intended for CI executions.
 func ProjectID(tb testing.TB, name string) string {
@@ -57,3 +108,16 @@ func projectIDLocal(tb testing.TB) string {
 	tb.Logf("Using MONGODB_ATLAS_PROJECT_ID: %s", id)
 	return id
 }
+
+func clusterNameLocal(tb testing.TB) string {
+	tb.Helper()
+	name := os.Getenv("MONGODB_ATLAS_CLUSTER_NAME")
+	if name == "" {
+		return ""
+	}
+	if InCI() {
+		tb.Fatal("MONGODB_ATLAS_CLUSTER_NAME can't be used in CI")
+	}
+	tb.Logf("Using MONGODB_ATLAS_CLUSTER_NAME: %s", name)
+	return name
+}
diff --git a/internal/testutil/acc/search_index.go b/internal/testutil/acc/search_index.go
index 04023b5928..da8bc06d3f 100644
--- a/internal/testutil/acc/search_index.go
+++ b/internal/testutil/acc/search_index.go
@@ -18,7 +18,7 @@ func CheckDestroySearchIndex(state *terraform.State) error {
 		}
 		ids := conversion.DecodeStateID(rs.Primary.ID)
 		searchIndex, _, err := Conn().Search.GetIndex(context.Background(), ids["project_id"], ids["cluster_name"], ids["index_id"])
-		if err == nil && searchIndex != nil {
+		if err == nil && searchIndex != nil && searchIndex.Status != "IN_PROGRESS" { // index can be in progress for some seconds after delete is called
 			return fmt.Errorf("index id (%s) still exists", ids["index_id"])
 		}
 	}
diff --git a/internal/testutil/acc/shared_resource.go b/internal/testutil/acc/shared_resource.go
index ee772fbcf2..f50ddba133 100644
--- a/internal/testutil/acc/shared_resource.go
+++ b/internal/testutil/acc/shared_resource.go
@@ -16,6 +16,11 @@ func SetupSharedResources() func() {
 }
 
 func cleanupSharedResources() {
+	if sharedInfo.projectID != "" && sharedInfo.clusterName != "" {
+		fmt.Printf("Deleting execution cluster: %s, project id: %s\n", sharedInfo.clusterName, sharedInfo.projectID)
+		deleteCluster(sharedInfo.projectID, sharedInfo.clusterName)
+	}
+
 	if sharedInfo.projectID != "" {
 		fmt.Printf("Deleting execution project: %s, id: %s\n", sharedInfo.projectName, sharedInfo.projectID)
 		deleteProject(sharedInfo.projectID)
@@ -47,9 +52,42 @@ func ProjectIDExecution(tb testing.TB) string {
 	return sharedInfo.projectID
 }
 
+// ClusterNameExecution returns the name of a created cluster for the execution of the tests in the resource package.
+// This function relies on using an execution project and returns its id.
+// When `MONGODB_ATLAS_CLUSTER_NAME` and `MONGODB_ATLAS_PROJECT_ID` are defined, they are used instead of creating resources. This is useful for local execution but not intended for CI executions.
+func ClusterNameExecution(tb testing.TB) (projectID, clusterName string) {
+	tb.Helper()
+	SkipInUnitTest(tb)
+	require.True(tb, sharedInfo.init, "SetupSharedResources must be called from TestMain test package")
+
+	localProjectID := projectIDLocal(tb)
+	localClusterName := clusterNameLocal(tb)
+	if localProjectID != "" && localClusterName != "" {
+		return localProjectID, localClusterName
+	}
+
+	// before locking for cluster creation we need to ensure we have an execution project created
+	if sharedInfo.projectID == "" {
+		_ = ProjectIDExecution(tb)
+	}
+
+	sharedInfo.mu.Lock()
+	defer sharedInfo.mu.Unlock()
+
+	// lazy creation so it's only done if really needed
+	if sharedInfo.clusterName == "" {
+		name := RandomClusterName()
+		tb.Logf("Creating execution cluster: %s\n", name)
+		sharedInfo.clusterName = createCluster(tb, sharedInfo.projectID, name)
+	}
+
+	return sharedInfo.projectID, sharedInfo.clusterName
+}
+
 var sharedInfo = struct {
 	projectID   string
 	projectName string
+	clusterName string
 	mu          sync.Mutex
 	init        bool
 }{}
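
Usage sketch (illustrative, not part of the patch): the snippet below shows how a resource test package would be expected to consume the shared execution cluster introduced above, based on the SetupSharedResources() and ClusterNameExecution() signatures in this diff. The package name, test name, and the exact TestMain body (cleanup ordering, os.Exit) are assumptions for illustration only.

package searchindex_test // hypothetical package; any resource test package follows the same pattern

import (
	"os"
	"testing"

	"github.com/mongodb/terraform-provider-mongodbatlas/internal/testutil/acc"
)

// TestMain initializes the shared execution resources so ClusterNameExecution
// can lazily create one cluster reused by every test in the package, and the
// returned cleanup func deletes the cluster and project after the run.
func TestMain(m *testing.M) {
	cleanup := acc.SetupSharedResources()
	exitCode := m.Run()
	cleanup()
	os.Exit(exitCode)
}

// Example test: project id and cluster name come from the shared resources,
// or from MONGODB_ATLAS_PROJECT_ID / MONGODB_ATLAS_CLUSTER_NAME when set locally.
func TestAccExample_usesSharedCluster(t *testing.T) {
	projectID, clusterName := acc.ClusterNameExecution(t)
	_ = projectID
	_ = clusterName // interpolate these into the Terraform config under test
}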