diff --git a/docs/data-sources/migration.md b/docs/data-sources/migration.md
index f837cca..be2f84d 100644
--- a/docs/data-sources/migration.md
+++ b/docs/data-sources/migration.md
@@ -24,11 +24,13 @@ data "atlas_migration" "hello" {
### Required
-- `dir` (String) Select migration directory using URL format
- `url` (String, Sensitive) [driver://username:password@address/dbname?param=value] select a resource using the URL format
### Optional
+- `cloud` (Block, Optional) (see [below for nested schema](#nestedblock--cloud))
+- `dir` (String) Select migration directory using URL format
+- `remote_dir` (Block, Optional) (see [below for nested schema](#nestedblock--remote_dir))
- `revisions_schema` (String) The name of the schema the revisions table resides in
### Read-Only
@@ -38,3 +40,21 @@ data "atlas_migration" "hello" {
- `latest` (String) The latest version of the migration is in the migration directory
- `next` (String) Next migration version
- `status` (String) The Status of migration (OK, PENDING)
+
+
+### Nested Schema for `cloud`
+
+Optional:
+
+- `project` (String)
+- `token` (String)
+- `url` (String)
+
+
+
+### Nested Schema for `remote_dir`
+
+Optional:
+
+- `name` (String) The name of the remote directory. This attribute is required when remote_dir is set
+- `tag` (String) The tag of the remote directory
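+
+The following example reads the pending-migration status from a directory stored in Atlas Cloud; the directory name, token, project, and connection URL below are placeholders:
+
+```terraform
+data "atlas_migration" "hello" {
+  url = "mysql://root:pass@localhost:3306/example"
+  remote_dir {
+    name = "myapp"
+  }
+  cloud {
+    project = "myapp"
+    token   = "aci_xxxxx"
+  }
+}
+```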
diff --git a/docs/index.md b/docs/index.md
index 5528d75..f760d99 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -35,4 +35,14 @@ resource "atlas_schema" "market" {
### Optional
+- `cloud` (Block, Optional) (see [below for nested schema](#nestedblock--cloud))
- `dev_url` (String, Sensitive) The URL of the dev database. This configuration is shared for all resources if there is no config on the resource.
+
+
+### Nested Schema for `cloud`
+
+Optional:
+
+- `project` (String)
+- `token` (String)
+- `url` (String)
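+
+The cloud block can be set once on the provider; resources that do not define their own cloud block fall back to it. The token and project values below are placeholders:
+
+```terraform
+provider "atlas" {
+  cloud {
+    project = "myapp"
+    token   = "aci_xxxxx"
+  }
+}
+```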
diff --git a/docs/resources/migration.md b/docs/resources/migration.md
index a17ac13..3600cdb 100644
--- a/docs/resources/migration.md
+++ b/docs/resources/migration.md
@@ -30,13 +30,15 @@ resource "atlas_migration" "hello" {
### Required
-- `dir` (String) the URL of the migration directory, by default it is file://migrations, e.g a directory named migrations in the current working directory.
- `url` (String, Sensitive) The url of the database see https://atlasgo.io/cli/url
### Optional
+- `cloud` (Block, Optional) (see [below for nested schema](#nestedblock--cloud))
- `dev_url` (String, Sensitive) The url of the dev-db see https://atlasgo.io/cli/url
+- `dir` (String) The URL of the migration directory. Exactly one of dir or remote_dir must be set
- `env_name` (String) The name of the environment used for reporting runs to Atlas Cloud. Default: tf
+- `remote_dir` (Block, Optional) (see [below for nested schema](#nestedblock--remote_dir))
- `revisions_schema` (String) The name of the schema the revisions table resides in
- `version` (String) The version of the migration to apply, if not specified the latest version will be applied
@@ -45,6 +47,25 @@ resource "atlas_migration" "hello" {
- `id` (String) The ID of this resource
- `status` (Object) The status of the migration (see [below for nested schema](#nestedatt--status))
+
+### Nested Schema for `cloud`
+
+Optional:
+
+- `project` (String)
+- `token` (String)
+- `url` (String)
+
+
+
+### Nested Schema for `remote_dir`
+
+Optional:
+
+- `name` (String) The name of the remote directory. This attribute is required when remote_dir is set
+- `tag` (String) The tag of the remote directory
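+
+A typical remote-directory setup pairs this resource with the `atlas_migration` data source and assumes a cloud block is configured on the provider; the directory name and connection URL below are placeholders:
+
+```terraform
+data "atlas_migration" "this" {
+  url = "mysql://root:pass@localhost:3306/example"
+  remote_dir {
+    name = "myapp"
+  }
+}
+
+resource "atlas_migration" "this" {
+  url     = "mysql://root:pass@localhost:3306/example"
+  version = data.atlas_migration.this.next
+  remote_dir {
+    name = data.atlas_migration.this.remote_dir.name
+    tag  = data.atlas_migration.this.remote_dir.tag
+  }
+}
+```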
+
+
### Nested Schema for `status`
diff --git a/internal/provider/atlas_migration_data_source.go b/internal/provider/atlas_migration_data_source.go
index 5ede98e..40eb964 100644
--- a/internal/provider/atlas_migration_data_source.go
+++ b/internal/provider/atlas_migration_data_source.go
@@ -20,16 +20,23 @@ type (
}
// MigrationDataSourceModel describes the data source data model.
MigrationDataSourceModel struct {
- DirURL types.String `tfsdk:"dir"`
URL types.String `tfsdk:"url"`
RevisionsSchema types.String `tfsdk:"revisions_schema"`
+ DirURL types.String `tfsdk:"dir"`
+ Cloud *AtlasCloudBlock `tfsdk:"cloud"`
+ RemoteDir *RemoteDirBlock `tfsdk:"remote_dir"`
+
Status types.String `tfsdk:"status"`
Current types.String `tfsdk:"current"`
Next types.String `tfsdk:"next"`
Latest types.String `tfsdk:"latest"`
ID types.String `tfsdk:"id"`
}
+ RemoteDirBlock struct {
+ Name types.String `tfsdk:"name"`
+ Tag types.String `tfsdk:"tag"`
+ }
)
// Ensure provider defined types fully satisfy framework interfaces
@@ -38,8 +45,20 @@ var (
_ datasource.DataSourceWithConfigure = &MigrationDataSource{}
)
var (
- latestVersion = "Already at latest version"
- noMigration = "No migration applied yet"
+ latestVersion = "Already at latest version"
+ noMigration = "No migration applied yet"
+ remoteDirBlock = schema.SingleNestedBlock{
+ Attributes: map[string]schema.Attribute{
+ "name": schema.StringAttribute{
+ Description: "The name of the remote directory. This attribute is required when remote_dir is set",
+ Optional: true,
+ },
+ "tag": schema.StringAttribute{
+ Description: "The tag of the remote directory",
+ Optional: true,
+ },
+ },
+ }
)
// NewMigrationDataSource returns a new AtlasSchemaDataSource.
@@ -61,6 +80,10 @@ func (d *MigrationDataSource) Configure(ctx context.Context, req datasource.Conf
func (d *MigrationDataSource) Schema(_ context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
resp.Schema = schema.Schema{
Description: "Data source returns the information about the current migration.",
+ Blocks: map[string]schema.Block{
+ "cloud": cloudBlock,
+ "remote_dir": remoteDirBlock,
+ },
Attributes: map[string]schema.Attribute{
"url": schema.StringAttribute{
Description: "[driver://username:password@address/dbname?param=value] select a resource using the URL format",
@@ -69,7 +92,7 @@ func (d *MigrationDataSource) Schema(_ context.Context, _ datasource.SchemaReque
},
"dir": schema.StringAttribute{
Description: "Select migration directory using URL format",
- Required: true,
+ Optional: true,
},
"revisions_schema": schema.StringAttribute{
Description: "The name of the schema the revisions table resides in",
@@ -115,7 +138,7 @@ func (d *MigrationDataSource) Read(ctx context.Context, req datasource.ReadReque
}
defer os.RemoveAll(dir)
cfgPath := filepath.Join(dir, "atlas.hcl")
- if err := data.AtlasHCL(cfgPath); err != nil {
+ if err := data.AtlasHCL(cfgPath, d.cloud); err != nil {
resp.Diagnostics.AddError("Generate config failure",
fmt.Sprintf("Failed to write configuration file: %s", err.Error()))
return
@@ -140,7 +163,7 @@ func (d *MigrationDataSource) Read(ctx context.Context, req datasource.ReadReque
data.Next = types.StringValue(r.Next)
}
v := r.LatestVersion()
- data.ID = data.DirURL
+ data.ID = dirToID(data.RemoteDir, data.DirURL)
if v == "" {
data.Latest = types.StringNull()
} else {
@@ -149,11 +172,28 @@ func (d *MigrationDataSource) Read(ctx context.Context, req datasource.ReadReque
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
-func (d *MigrationDataSourceModel) AtlasHCL(path string) error {
+func (d *MigrationDataSourceModel) AtlasHCL(path string, cloud *AtlasCloudBlock) error {
cfg := templateData{
URL: d.URL.ValueString(),
DirURL: d.DirURL.ValueStringPointer(),
RevisionsSchema: d.RevisionsSchema.ValueString(),
}
+ if d.Cloud != nil && d.Cloud.Token.ValueString() != "" {
+ // Use the data source cloud block if it is set
+ cloud = d.Cloud
+ }
+ if cloud != nil {
+ cfg.Cloud = &cloudConfig{
+ Token: cloud.Token.ValueString(),
+ Project: cloud.Project.ValueStringPointer(),
+ URL: cloud.URL.ValueStringPointer(),
+ }
+ }
+ if d := d.RemoteDir; d != nil {
+ cfg.RemoteDir = &remoteDir{
+ Name: d.Name.ValueString(),
+ Tag: d.Tag.ValueStringPointer(),
+ }
+ }
return cfg.CreateFile(path)
}
diff --git a/internal/provider/atlas_migration_data_source_test.go b/internal/provider/atlas_migration_data_source_test.go
index df9ac98..69b8a6a 100644
--- a/internal/provider/atlas_migration_data_source_test.go
+++ b/internal/provider/atlas_migration_data_source_test.go
@@ -1,11 +1,20 @@
package provider_test
import (
+ "encoding/base64"
+ "encoding/json"
"fmt"
+ "io"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
"regexp"
+ "strings"
"testing"
+ "ariga.io/atlas/sql/migrate"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/stretchr/testify/require"
)
func TestAccMigrationDataSource(t *testing.T) {
@@ -23,7 +32,7 @@ func TestAccMigrationDataSource(t *testing.T) {
}
`, mysqlURL, schema),
Check: resource.ComposeAggregateTestCheckFunc(
- resource.TestCheckResourceAttr("data.atlas_migration.hello", "id", "migrations?format=atlas"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "id", "file://migrations?format=atlas"),
resource.TestCheckResourceAttr("data.atlas_migration.hello", "status", "PENDING"),
resource.TestCheckResourceAttr("data.atlas_migration.hello", "current", ""),
resource.TestCheckResourceAttr("data.atlas_migration.hello", "next", "20221101163823"),
@@ -49,3 +58,91 @@ func TestAccMigrationDataSource(t *testing.T) {
},
})
}
+
+func TestAccMigrationDataSource_RemoteDir(t *testing.T) {
+ var (
+ dir = migrate.MemDir{}
+ dbURL = fmt.Sprintf("sqlite://%s?_fk=true", filepath.Join(t.TempDir(), "sqlite.db"))
+ srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var m struct {
+ Query string `json:"query"`
+ Variables struct {
+ Input json.RawMessage `json:"input"`
+ } `json:"variables"`
+ }
+ require.NoError(t, json.NewDecoder(r.Body).Decode(&m))
+ switch {
+ case strings.Contains(m.Query, "query"):
+ writeDir(t, &dir, w)
+ default:
+ t.Fatalf("unexpected query: %s", m.Query)
+ }
+ }))
+ config = fmt.Sprintf(`
+data "atlas_migration" "hello" {
+ url = "%s"
+ remote_dir {
+ name = "test"
+ }
+ cloud {
+ token = "aci_bearer_token"
+ url = "%s"
+ }
+}`, dbURL, srv.URL)
+ )
+ t.Cleanup(srv.Close)
+
+ t.Run("NoPendingFiles", func(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "id", "remote_dir://test"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "status", "OK"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "current", "No migration applied yet"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "next", ""),
+ resource.TestCheckNoResourceAttr("data.atlas_migration.hello", "latest"),
+ ),
+ },
+ },
+ })
+ })
+
+ t.Run("WithFiles", func(t *testing.T) {
+ require.NoError(t, dir.WriteFile("1.sql", []byte("create table foo (id int)")))
+ require.NoError(t, dir.WriteFile("2.sql", []byte("create table bar (id int)")))
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "id", "remote_dir://test"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "status", "PENDING"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "current", ""),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "next", "1"),
+ resource.TestCheckResourceAttr("data.atlas_migration.hello", "latest", "2"),
+ ),
+ },
+ },
+ })
+ })
+}
+
+func writeDir(t *testing.T, dir migrate.Dir, w io.Writer) {
+ // Checksum before archiving.
+ hf, err := dir.Checksum()
+ require.NoError(t, err)
+ ht, err := hf.MarshalText()
+ require.NoError(t, err)
+ require.NoError(t, dir.WriteFile(migrate.HashFileName, ht))
+ // Archive and send.
+ arc, err := migrate.ArchiveDir(dir)
+ require.NoError(t, err)
+ _, err = fmt.Fprintf(w, `{"data":{"dir":{"content":%q}}}`, base64.StdEncoding.EncodeToString(arc))
+ require.NoError(t, err)
+}
diff --git a/internal/provider/atlas_migration_resource.go b/internal/provider/atlas_migration_resource.go
index 3516d98..0384589 100644
--- a/internal/provider/atlas_migration_resource.go
+++ b/internal/provider/atlas_migration_resource.go
@@ -7,7 +7,6 @@ import (
"path/filepath"
"strings"
- "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
"github.com/hashicorp/terraform-plugin-framework/attr"
"github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-framework/path"
@@ -15,7 +14,6 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
- "github.com/hashicorp/terraform-plugin-framework/schema/validator"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
@@ -35,6 +33,9 @@ type (
RevisionsSchema types.String `tfsdk:"revisions_schema"`
Version types.String `tfsdk:"version"`
+ Cloud *AtlasCloudBlock `tfsdk:"cloud"`
+ RemoteDir *RemoteDirBlock `tfsdk:"remote_dir"`
+
EnvName types.String `tfsdk:"env_name"`
Status types.Object `tfsdk:"status"`
ID types.String `tfsdk:"id"`
@@ -83,15 +84,11 @@ func (r *MigrationResource) Schema(_ context.Context, _ resource.SchemaRequest,
resp.Schema = schema.Schema{
Description: "The resource applies pending migration files on the connected database." +
"See https://atlasgo.io/",
+ Blocks: map[string]schema.Block{
+ "cloud": cloudBlock,
+ "remote_dir": remoteDirBlock,
+ },
Attributes: map[string]schema.Attribute{
- "dir": schema.StringAttribute{
- Description: "the URL of the migration directory, by default it is file://migrations, " +
- "e.g a directory named migrations in the current working directory.",
- Required: true,
- Validators: []validator.String{
- stringvalidator.LengthAtLeast(1),
- },
- },
"url": schema.StringAttribute{
Description: "The url of the database see https://atlasgo.io/cli/url",
Required: true,
@@ -106,6 +103,11 @@ func (r *MigrationResource) Schema(_ context.Context, _ resource.SchemaRequest,
Description: "The name of the schema the revisions table resides in",
Optional: true,
},
+ "dir": schema.StringAttribute{
+			Description: "The URL of the migration directory." +
+				" Exactly one of dir or remote_dir must be set",
+ Optional: true,
+ },
"env_name": schema.StringAttribute{
Description: "The name of the environment used for reporting runs to Atlas Cloud. Default: tf",
Optional: true,
@@ -143,7 +145,7 @@ func (r *MigrationResource) Create(ctx context.Context, req resource.CreateReque
return
}
// Only set ID when creating a new resource
- data.ID = dirToID(data.DirURL)
+ data.ID = dirToID(data.RemoteDir, data.DirURL)
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
@@ -174,7 +176,6 @@ func (r *MigrationResource) Read(ctx context.Context, req resource.ReadRequest,
resp.State.RemoveResource(ctx)
return
}
- data.ID = dirToID(data.DirURL)
data.Status = nextStatus
resp.Diagnostics.Append(resp.State.Set(ctx, &data)...)
}
@@ -206,6 +207,47 @@ func (r MigrationResource) ValidateConfig(ctx context.Context, req resource.Vali
return
}
resp.Diagnostics.Append(r.validateConfig(ctx, req.Config)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ // Validate the remote_dir block
+ switch {
+ case data.RemoteDir != nil:
+ if data.RemoteDir.Name.IsNull() {
+ resp.Diagnostics.AddError(
+ "remote_dir.name is unset",
+ "remote_dir.name is required when remote_dir is set",
+ )
+ return
+ }
+ // providerData.client is set when the provider is configured
+ if data.Cloud == nil && (r.cloud == nil && r.providerData.client != nil) {
+ resp.Diagnostics.AddError(
+ "cloud is unset",
+ "cloud is required when remote_dir is set",
+ )
+ return
+ }
+ if !data.DirURL.IsNull() {
+ resp.Diagnostics.AddError(
+ "dir is set",
+ "dir is not allowed when remote_dir is set",
+ )
+ return
+ }
+ case data.DirURL.IsNull():
+ resp.Diagnostics.AddError(
+ "dir is unset",
+ "dir is required when remote_dir is unset",
+ )
+ return
+ case len(data.DirURL.ValueString()) == 0:
+ resp.Diagnostics.AddError(
+ "dir is empty",
+ "dir is required when remote_dir is unset",
+ )
+ return
+ }
if data.Version.IsNull() {
resp.Diagnostics.AddAttributeWarning(
path.Root("version"),
@@ -242,7 +284,7 @@ func (r *MigrationResource) ModifyPlan(ctx context.Context, req resource.ModifyP
}
defer os.RemoveAll(dir)
cfgPath := filepath.Join(dir, "atlas.hcl")
- err = plan.AtlasHCL(cfgPath, r.devURL)
+ err = plan.AtlasHCL(cfgPath, r.devURL, r.cloud)
if err != nil {
resp.Diagnostics.AddError("Generate config failure",
fmt.Sprintf("Failed to create atlas.hcl: %s", err.Error()))
@@ -263,6 +305,11 @@ func (r *MigrationResource) ModifyPlan(ctx context.Context, req resource.ModifyP
} else {
plan.Version = types.StringValue(v)
}
+ // Update plan if the user didn't specify a version
+ resp.Diagnostics.Append(resp.Plan.SetAttribute(ctx, path.Root("version"), v)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
}
pendingCount, _ := report.Amount(plan.Version.ValueString())
if pendingCount == 0 {
@@ -311,7 +358,7 @@ func (r *MigrationResource) migrate(ctx context.Context, data *MigrationResource
}
defer os.RemoveAll(dir)
cfgPath := filepath.Join(dir, "atlas.hcl")
- err = data.AtlasHCL(cfgPath, r.devURL)
+ err = data.AtlasHCL(cfgPath, r.devURL, r.cloud)
if err != nil {
return diag.Diagnostics{
diag.NewErrorDiagnostic("Generate config failure",
@@ -364,7 +411,7 @@ func (r *MigrationResource) buildStatus(ctx context.Context, data *MigrationReso
}
defer os.RemoveAll(dir)
cfgPath := filepath.Join(dir, "atlas.hcl")
- err = data.AtlasHCL(cfgPath, r.devURL)
+ err = data.AtlasHCL(cfgPath, r.devURL, r.cloud)
if err != nil {
return types.ObjectNull(statusObjectAttrs), diag.Diagnostics{
diag.NewErrorDiagnostic("Generate config failure",
@@ -400,7 +447,11 @@ func (r *MigrationResource) buildStatus(ctx context.Context, data *MigrationReso
})
}
-func dirToID(dir types.String) types.String {
+// dirToID returns the ID of the resource.
+func dirToID(remoteDir *RemoteDirBlock, dir types.String) types.String {
+ if remoteDir != nil {
+ return types.StringValue(fmt.Sprintf("remote_dir://%s", remoteDir.Name.ValueString()))
+ }
return types.StringValue(fmt.Sprintf("file://%s", dir.ValueString()))
}
@@ -411,12 +462,29 @@ func defaultString(s types.String, def string) string {
return s.ValueString()
}
-func (d *MigrationResourceModel) AtlasHCL(name string, devURL string) error {
+func (d *MigrationResourceModel) AtlasHCL(name string, devURL string, cloud *AtlasCloudBlock) error {
cfg := templateData{
URL: d.URL.ValueString(),
DevURL: defaultString(d.DevURL, devURL),
DirURL: d.DirURL.ValueStringPointer(),
RevisionsSchema: d.RevisionsSchema.ValueString(),
}
+ if d.Cloud != nil && d.Cloud.Token.ValueString() != "" {
+		// Use the cloud block on the resource if it is set
+ cloud = d.Cloud
+ }
+ if cloud != nil {
+ cfg.Cloud = &cloudConfig{
+ Token: cloud.Token.ValueString(),
+ Project: cloud.Project.ValueStringPointer(),
+ URL: cloud.URL.ValueStringPointer(),
+ }
+ }
+ if d := d.RemoteDir; d != nil {
+ cfg.RemoteDir = &remoteDir{
+ Name: d.Name.ValueString(),
+ Tag: d.Tag.ValueStringPointer(),
+ }
+ }
return cfg.CreateFile(name)
}
diff --git a/internal/provider/atlas_migration_resource_test.go b/internal/provider/atlas_migration_resource_test.go
index 6036695..19e7dfe 100644
--- a/internal/provider/atlas_migration_resource_test.go
+++ b/internal/provider/atlas_migration_resource_test.go
@@ -2,10 +2,16 @@ package provider_test
import (
"context"
+ "encoding/json"
"fmt"
+ "net/http"
+ "net/http/httptest"
+ "path/filepath"
"regexp"
+ "strings"
"testing"
+ "ariga.io/atlas/sql/migrate"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
@@ -305,6 +311,99 @@ func TestAccMigrationResource_Dirty(t *testing.T) {
})
}
+func TestAccMigrationResource_RemoteDir(t *testing.T) {
+ var (
+ dir = migrate.MemDir{}
+ dbURL = fmt.Sprintf("sqlite://%s?_fk=true", filepath.Join(t.TempDir(), "sqlite.db"))
+ srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var m struct {
+ Query string `json:"query"`
+ Variables struct {
+ Input json.RawMessage `json:"input"`
+ } `json:"variables"`
+ }
+ require.NoError(t, json.NewDecoder(r.Body).Decode(&m))
+ switch {
+ case strings.Contains(m.Query, "query"):
+ writeDir(t, &dir, w)
+ case strings.Contains(m.Query, "reportMigration"):
+ fmt.Fprint(w, `{"data":{"reportMigration":{"success":true}}}`)
+ default:
+ t.Fatalf("unexpected query: %s", m.Query)
+ }
+ }))
+ config = fmt.Sprintf(`
+ provider "atlas" {
+ cloud {
+ token = "aci_bearer_token"
+ url = "%[1]s"
+ project = "test"
+ }
+ }
+ data "atlas_migration" "hello" {
+ url = "%[2]s"
+ remote_dir {
+ name = "test"
+ }
+ }
+ resource "atlas_migration" "testdb" {
+ url = "%[2]s"
+ version = data.atlas_migration.hello.next
+ remote_dir {
+ name = data.atlas_migration.hello.remote_dir.name
+ tag = data.atlas_migration.hello.remote_dir.tag
+ }
+ }
+ `, srv.URL, dbURL)
+ )
+ t.Cleanup(srv.Close)
+ t.Run("NoPendingFiles", func(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.current", "No migration applied yet"),
+ resource.TestCheckNoResourceAttr("atlas_migration.testdb", "status.next"),
+ ),
+ },
+ },
+ })
+ })
+ t.Run("WithPendingFiles", func(t *testing.T) {
+ require.NoError(t, dir.WriteFile("1.sql", []byte("create table foo (id int)")))
+ require.NoError(t, dir.WriteFile("2.sql", []byte("create table bar (id int)")))
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: config,
+ ExpectNonEmptyPlan: true,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "id", "remote_dir://test"),
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.current", "1"),
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.latest", "2"),
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.next", "2"),
+ ),
+ },
+ {
+ Config: config,
+ ExpectNonEmptyPlan: true,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "id", "remote_dir://test"),
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.current", "2"),
+ resource.TestCheckResourceAttr("atlas_migration.testdb", "status.latest", "2"),
+ resource.TestCheckNoResourceAttr("atlas_migration.testdb", "status.next"),
+ ),
+ },
+ },
+ })
+ })
+}
+
func newFooProvider(name, resource string) func() (*schema.Provider, error) {
return func() (*schema.Provider, error) {
return &schema.Provider{
diff --git a/internal/provider/config/migrate.tmpl b/internal/provider/config/migrate.tmpl
index b63594d..ec61fe4 100644
--- a/internal/provider/config/migrate.tmpl
+++ b/internal/provider/config/migrate.tmpl
@@ -14,7 +14,9 @@ atlas {
{{ with .RemoteDir }}
data "remote_dir" "this" {
name = "{{ .Name }}"
- tag = "{{ or .Tag "latest" }}"
+{{- if .Tag }}
+ tag = "{{ .Tag }}"
+{{- end }}
}
{{- end }}
env {
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 4bc98c5..aeaed67 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -42,17 +42,43 @@ type (
// provider is built and ran locally, and "test" when running acceptance
// testing.
version string
+ data providerData
}
// AtlasProviderModel describes the provider data model.
AtlasProviderModel struct {
// DevURL is the URL of the dev-db.
DevURL types.String `tfsdk:"dev_url"`
+ // Cloud is the Atlas Cloud configuration.
+ Cloud *AtlasCloudBlock `tfsdk:"cloud"`
+ }
+ AtlasCloudBlock struct {
+ Token types.String `tfsdk:"token"`
+ URL types.String `tfsdk:"url"`
+ Project types.String `tfsdk:"project"`
}
providerData struct {
// client is the client used to interact with the Atlas CLI.
client *atlas.Client
// devURL is the URL of the dev-db.
devURL string
+ // cloud is the Atlas Cloud configuration.
+ cloud *AtlasCloudBlock
+ }
+)
+
+var (
+ cloudBlock = schema.SingleNestedBlock{
+ Attributes: map[string]schema.Attribute{
+ "token": schema.StringAttribute{
+ Optional: true,
+ },
+ "url": schema.StringAttribute{
+ Optional: true,
+ },
+ "project": schema.StringAttribute{
+ Optional: true,
+ },
+ },
}
)
@@ -96,6 +122,9 @@ func (p *AtlasProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp
resp.Schema = schema.Schema{
Description: "The Atlas provider is used to manage your database migrations, using the DDL of Atlas.\n" +
"For documentation about Atlas, visit: https://atlasgo.io",
+ Blocks: map[string]schema.Block{
+ "cloud": cloudBlock,
+ },
Attributes: map[string]schema.Attribute{
"dev_url": schema.StringAttribute{
Description: "The URL of the dev database. This configuration is shared for all resources if there is no config on the resource.",
@@ -120,12 +149,12 @@ func (p *AtlasProvider) Configure(ctx context.Context, req provider.ConfigureReq
if resp.Diagnostics.HasError() {
return
}
- data := providerData{client: c}
+	p.data = providerData{client: c}
if model != nil {
- data.devURL = model.DevURL.ValueString()
+		p.data.devURL = model.DevURL.ValueString()
+		p.data.cloud = model.Cloud
}
- resp.DataSourceData = data
- resp.ResourceData = data
+ resp.DataSourceData = p.data
+ resp.ResourceData = p.data
}
// DataSources implements provider.Provider.
diff --git a/internal/provider/template.go b/internal/provider/template.go
index 5a3e0d1..f7f1759 100644
--- a/internal/provider/template.go
+++ b/internal/provider/template.go
@@ -2,8 +2,8 @@ package provider
import (
_ "embed"
- "html/template"
"os"
+ "text/template"
)
type (
diff --git a/internal/provider/template_test.go b/internal/provider/template_test.go
index 188a5a4..e522bac 100644
--- a/internal/provider/template_test.go
+++ b/internal/provider/template_test.go
@@ -10,7 +10,7 @@ import (
)
func TestTemplate(t *testing.T) {
- var update = true
+ var update = false
tests := []struct {
name string
data templateData
@@ -18,7 +18,7 @@ func TestTemplate(t *testing.T) {
{name: "token", data: templateData{
URL: "mysql://user:pass@localhost:3306/tf-db",
Cloud: &cloudConfig{
- Token: "token",
+ Token: "token+%=_-",
},
}},
{name: "cloud", data: templateData{
diff --git a/internal/provider/testdata/TestTemplate/cloud-cfg.hcl b/internal/provider/testdata/TestTemplate/cloud-cfg.hcl
index 61de208..b3142d5 100644
--- a/internal/provider/testdata/TestTemplate/cloud-cfg.hcl
+++ b/internal/provider/testdata/TestTemplate/cloud-cfg.hcl
@@ -9,7 +9,6 @@ atlas {
data "remote_dir" "this" {
name = "tf-dir"
- tag = "latest"
}
env {
name = atlas.env
diff --git a/internal/provider/testdata/TestTemplate/cloud-no-token-cfg.hcl b/internal/provider/testdata/TestTemplate/cloud-no-token-cfg.hcl
index aeb6025..42a1e25 100644
--- a/internal/provider/testdata/TestTemplate/cloud-no-token-cfg.hcl
+++ b/internal/provider/testdata/TestTemplate/cloud-no-token-cfg.hcl
@@ -2,7 +2,6 @@
data "remote_dir" "this" {
name = "tf-dir"
- tag = "latest"
}
env {
name = atlas.env
diff --git a/internal/provider/testdata/TestTemplate/token-cfg.hcl b/internal/provider/testdata/TestTemplate/token-cfg.hcl
index 1d5d33d..915d4a7 100644
--- a/internal/provider/testdata/TestTemplate/token-cfg.hcl
+++ b/internal/provider/testdata/TestTemplate/token-cfg.hcl
@@ -1,7 +1,7 @@
atlas {
cloud {
- token = "token"
+ token = "token+%=_-"
}
}