From 9a7fb65aaa3fa4510843de18b6298bbac3cc2e0e Mon Sep 17 00:00:00 2001 From: Fabian Mettler Date: Sun, 17 Dec 2023 00:46:05 +0100 Subject: [PATCH] storage: Introduce Storage Bucket management Signed-off-by: Fabian Mettler --- docs/resources/storage_bucket.md | 81 +++++ docs/resources/storage_bucket_key.md | 90 +++++ internal/provider/provider.go | 2 + internal/storage/resource_storage_bucket.go | 331 +++++++++++++++++ .../storage/resource_storage_bucket_key.go | 342 ++++++++++++++++++ .../resource_storage_bucket_key_test.go | 235 ++++++++++++ .../storage/resource_storage_bucket_test.go | 161 +++++++++ 7 files changed, 1242 insertions(+) create mode 100644 docs/resources/storage_bucket.md create mode 100644 docs/resources/storage_bucket_key.md create mode 100644 internal/storage/resource_storage_bucket.go create mode 100644 internal/storage/resource_storage_bucket_key.go create mode 100644 internal/storage/resource_storage_bucket_key_test.go create mode 100644 internal/storage/resource_storage_bucket_test.go diff --git a/docs/resources/storage_bucket.md b/docs/resources/storage_bucket.md new file mode 100644 index 0000000..20cc22e --- /dev/null +++ b/docs/resources/storage_bucket.md @@ -0,0 +1,81 @@ +# incus_storage_bucket + +Manages an Incus storage bucket. + +## Example Usage + +```hcl +resource "incus_storage_pool" "pool1" { + name = "mypool" + driver = "zfs" +} + +resource "incus_storage_bucket" "bucket1" { + name = "mybucket" + pool = incus_storage_pool.pool1.name +} +``` + +## Argument Reference + +* `name` - **Required** - Name of the storage bucket. + +* `pool` - **Required** - Name of storage pool to host the storage bucket. + +* `description` - *Optional* - Description of the storage bucket. + +* `config` - *Optional* - Map of key/value pairs of + [storage bucket config settings](https://linuxcontainers.org/incus/docs/main/howto/storage_buckets/#configure-storage-bucket-settings). + Config settings vary depending on the Storage Pool used. 
+ +* `project` - *Optional* - Name of the project where the storage bucket will be stored. + +* `remote` - *Optional* - The remote in which the resource will be created. If + not provided, the provider's default remote will be used. + +* `target` - *Optional* - Specify a target node in a cluster. + + +## Attribute Reference + +The following attributes are exported: + +* `location` - Name of the node where storage bucket was created. It could be useful with Incus in cluster mode. + +## Importing + +Import ID syntax: `[:][]//` + +* `` - *Optional* - Remote name. +* `` - *Optional* - Project name. +* `` - **Required** - Storage pool name. +* `` - **Required** - Storage bucket name. + +### Import example + +Example using terraform import command: + +```shell +$ terraform import incus_storage_bucket.bucket1 proj/pool1/bucket1 +``` + +Example using the import block (only available in Terraform v1.5.0 and later): + +```hcl +resource "incus_storage_bucket" "mybucket" { + name = "bucket1" + pool = "pool1" + project = "proj" +} + +import { + to = incus_storage_bucket.mybucket + id = "proj/pool1/mybucket" +} +``` + +## Notes + +* Incus creates by default for each storage bucket an admin access key + and a secret key. This key can be imported using the `incus_storage_bucket_key` resource. + diff --git a/docs/resources/storage_bucket_key.md b/docs/resources/storage_bucket_key.md new file mode 100644 index 0000000..43ebbcb --- /dev/null +++ b/docs/resources/storage_bucket_key.md @@ -0,0 +1,90 @@ +# incus_storage_bucket_key + +Manages an Incus storage bucket key. + +~> **Note:** The exported attributes `access_key` and `secret_key` will be stored in the raw state as plain-text. [Read more about sensitive data in state](https://www.terraform.io/language/state/sensitive-data). 
+ +## Example Usage + +```hcl +resource "incus_storage_pool" "pool1" { + name = "mypool" + driver = "zfs" +} + +resource "incus_storage_bucket" "bucket1" { + name = "mybucket" + pool = incus_storage_pool.pool1.name +} + +resource "incus_storage_bucket_key" "key1" { + name = "mykey" + pool = incus_storage_bucket.bucket1.pool + storage_bucket = incus_storage_bucket.bucket1.name +} +``` + +## Argument Reference + +* `name` - **Required** - Name of the storage bucket key. + +* `pool` - **Required** - Name of storage pool to host the storage bucket key. + +* `storage_bucket` - **Required** - Name of the storage bucket. + +* `description` - *Optional* - Description of the storage bucket key. + +* `role` - *Optional* - Name of the role that controls the access rights for the + key. If not specified, the default role is used, as described in the [official documentation](https://linuxcontainers.org/incus/docs/main/howto/storage_buckets/#manage-storage-bucket-keys). + +* `project` - *Optional* - Name of the project where the storage bucket key will be stored. + +* `remote` - *Optional* - The remote in which the resource will be created. If + not provided, the provider's default remote will be used. + + +## Attribute Reference + +The following attributes are exported: + +* `access_key` - Access key of the storage bucket key. + +* `secret_key` - Secret key of the storage bucket key. + +## Importing + +Import ID syntax: `[:][]///` + +* `` - *Optional* - Remote name. +* `` - *Optional* - Project name. +* `` - **Required** - Storage pool name. +* `` - **Required** - Storage bucket name. 
+
+### Import example
+
+Example using terraform import command:
+
+```shell
+$ terraform import incus_storage_bucket_key.key1 proj/pool1/bucket1/key1
+```
+
+Example using the import block (only available in Terraform v1.5.0 and later):
+
+```hcl
+resource "incus_storage_bucket_key" "mykey" {
+  name           = "mykey"
+  project        = "proj"
+  pool           = "pool1"
+  storage_bucket = "bucket1"
+}
+
+import {
+  to = incus_storage_bucket_key.mykey
+  id = "proj/pool1/bucket1/mykey"
+}
+```
+
+## Notes
+
+* Incus creates by default for each storage bucket an admin access key
+  and a secret key. This key can be imported using the `incus_storage_bucket_key` resource.
diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index cbd87db..67c18be 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -274,6 +274,8 @@ func (p *IncusProvider) Resources(_ context.Context) []func() resource.Resource
 		storage.NewStoragePoolResource,
 		storage.NewStorageVolumeResource,
 		storage.NewStorageVolumeCopyResource,
+		storage.NewStorageBucketResource,
+		storage.NewStorageBucketKeyResource,
 	}
 }
 
diff --git a/internal/storage/resource_storage_bucket.go b/internal/storage/resource_storage_bucket.go
new file mode 100644
index 0000000..7352b64
--- /dev/null
+++ b/internal/storage/resource_storage_bucket.go
@@ -0,0 +1,331 @@
+package storage
+
+import (
+	"context"
+	"fmt"
+	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/mapdefault"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
+
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + incus "github.com/lxc/incus/client" + "github.com/lxc/incus/shared/api" + "github.com/lxc/terraform-provider-incus/internal/common" + "github.com/lxc/terraform-provider-incus/internal/errors" + provider_config "github.com/lxc/terraform-provider-incus/internal/provider-config" +) + +type StorageBucketModel struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Pool types.String `tfsdk:"pool"` + Project types.String `tfsdk:"project"` + Target types.String `tfsdk:"target"` + Remote types.String `tfsdk:"remote"` + Config types.Map `tfsdk:"config"` + + // Computed. + Location types.String `tfsdk:"location"` +} + +// StorageBucketResource represent Incus storage bucket resource. +type StorageBucketResource struct { + provider *provider_config.IncusProviderConfig +} + +// NewStorageBucketResource return a new storage bucket resource. 
+func NewStorageBucketResource() resource.Resource { + return &StorageBucketResource{} +} + +func (r StorageBucketResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = fmt.Sprintf("%s_storage_bucket", req.ProviderTypeName) +} + +func (r StorageBucketResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "description": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(""), + }, + + "pool": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "project": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "remote": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "target": schema.StringAttribute{ + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplaceIfConfigured(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "config": schema.MapAttribute{ + Optional: true, + Computed: true, + ElementType: types.StringType, + Default: mapdefault.StaticValue(types.MapValueMust(types.StringType, map[string]attr.Value{})), + }, + + // Computed. 
+ + "location": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (r *StorageBucketResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + data := req.ProviderData + if data == nil { + return + } + + provider, ok := data.(*provider_config.IncusProviderConfig) + if !ok { + resp.Diagnostics.Append(errors.NewProviderDataTypeError(req.ProviderData)) + return + } + + r.provider = provider +} + +func (r StorageBucketResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan StorageBucketModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + target := plan.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + config, diags := common.ToConfigMap(ctx, plan.Config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.Name.ValueString() + + bucket := api.StorageBucketsPost{ + Name: bucketName, + StorageBucketPut: api.StorageBucketPut{ + Description: plan.Description.ValueString(), + Config: config, + }, + } + + _, err = server.CreateStoragePoolBucket(poolName, bucket) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to create storage bucket %q", bucketName), err.Error()) + return + } + + // Update Terraform state. + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) +} + +func (r StorageBucketResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + var state StorageBucketModel + + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + remote := state.Remote.ValueString() + project := state.Project.ValueString() + target := state.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + diags = r.SyncState(ctx, &resp.State, server, state) + resp.Diagnostics.Append(diags...) +} + +func (r StorageBucketResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan StorageBucketModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + target := plan.Target.ValueString() + server, err := r.provider.InstanceServer(remote, project, target) + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.Name.ValueString() + _, etag, err := server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + config, diags := common.ToConfigMap(ctx, plan.Config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + newBucket := api.StorageBucketPut{ + Config: config, + Description: plan.Description.ValueString(), + } + + err = server.UpdateStoragePoolBucket(poolName, bucketName, newBucket, etag) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to update storage bucket %q", bucketName), err.Error()) + return + } + + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) 
+}
+
+func (r StorageBucketResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	var state StorageBucketModel
+
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := state.Remote.ValueString()
+	project := state.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	poolName := state.Pool.ValueString()
+	bucketName := state.Name.ValueString()
+	err = server.DeleteStoragePoolBucket(poolName, bucketName)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to delete storage bucket %q", bucketName), err.Error())
+		return
+	}
+}
+
+func (r StorageBucketResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	meta := common.ImportMetadata{
+		ResourceName:   "storage_bucket",
+		RequiredFields: []string{"pool", "name"},
+	}
+
+	fields, diags := meta.ParseImportID(req.ID)
+	// fields contains the parsed import ID segments (remote and project are optional).
+	if diags != nil {
+		resp.Diagnostics.Append(diags)
+		return
+	}
+
+	for k, v := range fields {
+		resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(k), v)...)
+	}
+}
+
+// SyncState fetches the server's current state for a storage bucket and
+// updates the provided model. It then applies this updated model as the
+// new state in Terraform.
+func (r StorageBucketResource) SyncState(ctx context.Context, tfState *tfsdk.State, server incus.InstanceServer, m StorageBucketModel) diag.Diagnostics { + var respDiags diag.Diagnostics + + poolName := m.Pool.ValueString() + bucketName := m.Name.ValueString() + bucket, _, err := server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + if errors.IsNotFoundError(err) { + tfState.RemoveResource(ctx) + return nil + } + + respDiags.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return respDiags + } + + config, diags := common.ToConfigMapType(ctx, bucket.Config) + respDiags.Append(diags...) + + m.Name = types.StringValue(bucket.Name) + m.Location = types.StringValue(bucket.Location) + m.Description = types.StringValue(bucket.Description) + m.Config = config + + m.Target = types.StringValue("") + if server.IsClustered() || bucket.Location != "none" { + m.Target = types.StringValue(bucket.Location) + } + + if respDiags.HasError() { + return respDiags + } + + return tfState.Set(ctx, &m) +} diff --git a/internal/storage/resource_storage_bucket_key.go b/internal/storage/resource_storage_bucket_key.go new file mode 100644 index 0000000..8bbdc8b --- /dev/null +++ b/internal/storage/resource_storage_bucket_key.go @@ -0,0 +1,342 @@ +package storage + +import ( + "context" + "fmt" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + 
"github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + incus "github.com/lxc/incus/client" + "github.com/lxc/incus/shared/api" + "github.com/lxc/terraform-provider-incus/internal/common" + "github.com/lxc/terraform-provider-incus/internal/errors" + provider_config "github.com/lxc/terraform-provider-incus/internal/provider-config" +) + +type StorageBucketKeyModel struct { + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Pool types.String `tfsdk:"pool"` + StorageBucket types.String `tfsdk:"storage_bucket"` + Role types.String `tfsdk:"role"` + Project types.String `tfsdk:"project"` + Remote types.String `tfsdk:"remote"` + + // Computed. + AccessKey types.String `tfsdk:"access_key"` + SecretKey types.String `tfsdk:"secret_key"` +} + +// StorageBucketKeyResource represent Incus storage bucket key resource. +type StorageBucketKeyResource struct { + provider *provider_config.IncusProviderConfig +} + +// NewStorageBucketKeyResource return a new storage bucket key resource. +func NewStorageBucketKeyResource() resource.Resource { + return &StorageBucketKeyResource{} +} + +func (r StorageBucketKeyResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = fmt.Sprintf("%s_storage_bucket_key", req.ProviderTypeName) +} + +// TODO: setup proper schema for storage bucket key like volume for pool! 
+func (r StorageBucketKeyResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "description": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString(""), + }, + + "pool": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "storage_bucket": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + "role": schema.StringAttribute{ + Optional: true, + Computed: true, + Default: stringdefault.StaticString("read-only"), + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.OneOf("admin", "read-only"), + }, + }, + + "project": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + }, + }, + + "remote": schema.StringAttribute{ + Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + + // Computed. 
+ + "access_key": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + + "secret_key": schema.StringAttribute{ + Computed: true, + Sensitive: true, + }, + }, + } +} + +func (r *StorageBucketKeyResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + data := req.ProviderData + if data == nil { + return + } + + provider, ok := data.(*provider_config.IncusProviderConfig) + if !ok { + resp.Diagnostics.Append(errors.NewProviderDataTypeError(req.ProviderData)) + return + } + + r.provider = provider +} + +func (r StorageBucketKeyResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan StorageBucketKeyModel + + diags := req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := plan.Remote.ValueString() + project := plan.Project.ValueString() + server, err := r.provider.InstanceServer(remote, project, "") + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := plan.Pool.ValueString() + bucketName := plan.StorageBucket.ValueString() + + // Ensure storage bucket exists. + _, _, err = server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + keyName := plan.Name.ValueString() + + key := api.StorageBucketKeysPost{ + StorageBucketKeyPut: api.StorageBucketKeyPut{ + Description: plan.Description.ValueString(), + Role: plan.Role.ValueString(), + }, + Name: keyName, + } + + _, err = server.CreateStoragePoolBucketKey(poolName, bucketName, key) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to create storage bucket key %q of %q", keyName, bucketName), err.Error()) + return + } + + // Update Terraform state. + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) 
+}
+
+func (r StorageBucketKeyResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+	var state StorageBucketKeyModel
+
+	diags := req.State.Get(ctx, &state)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := state.Remote.ValueString()
+	project := state.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	diags = r.SyncState(ctx, &resp.State, server, state)
+	resp.Diagnostics.Append(diags...)
+}
+
+func (r StorageBucketKeyResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+	var plan StorageBucketKeyModel
+
+	diags := req.Plan.Get(ctx, &plan)
+	resp.Diagnostics.Append(diags...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	remote := plan.Remote.ValueString()
+	project := plan.Project.ValueString()
+	server, err := r.provider.InstanceServer(remote, project, "")
+	if err != nil {
+		resp.Diagnostics.Append(errors.NewInstanceServerError(err))
+		return
+	}
+
+	poolName := plan.Pool.ValueString()
+	bucketName := plan.StorageBucket.ValueString()
+
+	// Ensure storage bucket exists.
+	_, _, err = server.GetStoragePoolBucket(poolName, bucketName)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error())
+		return
+	}
+
+	keyName := plan.Name.ValueString()
+	key, etag, err := server.GetStoragePoolBucketKey(poolName, bucketName, keyName)
+	if err != nil {
+		resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket key %q of bucket %q", keyName, bucketName), err.Error())
+		return
+	}
+
+	newKey := api.StorageBucketKeyPut{
+		Description: plan.Description.ValueString(),
+		Role:        plan.Role.ValueString(),
+		// As we do not want to update the access key and the secret key, we provide the existing values for the update.
+ AccessKey: key.AccessKey, + SecretKey: key.SecretKey, + } + + err = server.UpdateStoragePoolBucketKey(poolName, bucketName, keyName, newKey, etag) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to update storage bucket key %q of bucket %q", keyName, bucketName), err.Error()) + return + } + + diags = r.SyncState(ctx, &resp.State, server, plan) + resp.Diagnostics.Append(diags...) +} + +func (r StorageBucketKeyResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + var state StorageBucketKeyModel + + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + remote := state.Remote.ValueString() + project := state.Project.ValueString() + server, err := r.provider.InstanceServer(remote, project, "") + if err != nil { + resp.Diagnostics.Append(errors.NewInstanceServerError(err)) + return + } + + poolName := state.Pool.ValueString() + bucketName := state.StorageBucket.ValueString() + + // Ensure storage bucket exists. 
+ _, _, err = server.GetStoragePoolBucket(poolName, bucketName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to retrieve storage bucket %q", bucketName), err.Error()) + return + } + + keyName := state.Name.ValueString() + err = server.DeleteStoragePoolBucketKey(poolName, bucketName, keyName) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Failed to delete storage bucket key %q of bucket %q", keyName, bucketName), err.Error()) + return + } +} + +func (r StorageBucketKeyResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + meta := common.ImportMetadata{ + ResourceName: "storage_bucket_key", + RequiredFields: []string{"pool", "storage_bucket", "name"}, + } + + fields, diags := meta.ParseImportID(req.ID) + if diags != nil { + resp.Diagnostics.Append(diags) + return + } + + for k, v := range fields { + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(k), v)...) + } +} + +// SyncState fetches the server's current state for a storage bucket key and +// updates the provided model. It then applies this updated model as the +// new state in Terraform. 
+func (r StorageBucketKeyResource) SyncState(ctx context.Context, tfState *tfsdk.State, server incus.InstanceServer, m StorageBucketKeyModel) diag.Diagnostics { + var respDiags diag.Diagnostics + + poolName := m.Pool.ValueString() + bucketName := m.StorageBucket.ValueString() + keyName := m.Name.ValueString() + key, _, err := server.GetStoragePoolBucketKey(poolName, bucketName, keyName) + if err != nil { + if errors.IsNotFoundError(err) { + tfState.RemoveResource(ctx) + return nil + } + + respDiags.AddError(fmt.Sprintf("Failed to retrieve storage bucket key %q of bucket %q", keyName, bucketName), err.Error()) + return respDiags + } + + m.Name = types.StringValue(key.Name) + m.Description = types.StringValue(key.Description) + m.Role = types.StringValue(key.Role) + m.AccessKey = types.StringValue(key.AccessKey) + m.SecretKey = types.StringValue(key.SecretKey) + + return tfState.Set(ctx, &m) +} diff --git a/internal/storage/resource_storage_bucket_key_test.go b/internal/storage/resource_storage_bucket_key_test.go new file mode 100644 index 0000000..2610697 --- /dev/null +++ b/internal/storage/resource_storage_bucket_key_test.go @@ -0,0 +1,235 @@ +package storage_test + +import ( + "fmt" + petname "github.com/dustinkirkland/golang-petname" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/lxc/terraform-provider-incus/internal/acctest" + "testing" +) + +func TestAccStorageBucketKey_basic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_basic(poolName, bucketName, keyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("incus_storage_pool.pool1", "name", 
poolName), + resource.TestCheckResourceAttr("incus_storage_pool.pool1", "driver", "dir"), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", poolName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "storage_bucket", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "pool", poolName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "role", "read-only"), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_role(t *testing.T) { + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + role := "admin" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_role(bucketName, keyName, role), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "storage_bucket", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "pool", "default"), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "role", role), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_project(t 
*testing.T) { + projectName := petname.Name() + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_project(projectName, bucketName, keyName), + Check: resource.ComposeTestCheckFunc( + + resource.TestCheckResourceAttr("incus_project.project1", "name", projectName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "project", projectName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "name", keyName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "storage_bucket", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "pool", "default"), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "project", projectName), + resource.TestCheckResourceAttr("incus_storage_bucket_key.key1", "role", "read-only"), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "access_key"), + resource.TestCheckResourceAttrSet("incus_storage_bucket_key.key1", "secret_key"), + ), + }, + }, + }) +} + +func TestAccStorageBucketKey_importBasic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + keyName := petname.Generate(2, "-") + resourceName := "incus_storage_bucket_key.key1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucketKey_basic(poolName, bucketName, keyName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("/%s/%s/%s", poolName, bucketName, keyName), + ImportStateVerifyIdentifierAttribute: "name", + ImportState: true, + 
ImportStateVerify:                    true,
+				ImportStateCheck: func(states []*terraform.InstanceState) error {
+					if len(states) != 1 {
+						return fmt.Errorf("expected 1 state, got %d", len(states))
+					}
+					state := states[0]
+					if state.Attributes["access_key"] == "" {
+						return fmt.Errorf("expected access_key to be set")
+					}
+
+					if state.Attributes["secret_key"] == "" {
+						return fmt.Errorf("expected secret_key to be set")
+					}
+
+					return nil
+				},
+			},
+		},
+	})
+}
+
+func TestAccStorageBucketKey_importProject(t *testing.T) {
+	projectName := petname.Generate(2, "-")
+	bucketName := petname.Generate(2, "-")
+	keyName := petname.Generate(2, "-")
+	resourceName := "incus_storage_bucket_key.key1"
+
+	resource.Test(t, resource.TestCase{
+		PreCheck:                 func() { acctest.PreCheck(t) },
+		ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccStorageBucketKey_project(projectName, bucketName, keyName),
+			},
+			{
+				ResourceName:                         resourceName,
+				ImportStateId:                        fmt.Sprintf("%s/default/%s/%s", projectName, bucketName, keyName),
+				ImportStateVerifyIdentifierAttribute: "name",
+				ImportStateVerify:                    true,
+				ImportState:                          true,
+				ImportStateCheck: func(states []*terraform.InstanceState) error {
+					if len(states) != 1 {
+						return fmt.Errorf("expected 1 state, got %d", len(states))
+					}
+					state := states[0]
+					if state.Attributes["access_key"] == "" {
+						return fmt.Errorf("expected access_key to be set")
+					}
+
+					if state.Attributes["secret_key"] == "" {
+						return fmt.Errorf("expected secret_key to be set")
+					}
+
+					return nil
+				},
+			},
+		},
+	})
+}
+
+func testAccStorageBucketKey_basic(poolName string, bucketName string, keyName string) string {
+	return fmt.Sprintf(`
+resource "incus_storage_pool" "pool1" {
+  name   = "%s"
+  driver = "dir"
+}
+
+resource "incus_storage_bucket" "bucket1" {
+  name = "%s"
+  pool = incus_storage_pool.pool1.name
+}
+
+resource "incus_storage_bucket_key" "key1" {
+  name           = "%s"
+  pool           = incus_storage_bucket.bucket1.pool
+  storage_bucket = 
incus_storage_bucket.bucket1.name +} + `, poolName, bucketName, keyName) +} + +func testAccStorageBucketKey_role(bucketName string, keyName string, role string) string { + return fmt.Sprintf(` +resource "incus_storage_bucket" "bucket1" { + name = "%s" + pool = "default" +} + +resource "incus_storage_bucket_key" "key1" { + name = "%s" + pool = incus_storage_bucket.bucket1.pool + storage_bucket = incus_storage_bucket.bucket1.name + role = "%s" +} + `, bucketName, keyName, role) +} + +func testAccStorageBucketKey_project(projectName string, bucketName string, keyName string) string { + return fmt.Sprintf(` +resource "incus_project" "project1" { + name = "%s" + config = { + "features.storage.volumes" = false + } +} + +resource "incus_storage_bucket" "bucket1" { + name = "%s" + pool = "default" + project = incus_project.project1.name +} + +resource "incus_storage_bucket_key" "key1" { + name = "%s" + project = incus_storage_bucket.bucket1.project + pool = incus_storage_bucket.bucket1.pool + storage_bucket = incus_storage_bucket.bucket1.name +} + `, projectName, bucketName, keyName) +} diff --git a/internal/storage/resource_storage_bucket_test.go b/internal/storage/resource_storage_bucket_test.go new file mode 100644 index 0000000..36a250f --- /dev/null +++ b/internal/storage/resource_storage_bucket_test.go @@ -0,0 +1,161 @@ +package storage_test + +import ( + "fmt" + petname "github.com/dustinkirkland/golang-petname" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/lxc/terraform-provider-incus/internal/acctest" + "testing" +) + +func TestAccStorageBucket_basic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(poolName, bucketName), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("incus_storage_pool.pool1", "name", poolName), + resource.TestCheckResourceAttr("incus_storage_pool.pool1", "driver", "dir"), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", poolName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_target(t *testing.T) { + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckClustering(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_target(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "name", bucketName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", "default"), + ), + }, + }, + }) +} + +func TestAccStorageBucket_project(t *testing.T) { + projectName := petname.Name() + bucketName := petname.Generate(2, "-") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_project(projectName, bucketName), + Check: resource.ComposeTestCheckFunc( + + resource.TestCheckResourceAttr("incus_project.project1", "name", projectName), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "pool", "default"), + resource.TestCheckResourceAttr("incus_storage_bucket.bucket1", "project", projectName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_importBasic(t *testing.T) { + poolName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + resourceName := "incus_storage_bucket.bucket1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: 
[]resource.TestStep{ + { + Config: testAccStorageBucket_basic(poolName, bucketName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("/%s/%s", poolName, bucketName), + ImportStateVerifyIdentifierAttribute: "name", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageBucket_importProject(t *testing.T) { + projectName := petname.Generate(2, "-") + bucketName := petname.Generate(2, "-") + resourceName := "incus_storage_bucket.bucket1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ProtoV6ProviderFactories: acctest.ProtoV6ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_project(projectName, bucketName), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s/default/%s", projectName, bucketName), + ImportStateVerifyIdentifierAttribute: "name", + ImportStateVerify: true, + ImportState: true, + }, + }, + }) +} + +func testAccStorageBucket_basic(poolName string, bucketName string) string { + return fmt.Sprintf(` +resource "incus_storage_pool" "pool1" { + name = "%s" + driver = "dir" +} + +resource "incus_storage_bucket" "bucket1" { + name = "%s" + pool = incus_storage_pool.pool1.name +} + `, poolName, bucketName) +} + +func testAccStorageBucket_target(bucketName string) string { + return fmt.Sprintf(` +resource "incus_storage_bucket" "bucket1" { + name = "%s" + pool = "default" + target = "node-2" +} + `, bucketName) +} + +func testAccStorageBucket_project(projectName string, bucketName string) string { + return fmt.Sprintf(` +resource "incus_project" "project1" { + name = "%s" + config = { + "features.storage.volumes" = false + } +} + +resource "incus_storage_bucket" "bucket1" { + name = "%s" + pool = "default" + project = incus_project.project1.name +} + `, projectName, bucketName) +}