diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index c62bb962..f74cffe4 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -7,7 +7,7 @@ Contributions are what make the open source community such an amazing place to b
2. Create your Feature Branch (`git checkout -b feature/TerraformFeature`)
3. Commit your Changes (`git commit -m 'Add some TerraformFeature'`)
4. Push to the Branch (`git push origin feature/TerraformFeature`)
-5. Open a [Pull Request](https://github.com/terraform-provider-minio/terraform-provider-minio/pulls)
+5. Open a [Pull Request](https://github.com/aminueza/terraform-provider-minio/pulls)
### Merging
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 95949bcb..27272554 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -2,13 +2,12 @@ name: Docs CI
on:
push:
branches:
- - master
- - feature/**
+ - main
tags:
- - 'v*'
+ - "v*"
pull_request:
branches:
- - master
+ - main
jobs:
mdvalidate:
name: Validate Markdown Files
@@ -20,7 +19,7 @@ jobs:
- name: Check out code
uses: actions/checkout@v4
with:
- fetch-depth: 1
+ fetch-depth: 1
- name: Markdown Link Validation
uses: gaurav-nelson/github-action-markdown-link-check@v1
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index a493411e..5df87321 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -1,9 +1,9 @@
name: Terraform Provider CI
on:
push:
- branches: [master]
+ branches: [main]
pull_request:
- branches: [master]
+ branches: [main]
jobs:
checkup:
name: Checkup
@@ -35,7 +35,7 @@ jobs:
version: "2023.1.6"
install-go: false
- name: Build the docker-compose stack
- run: docker-compose up -d minio
+ run: docker-compose up -d minio secondminio thirdminio fourthminio
- uses: hashicorp/setup-terraform@v2
with:
terraform_version: 1.4.7
@@ -48,4 +48,4 @@ jobs:
- name: Run install task
run: task install
- name: Run test task
- run: task test
+ run: sudo apt install jq -y && sed -i "s/172.17.0.1/`docker network inspect bridge | jq -r .[].IPAM.Config[].Gateway`/" Taskfile.yml && task test
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index d39270db..01d1f6f1 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -14,6 +14,8 @@ on:
push:
tags:
- "v*"
+permissions:
+ contents: write
jobs:
goreleaser:
runs-on: ubuntu-latest
@@ -33,7 +35,7 @@ jobs:
gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }}
passphrase: ${{ secrets.PASSPHRASE }}
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v4
+ uses: goreleaser/goreleaser-action@v5
with:
version: latest
args: release --rm-dist
diff --git a/.gitignore b/.gitignore
index 80a208ac..44459481 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,4 +19,5 @@ plan.bin
#go packages
vendor/*
.vscode
-.idea
\ No newline at end of file
+.idea
+dev.tfrc
diff --git a/README.md b/README.md
index f4959451..5a0c9d3f 100644
--- a/README.md
+++ b/README.md
@@ -1,24 +1,24 @@
[README header block: centered logo, "Terraform Provider for MinIO" title, badges, and an "Explore the docs »" link; its image and link URLs are updated in this hunk]
@@ -51,7 +51,7 @@ It just means that we can't guarantee backward compatibility.
## Building and Installing
-Prebuilt versions of this provider are available on the [releases page](https://github.com/terraform-provider-minio/terraform-provider-minio/releases/latest).
+Prebuilt versions of this provider are available on the [releases page](https://github.com/aminueza/terraform-provider-minio/releases/latest).
But if you need to build it yourself, just download this repository, [install](https://taskfile.dev/#/installation) [Task](https://taskfile.dev/):
@@ -118,7 +118,7 @@ See our [examples](./examples/) folder.
## Roadmap
-See the [open issues](https://github.com/terraform-provider-minio/terraform-provider-minio/issues) for a list of proposed features (and known issues). See [CONTRIBUTING](./.github/CONTRIBUTING.md) for more information.
+See the [open issues](https://github.com/aminueza/terraform-provider-minio/issues) for a list of proposed features (and known issues). See [CONTRIBUTING](./.github/CONTRIBUTING.md) for more information.
## License
diff --git a/Taskfile.yml b/Taskfile.yml
index ad32ee97..8c66eaf2 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -52,10 +52,22 @@ tasks:
desc: Run the package tests.
env:
TF_ACC: 0
- MINIO_ENDPOINT: localhost:9000
+ MINIO_ENDPOINT: 172.17.0.1:9000
MINIO_USER: minio
MINIO_PASSWORD: minio123
MINIO_ENABLE_HTTPS: false
+ SECOND_MINIO_ENDPOINT: 172.17.0.1:9002
+ SECOND_MINIO_USER: minio
+ SECOND_MINIO_PASSWORD: minio321
+ SECOND_MINIO_ENABLE_HTTPS: false
+ THIRD_MINIO_ENDPOINT: 172.17.0.1:9004
+ THIRD_MINIO_USER: minio
+ THIRD_MINIO_PASSWORD: minio456
+ THIRD_MINIO_ENABLE_HTTPS: false
+ FOURTH_MINIO_ENDPOINT: 172.17.0.1:9006
+ FOURTH_MINIO_USER: minio
+ FOURTH_MINIO_PASSWORD: minio654
+ FOURTH_MINIO_ENABLE_HTTPS: false
cmds:
- go test -v -cover ./minio
silent: true
diff --git a/docker-compose.yml b/docker-compose.yml
index 0de55667..fd64154f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,7 +1,7 @@
version: "3"
services:
minio:
- image: minio/minio:RELEASE.2023-03-13T19-46-17Z
+ image: minio/minio:RELEASE.2023-08-31T15-31-16Z
ports:
- "9000:9000"
- "9001:9001"
@@ -12,12 +12,66 @@ services:
MINIO_NOTIFY_WEBHOOK_ENABLE_primary: "on"
MINIO_NOTIFY_WEBHOOK_ENDPOINT_primary: https://webhook.example.com
command: server --console-address :9001 /data{0...3}
+ secondminio: # This is used to test bucket replication
+ image: minio/minio:RELEASE.2023-08-31T15-31-16Z
+ ports:
+ - "9002:9000"
+ - "9003:9001"
+ environment:
+ MINIO_ROOT_USER: minio
+ MINIO_ROOT_PASSWORD: minio321
+ MINIO_CI_CD: "1"
+ MINIO_NOTIFY_WEBHOOK_ENABLE_primary: "on"
+ MINIO_NOTIFY_WEBHOOK_ENDPOINT_primary: https://webhook.example.com
+ command: server --console-address :9001 /data{0...3}
+ thirdminio: # This is used to test bucket replication
+ image: minio/minio:RELEASE.2023-08-31T15-31-16Z
+ ports:
+ - "9004:9000"
+ - "9005:9001"
+ environment:
+ MINIO_ROOT_USER: minio
+ MINIO_ROOT_PASSWORD: minio456
+ MINIO_CI_CD: "1"
+ MINIO_NOTIFY_WEBHOOK_ENABLE_primary: "on"
+ MINIO_NOTIFY_WEBHOOK_ENDPOINT_primary: https://webhook.example.com
+ command: server --console-address :9001 /data{0...3}
+ fourthminio: # This is used to test bucket replication
+ image: minio/minio:RELEASE.2023-08-31T15-31-16Z
+ ports:
+ - "9006:9000"
+ - "9007:9001"
+ environment:
+ MINIO_ROOT_USER: minio
+ MINIO_ROOT_PASSWORD: minio654
+ MINIO_CI_CD: "1"
+ MINIO_NOTIFY_WEBHOOK_ENABLE_primary: "on"
+ MINIO_NOTIFY_WEBHOOK_ENDPOINT_primary: https://webhook.example.com
+ command: server --console-address :9001 /data{0...3}
adminio-ui:
image: rzrbld/adminio-ui:v1.93-210123
environment:
API_BASE_URL: "http://localhost:8080"
- ADMINIO_MULTI_BACKEND: "false"
- ADMINIO_BACKENDS: '[{"name":"myminio","url":"http://localhost:8080"},{"name":"localhost","url":"http://localhost:8081"},{"name":"error","url":"http://localhost:8082"}]'
+ ADMINIO_MULTI_BACKEND: "true"
+ ADMINIO_BACKENDS: |-
+ [
+ {
+ "name": "minio",
+ "url": "http://localhost:8080"
+ },
+ {
+ "name": "secondminio",
+ "url": "http://localhost:8081"
+ },
+ {
+ "name": "thirdminio",
+ "url": "http://localhost:8082"
+ },
+ {
+ "name": "fourthminio",
+ "url": "http://localhost:8083"
+ }
+ ]
NGX_ROOT_PATH: "/"
ports:
- "8000:80"
@@ -34,3 +88,42 @@ services:
- adminio-ui
ports:
- "8080:8080"
+ secondadminio-api:
+ image: rzrbld/adminio-api:v1.84-210123
+ environment:
+ MINIO_ACCESS: minio
+ MINIO_SECRET: minio321
+ MINIO_HOST_PORT: secondminio:9000
+ MINIO_KMS_MASTER_KEY: terraform-key:da2f4cfa32bed76507dcd44b42872328a8e14f25cd2a1ec0fb85d299a192a447
+ ADMINIO_HOST_PORT: :8080
+ depends_on:
+ - secondminio
+ - adminio-ui
+ ports:
+ - "8081:8080"
+ thirdadminio-api:
+ image: rzrbld/adminio-api:v1.84-210123
+ environment:
+ MINIO_ACCESS: minio
+ MINIO_SECRET: minio456
+ MINIO_HOST_PORT: thirdminio:9000
+ MINIO_KMS_MASTER_KEY: terraform-key:da2f4cfa32bed76507dcd44b42872328a8e14f25cd2a1ec0fb85d299a192a447
+ ADMINIO_HOST_PORT: :8080
+ depends_on:
+ - thirdminio
+ - adminio-ui
+ ports:
+ - "8082:8080"
+ fourthadminio-api:
+ image: rzrbld/adminio-api:v1.84-210123
+ environment:
+ MINIO_ACCESS: minio
+ MINIO_SECRET: minio654
+ MINIO_HOST_PORT: fourthminio:9000
+ MINIO_KMS_MASTER_KEY: terraform-key:da2f4cfa32bed76507dcd44b42872328a8e14f25cd2a1ec0fb85d299a192a447
+ ADMINIO_HOST_PORT: :8080
+ depends_on:
+ - fourthminio
+ - adminio-ui
+ ports:
+ - "8083:8080"
diff --git a/docs/resources/ilm_policy.md b/docs/resources/ilm_policy.md
index def443c8..492ac2ee 100644
--- a/docs/resources/ilm_policy.md
+++ b/docs/resources/ilm_policy.md
@@ -46,6 +46,7 @@ Optional:
- `expiration` (String)
- `filter` (String)
+- `noncurrent_version_expiration_days` (Number)
- `tags` (Map of String)
Read-Only:
diff --git a/docs/resources/s3_bucket_replication.md b/docs/resources/s3_bucket_replication.md
new file mode 100644
index 00000000..8c766277
--- /dev/null
+++ b/docs/resources/s3_bucket_replication.md
@@ -0,0 +1,254 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "minio_s3_bucket_replication Resource - terraform-provider-minio"
+subcategory: ""
+description: |-
+
+---
+
+# minio_s3_bucket_replication (Resource)
+
+
+
+## Example Usage
+
+```terraform
+resource "minio_s3_bucket" "my_bucket_in_a" {
+ bucket = "my-bucket"
+}
+
+resource "minio_s3_bucket" "my_bucket_in_b" {
+ provider = minio.deployment_b
+ bucket = "my-bucket"
+}
+
+resource "minio_s3_bucket_versioning" "my_bucket_in_a" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ versioning_configuration {
+ status = "Enabled"
+ }
+}
+
+resource "minio_s3_bucket_versioning" "my_bucket_in_b" {
+ provider = minio.deployment_b
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+
+ versioning_configuration {
+ status = "Enabled"
+ }
+}
+
+data "minio_iam_policy_document" "replication_policy" {
+ statement {
+ sid = "ReadBuckets"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::*"]
+
+ actions = [
+ "s3:ListBucket",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicationOnBucket"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::my-bucket"]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ "s3:GetBucketLocation",
+ "s3:GetBucketVersioning",
+ "s3:GetBucketObjectLockConfiguration",
+ "s3:GetEncryptionConfiguration",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicatingDataIntoBucket"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::my-bucket/*"]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ReplicateTags",
+ "s3:AbortMultipartUpload",
+ "s3:GetObject",
+ "s3:GetObjectVersion",
+ "s3:GetObjectVersionTagging",
+ "s3:PutObject",
+ "s3:PutObjectRetention",
+ "s3:PutBucketObjectLockConfiguration",
+ "s3:PutObjectLegalHold",
+ "s3:DeleteObject",
+ "s3:ReplicateObject",
+ "s3:ReplicateDelete",
+ ]
+ }
+}
+
+# One-Way replication (A -> B)
+resource "minio_iam_policy" "replication_in_b" {
+ provider = minio.deployment_b
+ name = "ReplicationToMyBucketPolicy"
+ policy = data.minio_iam_policy_document.replication_policy.json
+}
+
+resource "minio_iam_user" "replication_in_b" {
+ provider = minio.deployment_b
+ name = "my-user"
+ force_destroy = true
+}
+
+resource "minio_iam_user_policy_attachment" "replication_in_b" {
+ provider = minio.deployment_b
+ user_name = minio_iam_user.replication_in_b.name
+ policy_name = minio_iam_policy.replication_in_b.id
+}
+
+resource "minio_iam_service_account" "replication_in_b" {
+ provider = minio.deployment_b
+ target_user = minio_iam_user.replication_in_b.name
+
+ depends_on = [
+ minio_iam_user_policy_attachment.replication_in_b
+ ]
+}
+
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true # Must be true for two-way
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ secure = false
+ host = var.minio_server_b
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}
+
+# Two-Way replication (A <-> B)
+resource "minio_iam_policy" "replication_in_a" {
+ name = "ReplicationToMyBucketPolicy"
+ policy = data.minio_iam_policy_document.replication_policy.json
+}
+
+resource "minio_iam_user" "replication_in_a" {
+ name = "my-user"
+ force_destroy = true
+}
+
+resource "minio_iam_user_policy_attachment" "replication_in_a" {
+ user_name = minio_iam_user.replication_in_a.name
+ policy_name = minio_iam_policy.replication_in_a.id
+}
+
+resource "minio_iam_service_account" "replication_in_a" {
+ target_user = minio_iam_user.replication_in_a.name
+
+ depends_on = [
+ minio_iam_user_policy_attachment.replication_in_b
+ ]
+}
+
+resource "minio_s3_bucket_replication" "replication_in_a" {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ provider = minio.deployment_b
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+ host = var.minio_server_a
+ secure = false
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_a.access_key
+ secret_key = minio_iam_service_account.replication_in_a.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b,
+ ]
+}
+```
+
+
+## Schema
+
+### Required
+
+- `bucket` (String) Name of the bucket on which to setup replication rules
+
+### Optional
+
+- `rule` (Block List) Rule definitions (see [below for nested schema](#nestedblock--rule))
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+
+
+### Nested Schema for `rule`
+
+Required:
+
+- `target` (Block List, Min: 1, Max: 1) Replication target (see [below for nested schema](#nestedblock--rule--target))
+
+Optional:
+
+- `delete_marker_replication` (Boolean) Whether or not to replicate delete markers
+- `delete_replication` (Boolean) Whether or not to propagate deletions
+- `enabled` (Boolean) Whether or not this rule is enabled
+- `existing_object_replication` (Boolean) Whether or not to replicate objects created prior to the replication configuration
+- `metadata_sync` (Boolean) Whether or not to synchronise bucket and object metadata (such as locks). This must be enabled to achieve two-way replication
+- `prefix` (String) Bucket prefix objects must be in to be synchronised
+- `priority` (Number) Rule priority. If omitted, the inverted index will be used as the priority, meaning the first rule definition gets the highest priority
+- `tags` (Map of String) Tags which objects must have to be synchronised
+
+Read-Only:
+
+- `arn` (String) Rule ARN generated by MinIO
+- `id` (String) Rule ID generated by MinIO
+
+
+### Nested Schema for `rule.target`
+
+Required:
+
+- `access_key` (String) Access key for the replication service account in the target MinIO
+- `bucket` (String) The name of the existing target bucket to replicate into
+- `host` (String) The target host (an IP/port pair or a domain and port). If the port is omitted, the HTTPS port (or HTTP if insecure) is used. This host must be reachable by the MinIO instance itself
+
+Optional:
+
+- `bandwidth_limt` (String) Maximum bandwidth in bytes per second that MinIO can use when synchronising this target. The minimum is 100MBps
+- `disable_proxy` (Boolean) Disable proxy for this target
+- `health_check_period` (String) Period at which the health of this target is checked. This must be a valid duration, such as `5s` or `2m`
+- `path` (String) Path of the MinIO endpoint. This is useful if the MinIO API isn't served at the root, e.g. for `example.com/minio/`, the path would be `/minio/`
+- `path_style` (String) Whether to use path-style or virtual-hosted-style requests to this target (https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access). `auto` lets MinIO choose the appropriate option automatically (recommended)
+- `region` (String) Region of the target MinIO. This will be used to generate the target ARN
+- `secret_key` (String, Sensitive) Secret key for the replication service account in the target MinIO. This is optional so the resource can be imported, but leaving it unset prevents secret updates
+- `secure` (Boolean) Whether to use HTTPS with this target (recommended). Note that disabling HTTPS will yield a Terraform warning for security reasons
+- `storage_class` (String) The storage class to use for the object on this target
+- `syncronous` (Boolean) Use synchronous replication.
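+
+## Import
+
+Since this resource sets its ID to the source bucket name and uses Terraform's pass-through importer, an existing replication configuration can most likely be imported by bucket name. The sketch below assumes Terraform 1.5 or later (for the `import` block) and reuses the `replication_in_b` address and `my-bucket` name from the example above as placeholders; on older Terraform versions the equivalent `terraform import` CLI command can be used instead. Because `secret_key` is not returned by MinIO, it will likely need to be set again in the configuration after importing.
+
+```terraform
+# Hypothetical import of the replication rules configured on "my-bucket"
+import {
+  to = minio_s3_bucket_replication.replication_in_b
+  id = "my-bucket"
+}
+```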
diff --git a/examples/resources/minio_s3_bucket_replication/main.tf b/examples/resources/minio_s3_bucket_replication/main.tf
new file mode 100755
index 00000000..7bb1692c
--- /dev/null
+++ b/examples/resources/minio_s3_bucket_replication/main.tf
@@ -0,0 +1,24 @@
+terraform {
+ required_providers {
+ minio = {
+ source = "aminueza/minio"
+ version = ">= 1.19.0"
+ }
+ }
+}
+
+provider "minio" {
+ minio_server = var.minio_server_a
+ minio_region = var.minio_region_a
+ minio_user = var.minio_user_a
+ minio_password = var.minio_password_a
+}
+
+provider "minio" {
+ alias = "deployment_b"
+ minio_server = var.minio_server_b
+ minio_region = var.minio_region_b
+ minio_user = var.minio_user_b
+ minio_password = var.minio_password_b
+}
+
diff --git a/examples/resources/minio_s3_bucket_replication/resource.tf b/examples/resources/minio_s3_bucket_replication/resource.tf
new file mode 100644
index 00000000..966c4206
--- /dev/null
+++ b/examples/resources/minio_s3_bucket_replication/resource.tf
@@ -0,0 +1,178 @@
+resource "minio_s3_bucket" "my_bucket_in_a" {
+ bucket = "my-bucket"
+}
+
+resource "minio_s3_bucket" "my_bucket_in_b" {
+ provider = minio.deployment_b
+ bucket = "my-bucket"
+}
+
+resource "minio_s3_bucket_versioning" "my_bucket_in_a" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ versioning_configuration {
+ status = "Enabled"
+ }
+}
+
+resource "minio_s3_bucket_versioning" "my_bucket_in_b" {
+ provider = minio.deployment_b
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+
+ versioning_configuration {
+ status = "Enabled"
+ }
+}
+
+data "minio_iam_policy_document" "replication_policy" {
+ statement {
+ sid = "ReadBuckets"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::*"]
+
+ actions = [
+ "s3:ListBucket",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicationOnBucket"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::my-bucket"]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ "s3:GetBucketLocation",
+ "s3:GetBucketVersioning",
+ "s3:GetBucketObjectLockConfiguration",
+ "s3:GetEncryptionConfiguration",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicatingDataIntoBucket"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::my-bucket/*"]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ReplicateTags",
+ "s3:AbortMultipartUpload",
+ "s3:GetObject",
+ "s3:GetObjectVersion",
+ "s3:GetObjectVersionTagging",
+ "s3:PutObject",
+ "s3:PutObjectRetention",
+ "s3:PutBucketObjectLockConfiguration",
+ "s3:PutObjectLegalHold",
+ "s3:DeleteObject",
+ "s3:ReplicateObject",
+ "s3:ReplicateDelete",
+ ]
+ }
+}
+
+# One-Way replication (A -> B)
+resource "minio_iam_policy" "replication_in_b" {
+ provider = minio.deployment_b
+ name = "ReplicationToMyBucketPolicy"
+ policy = data.minio_iam_policy_document.replication_policy.json
+}
+
+resource "minio_iam_user" "replication_in_b" {
+ provider = minio.deployment_b
+ name = "my-user"
+ force_destroy = true
+}
+
+resource "minio_iam_user_policy_attachment" "replication_in_b" {
+ provider = minio.deployment_b
+ user_name = minio_iam_user.replication_in_b.name
+ policy_name = minio_iam_policy.replication_in_b.id
+}
+
+resource "minio_iam_service_account" "replication_in_b" {
+ provider = minio.deployment_b
+ target_user = minio_iam_user.replication_in_b.name
+
+ depends_on = [
+ minio_iam_user_policy_attachment.replication_in_b
+ ]
+}
+
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true # Must be true for two-way
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ secure = false
+ host = var.minio_server_b
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}
+
+# Two-Way replication (A <-> B)
+resource "minio_iam_policy" "replication_in_a" {
+ name = "ReplicationToMyBucketPolicy"
+ policy = data.minio_iam_policy_document.replication_policy.json
+}
+
+resource "minio_iam_user" "replication_in_a" {
+ name = "my-user"
+ force_destroy = true
+}
+
+resource "minio_iam_user_policy_attachment" "replication_in_a" {
+ user_name = minio_iam_user.replication_in_a.name
+ policy_name = minio_iam_policy.replication_in_a.id
+}
+
+resource "minio_iam_service_account" "replication_in_a" {
+ target_user = minio_iam_user.replication_in_a.name
+
+ depends_on = [
+ minio_iam_user_policy_attachment.replication_in_a
+ ]
+}
+
+resource "minio_s3_bucket_replication" "replication_in_a" {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ provider = minio.deployment_b
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+ host = var.minio_server_a
+ secure = false
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_a.access_key
+ secret_key = minio_iam_service_account.replication_in_a.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b,
+ ]
+}
\ No newline at end of file
diff --git a/examples/resources/minio_s3_bucket_replication/variables.tf b/examples/resources/minio_s3_bucket_replication/variables.tf
new file mode 100755
index 00000000..71619552
--- /dev/null
+++ b/examples/resources/minio_s3_bucket_replication/variables.tf
@@ -0,0 +1,39 @@
+variable "minio_region_a" {
+ description = "Default MINIO region"
+ default = "us-east-1"
+}
+
+variable "minio_server_a" {
+ description = "Default MINIO host and port"
+ default = "localhost:9000"
+}
+
+variable "minio_user_a" {
+ description = "MINIO user"
+ default = "minio"
+}
+
+variable "minio_password_a" {
+ description = "MINIO password"
+ default = "minio123"
+}
+
+variable "minio_region_b" {
+ description = "Default MINIO region"
+ default = "eu-west-1"
+}
+
+variable "minio_server_b" {
+ description = "Default MINIO host and port"
+ default = "localhost:9002"
+}
+
+variable "minio_user_b" {
+ description = "MINIO user"
+ default = "minio"
+}
+
+variable "minio_password_b" {
+ description = "MINIO password"
+ default = "minio321"
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index c16a0034..fc1edce0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,18 @@
-module github.com/terraform-provider-minio/terraform-provider-minio
+module github.com/aminueza/terraform-provider-minio
go 1.20
require (
github.com/aws/aws-sdk-go v1.45.9
+ github.com/dustin/go-humanize v1.0.1
github.com/google/go-cmp v0.5.9
github.com/hashicorp/awspolicyequivalence v1.6.0
github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637
github.com/hashicorp/terraform-plugin-sdk/v2 v2.29.0
github.com/minio/madmin-go/v3 v3.0.18
github.com/minio/minio-go/v7 v7.0.63
+ github.com/rs/xid v1.5.0
+ golang.org/x/exp v0.0.0-20231006140011-7918f672742d
gotest.tools/v3 v3.5.0
)
@@ -18,7 +21,6 @@ require (
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/cloudflare/circl v1.3.3 // indirect
- github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
@@ -66,7 +68,6 @@ require (
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/prometheus/prom2json v1.3.3 // indirect
- github.com/rs/xid v1.5.0 // indirect
github.com/secure-io/sio-go v0.3.1 // indirect
github.com/shirou/gopsutil/v3 v3.23.8 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
@@ -79,15 +80,15 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
github.com/zclconf/go-cty v1.14.0 // indirect
- golang.org/x/crypto v0.13.0 // indirect
- golang.org/x/mod v0.12.0 // indirect
- golang.org/x/net v0.15.0 // indirect
+ golang.org/x/crypto v0.14.0 // indirect
+ golang.org/x/mod v0.13.0 // indirect
+ golang.org/x/net v0.17.0 // indirect
golang.org/x/sync v0.3.0 // indirect
- golang.org/x/sys v0.12.0 // indirect
+ golang.org/x/sys v0.13.0 // indirect
golang.org/x/text v0.13.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb // indirect
- google.golang.org/grpc v1.58.0 // indirect
+ google.golang.org/grpc v1.58.3 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
)
diff --git a/go.sum b/go.sum
index 81c6648f..68c7d630 100644
--- a/go.sum
+++ b/go.sum
@@ -217,13 +217,15 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
+golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@@ -233,8 +235,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -265,8 +267,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -274,7 +276,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -290,8 +292,8 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
-golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
@@ -299,8 +301,8 @@ google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAs
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb h1:Isk1sSH7bovx8Rti2wZK0UZF6oraBDK74uoyLEEVFN0=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230913181813-007df8e322eb/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
-google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o=
-google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
+google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
+google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/main.go b/main.go
index b08cd92f..acfc7060 100644
--- a/main.go
+++ b/main.go
@@ -3,8 +3,8 @@ package main
import (
"flag"
+ "github.com/aminueza/terraform-provider-minio/minio"
"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
- "github.com/terraform-provider-minio/terraform-provider-minio/minio"
)
func main() {
diff --git a/minio/check_config.go b/minio/check_config.go
index 4f86cd01..9ee3bf53 100644
--- a/minio/check_config.go
+++ b/minio/check_config.go
@@ -1,6 +1,7 @@
package minio
import (
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
@@ -45,6 +46,20 @@ func BucketVersioningConfig(d *schema.ResourceData, meta interface{}) *S3MinioBu
}
}
+// BucketReplicationConfig creates config for managing minio bucket replication
+func BucketReplicationConfig(d *schema.ResourceData, meta interface{}) (*S3MinioBucketReplication, diag.Diagnostics) {
+ m := meta.(*S3MinioClient)
+
+ replicationRules, diags := getBucketReplicationConfig(d.Get("rule").([]interface{}))
+
+ return &S3MinioBucketReplication{
+ MinioClient: m.S3Client,
+ MinioAdmin: m.S3Admin,
+ MinioBucket: d.Get("bucket").(string),
+ ReplicationRules: replicationRules,
+ }, diags
+}
+
// BucketNotificationConfig creates config for managing minio bucket notifications
func BucketNotificationConfig(d *schema.ResourceData, meta interface{}) *S3MinioBucketNotification {
m := meta.(*S3MinioClient)
diff --git a/minio/payload.go b/minio/payload.go
index 44f3af1f..5df016bb 100644
--- a/minio/payload.go
+++ b/minio/payload.go
@@ -1,6 +1,8 @@
package minio
import (
+ "time"
+
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/notification"
@@ -59,6 +61,61 @@ type S3MinioBucketVersioningConfiguration struct {
ExcludeFolders bool
}
+// S3PathSyle represents the path-style access mode to use when addressing a replication target
+type S3PathSyle int8
+
+const (
+ S3PathSyleAuto S3PathSyle = iota
+ S3PathSyleOn
+ S3PathSyleOff
+)
+
+func (p S3PathSyle) String() string {
+ switch p {
+ case S3PathSyleOn:
+ return "on"
+ case S3PathSyleOff:
+ return "off"
+ default:
+ return "auto"
+ }
+}
+
+// S3MinioBucketReplicationRule defines a bucket replication rule
+type S3MinioBucketReplicationRule struct {
+ Id string
+ Arn string
+ Enabled bool
+ Priority int
+
+ Prefix string
+ Tags map[string]string
+
+ DeleteReplication bool
+ DeleteMarkerReplication bool
+ ExistingObjectReplication bool
+ MetadataSync bool
+
+ Target S3MinioBucketReplicationRuleTarget
+}
+
+// S3MinioBucketReplicationRuleTarget defines bucket replication rule target
+type S3MinioBucketReplicationRuleTarget struct {
+ Bucket string
+ StorageClass string
+ Host string
+ Secure bool
+ Path string
+ PathStyle S3PathSyle
+ Syncronous bool
+ DisableProxy bool
+ HealthCheckPeriod time.Duration
+ BandwidthLimit int64
+ Region string
+ AccessKey string
+ SecretKey string
+}
+
// S3MinioBucketVersioning defines bucket versioning
type S3MinioBucketVersioning struct {
MinioClient *minio.Client
@@ -66,6 +123,14 @@ type S3MinioBucketVersioning struct {
VersioningConfiguration *S3MinioBucketVersioningConfiguration
}
+// S3MinioBucketReplication defines bucket replication
+type S3MinioBucketReplication struct {
+ MinioAdmin *madmin.AdminClient
+ MinioClient *minio.Client
+ MinioBucket string
+ ReplicationRules []S3MinioBucketReplicationRule
+}
+
// S3MinioBucketNotification
type S3MinioBucketNotification struct {
MinioClient *minio.Client
diff --git a/minio/provider.go b/minio/provider.go
index 7b670fb4..360a54b1 100644
--- a/minio/provider.go
+++ b/minio/provider.go
@@ -9,6 +9,14 @@ import (
// Provider creates a new provider
func Provider() *schema.Provider {
+ return newProvider()
+}
+
+func newProvider(envvarPrefixed ...string) *schema.Provider {
+ envVarPrefix := ""
+ if len(envvarPrefixed) != 0 {
+ envVarPrefix = envvarPrefixed[0]
+ }
return &schema.Provider{
Schema: map[string]*schema.Schema{
"minio_server": {
@@ -16,7 +24,7 @@ func Provider() *schema.Provider {
Required: true,
Description: "Minio Host and Port",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_ENDPOINT",
+ envVarPrefix + "MINIO_ENDPOINT",
}, nil),
},
"minio_region": {
@@ -30,7 +38,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio Access Key",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_ACCESS_KEY",
+ envVarPrefix + "MINIO_ACCESS_KEY",
}, nil),
Deprecated: "use minio_user instead",
},
@@ -39,7 +47,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio Secret Key",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_SECRET_KEY",
+ envVarPrefix + "MINIO_SECRET_KEY",
}, nil),
Deprecated: "use minio_password instead",
},
@@ -48,7 +56,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio User",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_USER",
+ envVarPrefix + "MINIO_USER",
}, nil),
ConflictsWith: []string{"minio_access_key"},
},
@@ -57,7 +65,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio Password",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_PASSWORD",
+ envVarPrefix + "MINIO_PASSWORD",
}, nil),
ConflictsWith: []string{"minio_secret_key"},
},
@@ -66,7 +74,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio Session Token",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_SESSION_TOKEN",
+ envVarPrefix + "MINIO_SESSION_TOKEN",
}, ""),
},
"minio_api_version": {
@@ -80,7 +88,7 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Minio SSL enabled (default: false)",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_ENABLE_HTTPS",
+ envVarPrefix + "MINIO_ENABLE_HTTPS",
}, nil),
},
"minio_insecure": {
@@ -88,28 +96,28 @@ func Provider() *schema.Provider {
Optional: true,
Description: "Disable SSL certificate verification (default: false)",
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_INSECURE",
+ envVarPrefix + "MINIO_INSECURE",
}, nil),
},
"minio_cacert_file": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_CACERT_FILE",
+ envVarPrefix + "MINIO_CACERT_FILE",
}, nil),
},
"minio_cert_file": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_CERT_FILE",
+ envVarPrefix + "MINIO_CERT_FILE",
}, nil),
},
"minio_key_file": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
- "MINIO_KEY_FILE",
+ envVarPrefix + "MINIO_KEY_FILE",
}, nil),
},
},
@@ -122,6 +130,7 @@ func Provider() *schema.Provider {
"minio_s3_bucket": resourceMinioBucket(),
"minio_s3_bucket_policy": resourceMinioBucketPolicy(),
"minio_s3_bucket_versioning": resourceMinioBucketVersioning(),
+ "minio_s3_bucket_replication": resourceMinioBucketReplication(),
"minio_s3_bucket_notification": resourceMinioBucketNotification(),
"minio_s3_bucket_server_side_encryption": resourceMinioBucketServerSideEncryption(),
"minio_s3_object": resourceMinioObject(),
diff --git a/minio/provider_test.go b/minio/provider_test.go
index 2bcd72df..0e66d906 100644
--- a/minio/provider_test.go
+++ b/minio/provider_test.go
@@ -9,24 +9,58 @@ import (
var testAccProviders map[string]func() (*schema.Provider, error)
var testAccProvider *schema.Provider
+var testAccSecondProvider *schema.Provider
+var testAccThirdProvider *schema.Provider
+var testAccFourthProvider *schema.Provider
func init() {
- testAccProvider = Provider()
+ testAccProvider = newProvider()
+ testAccSecondProvider = newProvider("SECOND_")
+ testAccThirdProvider = newProvider("THIRD_")
+ testAccFourthProvider = newProvider("FOURTH_")
testAccProviders = map[string]func() (*schema.Provider, error){
"minio": func() (*schema.Provider, error) {
return testAccProvider, nil
},
+ "secondminio": func() (*schema.Provider, error) {
+ return testAccSecondProvider, nil
+ },
+ "thirdminio": func() (*schema.Provider, error) {
+ return testAccThirdProvider, nil
+ },
+ "fourthminio": func() (*schema.Provider, error) {
+ return testAccFourthProvider, nil
+ },
}
}
func TestProvider(t *testing.T) {
- if err := Provider().InternalValidate(); err != nil {
+ if err := newProvider().InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
- var _ *schema.Provider = Provider()
+ var _ *schema.Provider = newProvider()
+}
+
+var kEnvVarNeeded = []string{
+ "MINIO_ENDPOINT",
+ "MINIO_USER",
+ "MINIO_PASSWORD",
+ "MINIO_ENABLE_HTTPS",
+ "SECOND_MINIO_ENDPOINT",
+ "SECOND_MINIO_USER",
+ "SECOND_MINIO_PASSWORD",
+ "SECOND_MINIO_ENABLE_HTTPS",
+ "THIRD_MINIO_ENDPOINT",
+ "THIRD_MINIO_USER",
+ "THIRD_MINIO_PASSWORD",
+ "THIRD_MINIO_ENABLE_HTTPS",
+ "FOURTH_MINIO_ENDPOINT",
+ "FOURTH_MINIO_USER",
+ "FOURTH_MINIO_PASSWORD",
+ "FOURTH_MINIO_ENABLE_HTTPS",
}
func testAccPreCheck(t *testing.T) {
@@ -36,17 +70,11 @@ func testAccPreCheck(t *testing.T) {
valid = false
}
- if _, ok := os.LookupEnv("MINIO_ENDPOINT"); !ok {
- valid = false
- }
- if _, ok := os.LookupEnv("MINIO_USER"); !ok {
- valid = false
- }
- if _, ok := os.LookupEnv("MINIO_PASSWORD"); !ok {
- valid = false
- }
- if _, ok := os.LookupEnv("MINIO_ENABLE_HTTPS"); !ok {
- valid = false
+ for _, envvar := range kEnvVarNeeded {
+ if _, ok := os.LookupEnv(envvar); !ok {
+ valid = false
+ break
+ }
}
if !valid {
diff --git a/minio/resource_minio_iam_user.go b/minio/resource_minio_iam_user.go
index 7df04516..978ee97f 100644
--- a/minio/resource_minio_iam_user.go
+++ b/minio/resource_minio_iam_user.go
@@ -2,6 +2,7 @@ package minio
import (
"context"
+ "errors"
"fmt"
"log"
"regexp"
@@ -146,8 +147,19 @@ func minioReadUser(ctx context.Context, d *schema.ResourceData, meta interface{}
iamUserConfig := IAMUserConfig(d, meta)
output, err := iamUserConfig.MinioAdmin.GetUserInfo(ctx, d.Id())
+
+ errResp := madmin.ErrorResponse{}
+
+ if errors.As(err, &errResp) {
+ if errResp.Code == "XMinioAdminNoSuchUser" {
+ log.Printf("%s", NewResourceErrorStr("unable to find user", d.Id(), err))
+ d.SetId("")
+ return nil
+ }
+ }
+
if err != nil {
- return NewResourceError("error reading IAM User %s: %s", d.Id(), err)
+ return NewResourceError("error reading IAM User", d.Id(), err)
}
log.Printf("[WARN] (%v)", output)
@@ -178,7 +190,7 @@ func minioDeleteUser(ctx context.Context, d *schema.ResourceData, meta interface
err := deleteMinioIamUser(ctx, iamUserConfig)
if err != nil {
- return NewResourceError("error deleting IAM User %s: %s", d.Id(), err)
+ return NewResourceError("error deleting IAM User", d.Id(), err)
}
// Actively set resource as deleted as the update path might force a deletion via MinioForceDestroy
diff --git a/minio/resource_minio_iam_user_test.go b/minio/resource_minio_iam_user_test.go
index 842a8d14..172c162a 100644
--- a/minio/resource_minio_iam_user_test.go
+++ b/minio/resource_minio_iam_user_test.go
@@ -218,6 +218,43 @@ func TestAccAWSUser_UpdateAccessKey(t *testing.T) {
})
}
+func TestAccAWSUser_RecreateMissing(t *testing.T) {
+ var user madmin.UserInfo
+
+ name := fmt.Sprintf("test-user-%d", acctest.RandInt())
+ status := "enabled"
+ resourceName := "minio_iam_user.test"
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioUserDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccMinioUserConfig(name),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMinioUserExists(resourceName, &user),
+ testAccCheckMinioUserAttributes(resourceName, name, status),
+ ),
+ },
+ {
+ PreConfig: func() {
+ _ = testAccCheckMinioUserDeleteExternally(name)
+ },
+ RefreshState: true,
+ ExpectNonEmptyPlan: true,
+ },
+ {
+ Config: testAccMinioUserConfig(name),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMinioUserExists(resourceName, &user),
+ testAccCheckMinioUserAttributes(resourceName, name, status),
+ ),
+ },
+ },
+ })
+}
+
func testAccMinioUserConfigWithSecretOne(rName string) string {
return fmt.Sprintf(`
resource "minio_iam_user" "test5" {
@@ -356,6 +393,17 @@ func testAccCheckMinioUserDestroy(s *terraform.State) error {
return nil
}
+func testAccCheckMinioUserDeleteExternally(username string) error {
+ minioIam := testAccProvider.Meta().(*S3MinioClient).S3Admin
+
+ // Delete user
+ if err := minioIam.RemoveUser(context.Background(), username); err != nil {
+ return fmt.Errorf("user could not be deleted: %w", err)
+ }
+
+ return nil
+}
+
func testAccCheckMinioUserExfiltrateAccessKey(n string, accessKey *string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs := s.RootModule().Resources[n]
diff --git a/minio/resource_minio_ilm_policy.go b/minio/resource_minio_ilm_policy.go
index 5e7216f2..e2c6a039 100644
--- a/minio/resource_minio_ilm_policy.go
+++ b/minio/resource_minio_ilm_policy.go
@@ -44,6 +44,11 @@ func resourceMinioILMPolicy() *schema.Resource {
Optional: true,
ValidateDiagFunc: validateILMExpiration,
},
+ "noncurrent_version_expiration_days": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateDiagFunc: validateILMNoncurrentVersionExpiration,
+ },
"status": {
Type: schema.TypeString,
Computed: true,
@@ -74,6 +79,16 @@ func validateILMExpiration(v interface{}, p cty.Path) (errors diag.Diagnostics)
return
}
+func validateILMNoncurrentVersionExpiration(v interface{}, p cty.Path) (errors diag.Diagnostics) {
+ value := v.(int)
+
+ if value < 1 {
+ return diag.Errorf("noncurrent_version_expiration_days must be strictly positive")
+ }
+
+ return
+}
+
func minioCreateILMPolicy(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
c := meta.(*S3MinioClient).S3Client
@@ -86,6 +101,8 @@ func minioCreateILMPolicy(ctx context.Context, d *schema.ResourceData, meta inte
var filter lifecycle.Filter
+ noncurrentVersionExpirationDays := lifecycle.NoncurrentVersionExpiration{NoncurrentDays: lifecycle.ExpirationDays(rule["noncurrent_version_expiration_days"].(int))}
+
tags := map[string]string{}
for k, v := range rule["tags"].(map[string]interface{}) {
tags[k] = v.(string)
@@ -101,10 +118,11 @@ func minioCreateILMPolicy(ctx context.Context, d *schema.ResourceData, meta inte
}
r := lifecycle.Rule{
- ID: rule["id"].(string),
- Expiration: parseILMExpiration(rule["expiration"].(string)),
- Status: "Enabled",
- RuleFilter: filter,
+ ID: rule["id"].(string),
+ Expiration: parseILMExpiration(rule["expiration"].(string)),
+ NoncurrentVersionExpiration: noncurrentVersionExpirationDays,
+ Status: "Enabled",
+ RuleFilter: filter,
}
config.Rules = append(config.Rules, r)
}
@@ -144,6 +162,11 @@ func minioReadILMPolicy(ctx context.Context, d *schema.ResourceData, meta interf
expiration = r.Expiration.Date.Format("2006-01-02")
}
+ var noncurrentVersionExpirationDays int
+ if r.NoncurrentVersionExpiration.NoncurrentDays != 0 {
+ noncurrentVersionExpirationDays = int(r.NoncurrentVersionExpiration.NoncurrentDays)
+ }
+
var prefix string
tags := map[string]string{}
if len(r.RuleFilter.And.Tags) > 0 {
@@ -156,11 +179,12 @@ func minioReadILMPolicy(ctx context.Context, d *schema.ResourceData, meta interf
}
rule := map[string]interface{}{
- "id": r.ID,
- "expiration": expiration,
- "status": r.Status,
- "filter": prefix,
- "tags": tags,
+ "id": r.ID,
+ "expiration": expiration,
+ "noncurrent_version_expiration_days": noncurrentVersionExpirationDays,
+ "status": r.Status,
+ "filter": prefix,
+ "tags": tags,
}
rules = append(rules, rule)
}
diff --git a/minio/resource_minio_ilm_policy_test.go b/minio/resource_minio_ilm_policy_test.go
index 8af99e19..8e46f296 100644
--- a/minio/resource_minio_ilm_policy_test.go
+++ b/minio/resource_minio_ilm_policy_test.go
@@ -90,6 +90,29 @@ func TestAccILMPolicy_filterTags(t *testing.T) {
})
}
+func TestAccILMPolicy_expireNoncurrentVersion(t *testing.T) {
+ var lifecycleConfig lifecycle.Configuration
+ name := fmt.Sprintf("test-ilm-rule4-%d", acctest.RandInt())
+ resourceName := "minio_ilm_policy.rule4"
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccMinioILMPolicyExpireNoncurrentVersion(name),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMinioILMPolicyExists(resourceName, &lifecycleConfig),
+ testAccCheckMinioLifecycleConfigurationValid(&lifecycleConfig),
+ resource.TestCheckResourceAttr(
+ resourceName, "rule.0.noncurrent_version_expiration_days", "5"),
+ ),
+ },
+ },
+ })
+}
+
func testAccCheckMinioLifecycleConfigurationValid(config *lifecycle.Configuration) resource.TestCheckFunc {
return func(s *terraform.State) error {
if config.Empty() || len(config.Rules) == 0 {
@@ -208,3 +231,20 @@ resource "minio_ilm_policy" "rule3" {
}
`, randInt)
}
+
+func testAccMinioILMPolicyExpireNoncurrentVersion(randInt string) string {
+ return fmt.Sprintf(`
+resource "minio_s3_bucket" "bucket4" {
+ bucket = "%s"
+ acl = "public-read"
+}
+resource "minio_ilm_policy" "rule4" {
+ bucket = "${minio_s3_bucket.bucket4.id}"
+ rule {
+ id = "expireNoncurrentVersion"
+ expiration = "5d"
+ noncurrent_version_expiration_days = 5
+ }
+}
+`, randInt)
+}
diff --git a/minio/resource_minio_s3_bucket.go b/minio/resource_minio_s3_bucket.go
index 2b1ea7b8..a754a0b6 100644
--- a/minio/resource_minio_s3_bucket.go
+++ b/minio/resource_minio_s3_bucket.go
@@ -220,7 +220,8 @@ func minioDeleteBucket(ctx context.Context, d *schema.ResourceData, meta interfa
// List all objects from a bucket-name with a matching prefix.
for object := range bucketConfig.MinioClient.ListObjects(ctx, d.Id(), minio.ListObjectsOptions{
- Recursive: true,
+ Recursive: true,
+ WithVersions: true,
}) {
if object.Err != nil {
log.Fatalln(object.Err)
diff --git a/minio/resource_minio_s3_bucket_replication.go b/minio/resource_minio_s3_bucket_replication.go
new file mode 100644
index 00000000..3eb09e73
--- /dev/null
+++ b/minio/resource_minio_s3_bucket_replication.go
@@ -0,0 +1,779 @@
+package minio
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "math"
+ "net/url"
+ "path"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/hashicorp/go-cty/cty"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+ "github.com/minio/madmin-go/v3"
+ "github.com/minio/minio-go/v7/pkg/replication"
+ "github.com/minio/minio-go/v7/pkg/s3utils"
+ "github.com/rs/xid"
+ "golang.org/x/exp/slices"
+)
+
+func resourceMinioBucketReplication() *schema.Resource {
+ return &schema.Resource{
+ CreateContext: minioPutBucketReplication,
+ ReadContext: minioReadBucketReplication,
+ UpdateContext: minioPutBucketReplication,
+ DeleteContext: minioDeleteBucketReplication,
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ Schema: map[string]*schema.Schema{
+ "bucket": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "Name of the bucket on which to setup replication rules",
+ },
+ "rule": {
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Rule definitions",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Rule ID generated by MinIO",
+ },
+ "arn": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Rule ARN genrated by MinIO",
+ },
+ "enabled": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ Description: "Whether or not this rule is enabled",
+ },
+ "priority": {
+ Type: schema.TypeInt,
+ Optional: true,
+ ValidateFunc: validation.IntAtLeast(1),
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
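+ // Suppress the diff when the priority is unset (0) on both sides or unchanged, so omitted priorities do not cause perpetual diffs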
+ oldVal, _ := strconv.Atoi(oldValue)
+ newVal, _ := strconv.Atoi(newValue)
+ return oldVal == 0 && newVal == 0 || oldVal == newVal
+ },
+ Description: "Rule priority. If omitted, the inverted index will be used as priority. This means that the first rule definition will have the higher priority",
+ },
+ "prefix": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "",
+ Description: "Bucket prefix object must be in to be syncronised",
+ },
+ "tags": {
+ Type: schema.TypeMap,
+ Optional: true,
+ ValidateDiagFunc: validation.AllDiag(
+ validation.MapValueMatch(regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`), ""),
+ validation.MapKeyMatch(regexp.MustCompile(`^[a-zA-Z0-9-+\-._:/@ ]+$`), ""),
+ validation.MapValueLenBetween(1, 256),
+ validation.MapKeyLenBetween(1, 128),
+ ),
+ Description: "Tags which objects must have to be syncronised",
+ },
+ "delete_replication": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether or not to propagate deletion",
+ },
+ "delete_marker_replication": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether or not to synchronise marker deletion",
+ },
+ "existing_object_replication": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether or not to synchronise object created prior the replication configuration",
+ },
+ "metadata_sync": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Description: "Whether or not to synchonise buckets and objects metadata (such as locks). This must be enabled to achieve a two-way replication",
+ },
+ "target": {
+ Type: schema.TypeList,
+ MinItems: 1,
+ MaxItems: 1,
+ Required: true,
+ Description: "Bucket prefix",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "bucket": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The name of the existing target bucket to replicate into",
+ },
+ "storage_class": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "The storage class to use for the object on this target",
+ },
+ "host": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "The target host (pair IP/port or domain port). If port is omitted, HTTPS port (or HTTP if unsecure) will be used. This host must be reachable by the MinIO instance itself",
+ },
+ "secure": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: true,
+ Description: "Whether to use HTTPS with this target (Recommended). Note that disabling HTTPS will yield Terraform warning for security reason`",
+ },
+ "path_style": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "auto",
+ ValidateFunc: validation.StringInSlice([]string{"on", "off", "auto"}, true),
+ Description: "Whether to use path-style or virtual-hosted-syle request to this target (https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access). `auto` allows MinIO to chose automatically the appropriate option (Recommened)`",
+ },
+ "path": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Path of the Minio endpoint. This is usefull if MinIO API isn't served on at the root, e.g for `example.com/minio/`, the path would be `/minio/`",
+ },
+ "syncronous": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ Description: "Use synchronous replication.",
+ },
+ "disable_proxy": {
+ Type: schema.TypeBool,
+ Optional: true,
+ Default: false,
+ Description: "Disable proxy for this target",
+ },
+ "health_check_period": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "30s",
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
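+ // Suppress the diff when the configured duration parses to the same normalised value as the one currently stored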
+ newVal, err := time.ParseDuration(newValue)
+ return err == nil && shortDur(newVal) == oldValue
+ },
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(`^[0-9]+\s?[s|m|h]$`), "must be a valid golang duration"),
+ Description: "Period where the health of this target will be checked. This must be a valid duration, such as `5s` or `2m`",
+ },
+ "bandwidth_limt": {
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "0",
+ DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool {
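+ // Suppress the diff when the configured size string parses to the same humanised byte value as the one currently stored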
+ newVal, err := humanize.ParseBytes(newValue)
+ return err == nil && humanize.Bytes(newVal) == oldValue
+ },
+ ValidateDiagFunc: func(i interface{}, _ cty.Path) (diags diag.Diagnostics) {
+ v, ok := i.(string)
+ if !ok {
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "expected type of bandwidth_limt to be string",
+ })
+ return
+ }
+
+ if v == "" {
+ return
+ }
+
+ val, err := humanize.ParseBytes(v)
+ if err != nil {
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "bandwidth_limt must be a positive value. It may use suffixes (k, m, g, ..) ",
+ })
+ return
+ }
+ if val < uint64(100*humanize.BigMByte.Int64()) {
+ diags = append(diags, diag.Diagnostic{
+ Severity: diag.Error,
+ Summary: "When set, bandwidth_limt must be at least 100MBps",
+ })
+
+ }
+ return
+ },
+ Description: "Maximum bandwidth in byte per second that MinIO can used when syncronysing this target. Minimum is 100MB",
+ },
+ "region": {
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "Region of the target MinIO. This will be used to generate the target ARN",
+ },
+ "access_key": {
+ Type: schema.TypeString,
+ Required: true,
+ ValidateFunc: validation.StringIsNotEmpty,
+ Description: "Access key for the replication service account in the target MinIO",
+ },
+ "secret_key": {
+ Type: schema.TypeString,
+ Optional: true,
+ Sensitive: true,
+ ValidateFunc: validation.StringIsNotEmpty,
+ Description: "Secret key for the replication service account in the target MinIO. This is optional so the resource can be imported, but omitting it prevents secret updates",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
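+// minioPutBucketReplication builds the replication configuration from the resource data,
+// pushes it to the bucket with SetBucketReplication and records the bucket name as the resource ID.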
+func minioPutBucketReplication(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ bucketReplicationConfig, diags := BucketReplicationConfig(d, meta)
+ replicationConfig := bucketReplicationConfig.ReplicationRules
+
+ if replicationConfig == nil || diags.HasError() {
+ return diags
+ }
+
+ log.Printf("[DEBUG] S3 bucket: %s, put replication configuration: %v", bucketReplicationConfig.MinioBucket, replicationConfig)
+
+ cfg, err := convertBucketReplicationConfig(bucketReplicationConfig, replicationConfig)
+
+ if err != nil {
+ return NewResourceError(fmt.Sprintf("error generating bucket replication configuration for %q", bucketReplicationConfig.MinioBucket), d.Id(), err)
+ }
+
+ err = bucketReplicationConfig.MinioClient.SetBucketReplication(
+ ctx,
+ bucketReplicationConfig.MinioBucket,
+ cfg,
+ )
+
+ if err != nil {
+ return NewResourceError(fmt.Sprintf("error putting bucket replication configuration for %q", bucketReplicationConfig.MinioBucket), d.Id(), err)
+ }
+
+ d.SetId(bucketReplicationConfig.MinioBucket)
+
+ return nil
+}
+
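+// minioReadBucketReplication reads both the bucket replication rules and the remote target
+// definitions from MinIO and reconciles them into the Terraform state, preserving the rule
+// order declared in the configuration.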
+func minioReadBucketReplication(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ bucketReplicationConfig, diags := BucketReplicationConfig(d, meta)
+
+ if diags.HasError() {
+ return diags
+ }
+
+ client := bucketReplicationConfig.MinioClient
+ admclient := bucketReplicationConfig.MinioAdmin
+ bucketName := d.Id()
+
+ // Reverse index storing rule priorities as read from MinIO so they match the order declared in the IaC. This prevents Terraform from trying to re-order rules on every run
+ rulePriorityMap := map[int]int{}
+ // Reverse index storing each ARN and its index in the rule set. This is used to match the bucket config and remote target order
+ ruleArnMap := map[string]int{}
+
+ if bucketReplicationConfig.ReplicationRules != nil {
+ for idx, rule := range bucketReplicationConfig.ReplicationRules {
+ priority := rule.Priority
+ if priority == 0 {
+ priority = -len(bucketReplicationConfig.ReplicationRules) + idx
+ }
+ rulePriorityMap[priority] = idx
+ }
+ }
+
+ log.Printf("[DEBUG] S3 bucket replication, read for bucket: %s", bucketName)
+
+ // First, gather the bucket replication config
+ rcfg, err := client.GetBucketReplication(ctx, bucketName)
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch bucket replication config for %q: %v", bucketName, err)
+ return diag.FromErr(fmt.Errorf("error reading bucket replication configuration: %s", err))
+ }
+
+ rules := make([]map[string]interface{}, len(rcfg.Rules))
+
+ for idx, rule := range rcfg.Rules {
+ var ruleIdx int
+ var ok bool
+ if ruleIdx, ok = rulePriorityMap[rule.Priority]; !ok {
+ ruleIdx = idx
+ }
+ if _, ok = ruleArnMap[rule.Destination.Bucket]; ok {
+ log.Printf("[WARN] Conflict detected between two rules containing the same ARN for %q: %q", bucketName, rule.Destination.Bucket)
+ return diag.FromErr(fmt.Errorf("conflict detected between two rules containing the same ARN for %q: %q", bucketName, rule.Destination.Bucket))
+ }
+ ruleArnMap[rule.Destination.Bucket] = ruleIdx
+ target := map[string]interface{}{
+ "storage_class": rule.Destination.StorageClass,
+ }
+ var priority interface{} = rule.Priority
+ if len(bucketReplicationConfig.ReplicationRules) > ruleIdx && rule.Priority == -bucketReplicationConfig.ReplicationRules[ruleIdx].Priority {
+ priority = nil
+ }
+ rules[ruleIdx] = map[string]interface{}{
+ "id": rule.ID,
+ "arn": rule.Destination.Bucket,
+ "enabled": rule.Status == replication.Enabled,
+ "priority": priority,
+ "prefix": rule.Prefix(),
+ "delete_replication": rule.DeleteReplication.Status == replication.Enabled,
+ "delete_marker_replication": rule.DeleteMarkerReplication.Status == replication.Enabled,
+ "existing_object_replication": rule.ExistingObjectReplication.Status == replication.Enabled,
+ "metadata_sync": rule.SourceSelectionCriteria.ReplicaModifications.Status == replication.Enabled,
+ }
+
+ log.Printf("[DEBUG] Rule data for rule#%d is: %q", ruleIdx, rule)
+
+ if len(rule.Filter.And.Tags) != 0 || rule.Filter.And.Prefix != "" {
+ tags := map[string]string{}
+ for _, tag := range rule.Filter.And.Tags {
+ if tag.IsEmpty() {
+ continue
+ }
+ tags[tag.Key] = tag.Value
+ }
+ rules[ruleIdx]["tags"] = tags
+ } else if rule.Filter.Tag.Key != "" {
+ rules[ruleIdx]["tags"] = map[string]string{
+ rule.Filter.Tag.Key: rule.Filter.Tag.Value,
+ }
+ } else {
+ rules[ruleIdx]["tags"] = nil
+ }
+
+ // During import, no rules are defined. Furthermore, since it is impossible to read the secret from the API, we
+ // default it to an empty string, allowing users to prevent remote changes by also using an empty string or omitting the secret_key
+ if len(bucketReplicationConfig.ReplicationRules) > ruleIdx {
+ target["secret_key"] = bucketReplicationConfig.ReplicationRules[ruleIdx].Target.SecretKey
+ }
+
+ rules[ruleIdx]["target"] = []interface{}{target}
+ }
+
+ // Second, we read the remote bucket config
+ existingRemoteTargets, err := admclient.ListRemoteTargets(ctx, bucketName, "")
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch existing remote target config for %q: %v", bucketName, err)
+ return diag.FromErr(fmt.Errorf("error reading replication remote target configuration: %s", err))
+ }
+
+ if len(existingRemoteTargets) != len(rules) {
+ return diag.FromErr(fmt.Errorf("inconsistent number of remote target and bucket replication rules (%d != %d)", len(existingRemoteTargets), len(rules)))
+ }
+
+ for _, remoteTarget := range existingRemoteTargets {
+ var ruleIdx int
+ var ok bool
+ var target map[string]interface{}
+ if ruleIdx, ok = ruleArnMap[remoteTarget.Arn]; !ok {
+ return diag.FromErr(fmt.Errorf("unable to find the remote target configuration for ARN %q on %s", remoteTarget.Arn, bucketName))
+ }
+ var targets []interface{}
+ if targets, ok = rules[ruleIdx]["target"].([]interface{}); !ok || len(targets) != 1 {
+ return diag.FromErr(fmt.Errorf("unable to find the bucket replication configuration associated to ARN %q (rule#%d) on %s", remoteTarget.Arn, ruleIdx, bucketName))
+ }
+ if target, ok = targets[0].(map[string]interface{}); !ok || len(target) == 0 {
+ return diag.FromErr(fmt.Errorf("unable to extract the target information for this remote target configuration on %s", bucketName))
+ }
+
+ pathComponent := strings.Split(remoteTarget.TargetBucket, "/")
+
+ log.Printf("[DEBUG] absolute remote target path is %s", remoteTarget.TargetBucket)
+
+ target["bucket"] = pathComponent[len(pathComponent)-1]
+ target["host"] = remoteTarget.Endpoint
+ target["secure"] = remoteTarget.Secure
+ target["path_style"] = remoteTarget.Path
+ target["path"] = strings.Join(pathComponent[:len(pathComponent)-1], "/")
+ target["syncronous"] = remoteTarget.ReplicationSync
+ target["disable_proxy"] = remoteTarget.DisableProxy
+ target["health_check_period"] = shortDur(remoteTarget.HealthCheckDuration)
+ target["bandwidth_limt"] = humanize.Bytes(uint64(remoteTarget.BandwidthLimit))
+ target["region"] = remoteTarget.Region
+ target["access_key"] = remoteTarget.Credentials.AccessKey
+
+ log.Printf("[DEBUG] serialised remote target data is %v", target)
+
+ rules[ruleIdx]["target"] = []interface{}{target}
+ }
+
+ if err := d.Set("bucket", d.Id()); err != nil {
+ return diag.FromErr(fmt.Errorf("error setting replication configuration: %w", err))
+ }
+
+ if err := d.Set("rule", rules); err != nil {
+ return diag.FromErr(fmt.Errorf("error setting replication configuration: %w", err))
+ }
+
+ return diags
+}
+
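+// minioDeleteBucketReplication clears every replication rule on the bucket and then verifies
+// that no remote target remains configured.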
+func minioDeleteBucketReplication(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ bucketReplicationConfig, diags := BucketReplicationConfig(d, meta)
+
+ if len(bucketReplicationConfig.ReplicationRules) == 0 && !diags.HasError() {
+ log.Printf("[DEBUG] Removing bucket replication for unversioned bucket (%s) from state", d.Id())
+ } else if diags.HasError() {
+ return diags
+ }
+
+ client := bucketReplicationConfig.MinioClient
+ admclient := bucketReplicationConfig.MinioAdmin
+
+ rcfg, err := client.GetBucketReplication(ctx, bucketReplicationConfig.MinioBucket)
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch bucket replication config for %q: %v", bucketReplicationConfig.MinioBucket, err)
+ return diag.FromErr(fmt.Errorf("error reading bucket replication configuration: %s", err))
+ }
+
+ log.Printf("[DEBUG] S3 bucket: %s, disabling replication", bucketReplicationConfig.MinioBucket)
+
+ rcfg.Rules = []replication.Rule{}
+ err = client.SetBucketReplication(ctx, bucketReplicationConfig.MinioBucket, rcfg)
+ if err != nil {
+ log.Printf("[WARN] Unable to set an empty replication config for %q: %v", bucketReplicationConfig.MinioBucket, err)
+ return diag.FromErr(fmt.Errorf("error writing bucket replication configuration: %s", err))
+ }
+
+ existingRemoteTargets, err := admclient.ListRemoteTargets(ctx, bucketReplicationConfig.MinioBucket, "")
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch existing remote target config for %q: %v", bucketReplicationConfig.MinioBucket, err)
+ return diag.FromErr(fmt.Errorf("error reading replication remote target configuration: %s", err))
+ }
+ if len(existingRemoteTargets) != 0 {
+ return diag.FromErr(fmt.Errorf("%d remote targets are still present on the bucket while none are expected", len(existingRemoteTargets)))
+ }
+
+ return diags
+}
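+
+// toEnableFlag converts a boolean into the "enable"/"disable" string expected by the
+// minio-go replication options.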
+func toEnableFlag(b bool) string {
+ if b {
+ return "enable"
+ }
+ return "disable"
+}
+
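+// convertBucketReplicationConfig translates the Terraform rule definitions into a minio-go
+// replication.Config, creating or updating the matching remote targets through the admin API
+// and removing the targets that are no longer referenced by any rule.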
+func convertBucketReplicationConfig(bucketReplicationConfig *S3MinioBucketReplication, c []S3MinioBucketReplicationRule) (rcfg replication.Config, err error) {
+ client := bucketReplicationConfig.MinioClient
+ admclient := bucketReplicationConfig.MinioAdmin
+
+ ctx := context.Background()
+
+ rcfg, err = client.GetBucketReplication(ctx, bucketReplicationConfig.MinioBucket)
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch bucket replication config for %q: %v", bucketReplicationConfig.MinioBucket, err)
+ return
+ }
+
+ usedARNs := make([]string, len(c))
+ existingRemoteTargets, err := admclient.ListRemoteTargets(ctx, bucketReplicationConfig.MinioBucket, "")
+ if err != nil {
+ log.Printf("[WARN] Unable to fetch existing remote target config for %q: %v", bucketReplicationConfig.MinioBucket, err)
+ return
+ }
+
+ for i, rule := range c {
+ err = s3utils.CheckValidBucketName(rule.Target.Bucket)
+ if err != nil {
+ log.Printf("[WARN] Invalid bucket name for %q: %v", rule.Target.Bucket, err)
+ return
+ }
+
+ tgtBucket := rule.Target.Bucket
+ if rule.Target.Path != "" {
+ tgtBucket = path.Clean("./" + rule.Target.Path + "/" + tgtBucket)
+ }
+ log.Printf("[DEBUG] Full path to target bucket is %s", tgtBucket)
+
+ creds := &madmin.Credentials{AccessKey: rule.Target.AccessKey, SecretKey: rule.Target.SecretKey}
+ bktTarget := &madmin.BucketTarget{
+ TargetBucket: tgtBucket,
+ Secure: rule.Target.Secure,
+ Credentials: creds,
+ Endpoint: rule.Target.Host,
+ Path: rule.Target.PathStyle.String(),
+ API: "s3v4",
+ Type: madmin.ReplicationService,
+ Region: rule.Target.Region,
+ BandwidthLimit: rule.Target.BandwidthLimit,
+ ReplicationSync: rule.Target.Syncronous,
+ DisableProxy: rule.Target.DisableProxy,
+ HealthCheckDuration: rule.Target.HealthCheckPeriod,
+ }
+ targets, _ := admclient.ListRemoteTargets(ctx, bucketReplicationConfig.MinioBucket, string(madmin.ReplicationService))
+ log.Printf("[DEBUG] Existing remote targets %q: %v", bucketReplicationConfig.MinioBucket, targets)
+ var arn string
+
+ var existingRemoteTarget *madmin.BucketTarget
+ if rule.Arn != "" {
+ for i, target := range targets {
+ if target.Arn == rule.Arn {
+ existingRemoteTarget = &targets[i]
+ break
+ }
+ // At this stage, we could also anticipate failures for already existing remote targets, since the endpoint is unique
+ // per bucket (https://github.com/minio/minio/blob/master/cmd/bucket-targets.go#L356), but this behaviour could change in the future
+ }
+ }
+
+ if existingRemoteTarget == nil {
+ log.Printf("[DEBUG] Adding new remote target %v for %q", *bktTarget, bucketReplicationConfig.MinioBucket)
+ arn, err = admclient.SetRemoteTarget(ctx, bucketReplicationConfig.MinioBucket, bktTarget)
+ if err != nil {
+ log.Printf("[WARN] Unable to configure remote target %v for %q: %v", *bktTarget, bucketReplicationConfig.MinioBucket, err)
+ return
+ }
+ } else {
+ var remoteTargetUpdate []madmin.TargetUpdateType
+
+ if *existingRemoteTarget.Credentials != *bktTarget.Credentials {
+ existingRemoteTarget.Credentials = bktTarget.Credentials
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.CredentialsUpdateType)
+ }
+ if existingRemoteTarget.ReplicationSync != bktTarget.ReplicationSync {
+ existingRemoteTarget.ReplicationSync = bktTarget.ReplicationSync
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.SyncUpdateType)
+ }
+ if existingRemoteTarget.DisableProxy != bktTarget.DisableProxy {
+ existingRemoteTarget.DisableProxy = bktTarget.DisableProxy
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.ProxyUpdateType)
+ }
+ if existingRemoteTarget.BandwidthLimit != bktTarget.BandwidthLimit {
+ existingRemoteTarget.BandwidthLimit = bktTarget.BandwidthLimit
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.BandwidthLimitUpdateType)
+ }
+ if existingRemoteTarget.HealthCheckDuration != bktTarget.HealthCheckDuration {
+ existingRemoteTarget.HealthCheckDuration = bktTarget.HealthCheckDuration
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.HealthCheckDurationUpdateType)
+ }
+ if existingRemoteTarget.Path != bktTarget.Path {
+ existingRemoteTarget.Path = bktTarget.Path
+ remoteTargetUpdate = append(remoteTargetUpdate, madmin.PathUpdateType)
+ }
+ log.Printf("[DEBUG] Editing remote target %v for %q", *bktTarget, bucketReplicationConfig.MinioBucket)
+ arn, err = admclient.UpdateRemoteTarget(ctx, existingRemoteTarget, remoteTargetUpdate...)
+ if err != nil {
+ log.Printf("[WARN] Unable to update the remote target %v for %q: %v", *bktTarget, bucketReplicationConfig.MinioBucket, err)
+ return
+ }
+ }
+
+ tagList := []string{}
+ for k, v := range rule.Tags {
+ var escapedValue *url.URL
+ escapedValue, err = url.Parse(v)
+ if err != nil {
+ return
+ }
+
+ tagList = append(tagList, fmt.Sprintf("%s=%s", k, escapedValue.String()))
+ }
+
+ opts := replication.Options{
+ TagString: strings.Join(tagList, "&"),
+ IsTagSet: len(tagList) != 0,
+ StorageClass: rule.Target.StorageClass,
+ Priority: strconv.Itoa(int(math.Abs(float64(rule.Priority)))),
+ Prefix: rule.Prefix,
+ RuleStatus: toEnableFlag(rule.Enabled),
+ ID: rule.Id,
+ DestBucket: arn,
+ ReplicateDeleteMarkers: toEnableFlag(rule.DeleteMarkerReplication),
+ ReplicateDeletes: toEnableFlag(rule.DeleteReplication),
+ ReplicaSync: toEnableFlag(rule.MetadataSync),
+ ExistingObjectReplicate: toEnableFlag(rule.ExistingObjectReplication),
+ }
+ if strings.TrimSpace(opts.ID) == "" {
+ rule.Id = xid.New().String()
+ opts.ID = rule.Id
+ opts.Op = replication.AddOption
+ log.Printf("[DEBUG] Adding replication option for rule#%d: %v", i, opts)
+ err = rcfg.AddRule(opts)
+ } else {
+ opts.Op = replication.SetOption
+ log.Printf("[DEBUG] Editing replication option for rule#%d: %v", i, opts)
+ err = rcfg.EditRule(opts)
+ }
+
+ if err != nil {
+ return
+ }
+ usedARNs[i] = arn
+ }
+
+ for _, existingRemoteTarget := range existingRemoteTargets {
+ if !slices.Contains(usedARNs, existingRemoteTarget.Arn) {
+ err = admclient.RemoveRemoteTarget(ctx, bucketReplicationConfig.MinioBucket, existingRemoteTarget.Arn)
+ }
+
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
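+// getBucketReplicationConfig decodes the raw `rule` blocks from the Terraform schema into
+// S3MinioBucketReplicationRule values, emitting diagnostics for every field that cannot be
+// converted or validated.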
+func getBucketReplicationConfig(v []interface{}) (result []S3MinioBucketReplicationRule, errs diag.Diagnostics) {
+ if len(v) == 0 || v[0] == nil {
+ return
+ }
+
+ result = make([]S3MinioBucketReplicationRule, len(v))
+ for i, rule := range v {
+ var ok bool
+ tfMap, ok := rule.(map[string]interface{})
+ if !ok {
+ errs = append(errs, diag.Errorf("Unable to extract rule %d", i)...)
+ continue
+ }
+ log.Printf("[DEBUG] rule[%d] contains %v", i, tfMap)
+
+ result[i].Arn, _ = tfMap["arn"].(string)
+ result[i].Id, _ = tfMap["id"].(string)
+
+ if result[i].Enabled, ok = tfMap["enabled"].(bool); !ok {
+ log.Printf("[DEBUG] rule[%d].enabled omitted. Defaulting to true", i)
+ result[i].Enabled = true
+ }
+
+ if result[i].Priority, ok = tfMap["priority"].(int); !ok || result[i].Priority == 0 {
+ // Since priorities are always positive, we use a negative value to indicate they were automatically generated
+ result[i].Priority = -len(v) + i
+ log.Printf("[DEBUG] rule[%d].priority omitted. Defaulting to index (%d)", i, -result[i].Priority)
+ }
+
+ result[i].Prefix, _ = tfMap["prefix"].(string)
+
+ if tags, ok := tfMap["tags"].(map[string]interface{}); ok {
+ log.Printf("[DEBUG] rule[%d].tags map contains: %v", i, tags)
+ tagMap := map[string]string{}
+ for k, val := range tags {
+ var valOk bool
+ tagMap[k], valOk = val.(string)
+ if !valOk {
+ errs = append(errs, diag.Errorf("rule[%d].tags[%s] value must be a string, not a %s", i, k, reflect.TypeOf(val))...)
+ }
+ }
+ result[i].Tags = tagMap
+ } else {
+ errs = append(errs, diag.Errorf("unable to extract rule[%d].tags of type %s", i, reflect.TypeOf(tfMap["tags"]))...)
+ }
+
+ log.Printf("[DEBUG] rule[%d].tags are: %v", i, result[i].Tags)
+
+ result[i].DeleteReplication, ok = tfMap["delete_replication"].(bool)
+ result[i].DeleteReplication = result[i].DeleteReplication && ok
+ result[i].DeleteMarkerReplication, ok = tfMap["delete_marker_replication"].(bool)
+ result[i].DeleteMarkerReplication = result[i].DeleteMarkerReplication && ok
+ result[i].ExistingObjectReplication, ok = tfMap["existing_object_replication"].(bool)
+ result[i].ExistingObjectReplication = result[i].ExistingObjectReplication && ok
+ result[i].MetadataSync, ok = tfMap["metadata_sync"].(bool)
+ result[i].MetadataSync = result[i].MetadataSync && ok
+
+ var targets []interface{}
+ if targets, ok = tfMap["target"].([]interface{}); !ok || len(targets) != 1 {
+ errs = append(errs, diag.Errorf("Unexpected value type for rule[%d].target. Exactly one target configuration is expected", i)...)
+ continue
+ }
+ var target map[string]interface{}
+ if target, ok = targets[0].(map[string]interface{}); !ok {
+ errs = append(errs, diag.Errorf("Unexpected value type for rule[%d].target. Unable to convert to a usable type", i)...)
+ continue
+ }
+
+ if result[i].Target.Bucket, ok = target["bucket"].(string); !ok {
+ errs = append(errs, diag.Errorf("rule[%d].target.bucket cannot be omitted", i)...)
+ }
+
+ result[i].Target.StorageClass, _ = target["storage_class"].(string)
+
+ if result[i].Target.Host, ok = target["host"].(string); !ok {
+ errs = append(errs, diag.Errorf("rule[%d].target.host cannot be omitted", i)...)
+ }
+
+ result[i].Target.Path, _ = target["path"].(string)
+ result[i].Target.Region, _ = target["region"].(string)
+
+ if result[i].Target.AccessKey, ok = target["access_key"].(string); !ok {
+ errs = append(errs, diag.Errorf("rule[%d].target.access_key cannot be omitted", i)...)
+ }
+
+ if result[i].Target.SecretKey, ok = target["secret_key"].(string); !ok {
+ errs = append(errs, diag.Errorf("rule[%d].target.secret_key cannot be omitted", i)...)
+ }
+
+ if result[i].Target.Secure, ok = target["secure"].(bool); !result[i].Target.Secure || !ok {
+ errs = append(errs, diag.Diagnostic{
+ Severity: diag.Warning,
+ Summary: fmt.Sprintf("rule[%d].target.secure is false. It is unsafe to use bucket replication over HTTP", i),
+ })
+ }
+
+ result[i].Target.Syncronous, ok = target["syncronous"].(bool)
+ result[i].Target.Syncronous = result[i].Target.Syncronous && ok
+ result[i].Target.DisableProxy, ok = target["disable_proxy"].(bool)
+ result[i].Target.DisableProxy = result[i].Target.DisableProxy && ok
+
+ var bandwidthStr string
+ var bandwidth uint64
+ var err error
+ if bandwidthStr, ok = target["bandwidth_limt"].(string); ok {
+ bandwidth, err = humanize.ParseBytes(bandwidthStr)
+ if err != nil {
+ log.Printf("[WARN] invalid bandwidth value %q: %v", bandwidthStr, err)
+ errs = append(errs, diag.Errorf("rule[%d].target.bandwidth_limt is invalid. Make sure to only use k, m or g as a suffix", i)...)
+ } else {
+ result[i].Target.BandwidthLimit = int64(bandwidth)
+ }
+ }
+
+ var healthcheckDuration string
+ if healthcheckDuration, ok = target["health_check_period"].(string); ok {
+ result[i].Target.HealthCheckPeriod, err = time.ParseDuration(healthcheckDuration)
+ if err != nil {
+ log.Printf("[WARN] invalid healthcheck value %q: %v", healthcheckDuration, err)
+ errs = append(errs, diag.Errorf("rule[%d].target.health_check_period is invalid. Make sure to use valid Go duration notation", i)...)
+ }
+ }
+
+ var pathstyle string
+ pathstyle, _ = target["path_style"].(string)
+ switch strings.TrimSpace(strings.ToLower(pathstyle)) {
+ case "on":
+ result[i].Target.PathStyle = S3PathSyleOn
+ case "off":
+ result[i].Target.PathStyle = S3PathSyleOff
+ default:
+ if pathstyle != "auto" && pathstyle != "" {
+ errs = append(errs, diag.Diagnostic{
+ Severity: diag.Warning,
+ Summary: fmt.Sprintf("rule[%d].target.path_style must be \"on\", \"off\" or \"auto\". Defaulting to \"auto\"", i),
+ })
+ }
+ result[i].Target.PathStyle = S3PathSyleAuto
+ }
+
+ }
+ return
+}
diff --git a/minio/resource_minio_s3_bucket_replication_test.go b/minio/resource_minio_s3_bucket_replication_test.go
new file mode 100644
index 00000000..7de618d5
--- /dev/null
+++ b/minio/resource_minio_s3_bucket_replication_test.go
@@ -0,0 +1,1655 @@
+package minio
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+ "github.com/minio/madmin-go/v3"
+ "github.com/minio/minio-go/v7/pkg/replication"
+)
+
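+// kOneWayComplexResource replicates bucket A towards buckets B, C and D with three rules
+// exercising priorities, prefixes, tags and a bandwidth limit.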
+const kOneWayComplexResource = `
+resource "minio_s3_bucket_replication" "replication_in_all" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ enabled = false
+
+ delete_replication = true
+ delete_marker_replication = false
+ existing_object_replication = false
+ metadata_sync = false
+
+ priority = 10
+ prefix = "bar/"
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ host = local.second_minio_host
+ region = "eu-west-1"
+ secure = false
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ rule {
+ delete_replication = false
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = false
+
+ priority = 100
+ prefix = "foo/"
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_c.bucket
+ host = local.third_minio_host
+ region = "ap-south-1"
+ secure = false
+ access_key = minio_iam_service_account.replication_in_c.access_key
+ secret_key = minio_iam_service_account.replication_in_c.secret_key
+ health_check_period = "60s"
+ }
+ }
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = false
+ existing_object_replication = true
+ metadata_sync = false
+
+ priority = 200
+ tags = {
+ "foo" = "bar"
+ }
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_d.bucket
+ host = local.fourth_minio_host
+ region = "us-west-2"
+ secure = false
+ bandwidth_limt = "1G"
+ access_key = minio_iam_service_account.replication_in_d.access_key
+ secret_key = minio_iam_service_account.replication_in_d.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b,
+ minio_s3_bucket_versioning.my_bucket_in_c,
+ minio_s3_bucket_versioning.my_bucket_in_d,
+ ]
+}`
+
+// kTemplateComplexResource is formatted with the following arguments, in order:
+// (
+//	resourceName,
+//	minioIdentifier,
+//	minioProvider,
+//	ruleOneMinioIdentifier,
+//	ruleOneMinioHost,
+//	ruleOneMinioRegion,
+//	ruleOneMinioIdentifier,
+//	ruleOneMinioIdentifier,
+//	ruleTwoMinioIdentifier,
+//	ruleTwoMinioHost,
+//	ruleTwoMinioRegion,
+//	ruleTwoMinioIdentifier,
+//	ruleTwoMinioIdentifier,
+//	ruleThreeMinioIdentifier,
+//	ruleThreeMinioHost,
+//	ruleThreeMinioRegion,
+//	ruleThreeMinioIdentifier,
+//	ruleThreeMinioIdentifier,
+// )
+const kTemplateComplexResource = `
+resource "minio_s3_bucket_replication" "%s" {
+ bucket = minio_s3_bucket.my_bucket_in_%s.bucket
+ provider = %s
+
+ rule {
+ enabled = false
+
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ prefix = "bar/"
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_%s.bucket
+ host = local.%s_minio_host
+ region = %q
+ secure = false
+ access_key = minio_iam_service_account.replication_in_%s.access_key
+ secret_key = minio_iam_service_account.replication_in_%s.secret_key
+ }
+ }
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ prefix = "foo/"
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_%s.bucket
+ host = local.%s_minio_host
+ region = %q
+ secure = false
+ access_key = minio_iam_service_account.replication_in_%s.access_key
+ secret_key = minio_iam_service_account.replication_in_%s.secret_key
+ health_check_period = "60s"
+ }
+ }
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = false
+ existing_object_replication = true
+ metadata_sync = true
+
+ tags = {
+ "foo" = "bar"
+ }
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_%s.bucket
+ host = local.%s_minio_host
+ region = %q
+ secure = false
+ access_key = minio_iam_service_account.replication_in_%s.access_key
+ secret_key = minio_iam_service_account.replication_in_%s.secret_key
+ bandwidth_limt = "1G"
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b,
+ minio_s3_bucket_versioning.my_bucket_in_c,
+ minio_s3_bucket_versioning.my_bucket_in_d,
+ ]
+}
+`
+
+// Rule 1 ring is a -> b -> c -> d
+// Rule 2 ring is a -> c -> d -> b
+// Rule 3 ring is a -> d -> b -> c
+// a -> eu-central-1
+// b -> eu-west-1
+// c -> ap-south-1
+// d -> us-west-2
+var kTwoWayComplexResource = fmt.Sprintf(kTemplateComplexResource,
+ "replication_in_bcd",
+ "a",
+ "minio",
+ // Rule 1
+ "b",
+ "second",
+ "eu-west-1",
+ "b",
+ "b",
+ // Rule 2
+ "c",
+ "third",
+ "ap-south-1",
+ "c",
+ "c",
+ // Rule 3
+ "d",
+ "fourth",
+ "us-west-2",
+ "d",
+ "d",
+) +
+ fmt.Sprintf(kTemplateComplexResource,
+ "replication_in_acd",
+ "b",
+ "secondminio",
+ // Rule 1
+ "c",
+ "third",
+ "ap-south-1",
+ "c",
+ "c",
+ // Rule 2
+ "d",
+ "fourth",
+ "us-west-2",
+ "d",
+ "d",
+ // Rule 3
+ "a",
+ "primary",
+ "eu-central-1",
+ "a",
+ "a",
+ ) +
+ fmt.Sprintf(kTemplateComplexResource,
+ "replication_in_abd",
+ "c",
+ "thirdminio",
+ // Rule 1
+ "d",
+ "fourth",
+ "us-west-2",
+ "d",
+ "d",
+ // Rule 2
+ "a",
+ "primary",
+ "eu-central-1",
+ "a",
+ "a",
+ // Rule 3
+ "b",
+ "second",
+ "eu-west-1",
+ "b",
+ "b",
+ ) +
+ fmt.Sprintf(kTemplateComplexResource,
+ "replication_in_abc",
+ "d",
+ "fourthminio",
+ // Rule 1
+ "a",
+ "primary",
+ "eu-central-1",
+ "a",
+ "a",
+ // Rule 2
+ "b",
+ "second",
+ "eu-west-1",
+ "b",
+ "b",
+ // Rule 3
+ "c",
+ "third",
+ "ap-south-1",
+ "c",
+ "c",
+ )
+
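+// kOneWaySimpleResource declares a single replication rule from bucket A to bucket B.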
+const kOneWaySimpleResource = `
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = false
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ host = local.second_minio_host
+ secure = false
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}`
+
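+// kTwoWaySimpleResource declares symmetric replication rules between buckets A and B,
+// with metadata_sync enabled on both sides.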
+const kTwoWaySimpleResource = `
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ priority = 100
+
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ host = local.second_minio_host
+ secure = false
+ region = "eu-west-1"
+ syncronous = true
+ bandwidth_limt = "100M"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}
+
+resource "minio_s3_bucket_replication" "replication_in_a" {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ provider = secondminio
+
+ rule {
+ priority = 100
+
+ delete_replication = true
+ delete_marker_replication = true
+ existing_object_replication = true
+ metadata_sync = true
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+ host = local.primary_minio_host
+ region = "eu-north-1"
+ secure = false
+ bandwidth_limt = "800M"
+ health_check_period = "2m"
+ access_key = minio_iam_service_account.replication_in_a.access_key
+ secret_key = minio_iam_service_account.replication_in_a.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}`
+
+func TestAccS3BucketReplication_oneway_simple(t *testing.T) {
+ bucketName := acctest.RandomWithPrefix("tf-acc-test-a")
+ secondBucketName := acctest.RandomWithPrefix("tf-acc-test-b")
+ username := acctest.RandomWithPrefix("tf-acc-usr")
+
+ primaryMinioEndpoint := os.Getenv("MINIO_ENDPOINT")
+ secondaryMinioEndpoint := os.Getenv("SECOND_MINIO_ENDPOINT")
+
+ // Tests cannot run in parallel as the remote target endpoints would conflict
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) +
+ kOneWaySimpleResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 100000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_b",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.0.priority", // This is omitted in our test case, so it gets automatically generated and thus mismatches
+ },
+ Config: kOneWaySimpleResource,
+ },
+ },
+ })
+}
+
+func TestAccS3BucketReplication_oneway_simple_update(t *testing.T) {
+ bucketName := acctest.RandomWithPrefix("tf-acc-test-a")
+ secondBucketName := acctest.RandomWithPrefix("tf-acc-test-b")
+ username := acctest.RandomWithPrefix("tf-acc-usr")
+
+ primaryMinioEndpoint := os.Getenv("MINIO_ENDPOINT")
+ secondaryMinioEndpoint := os.Getenv("SECOND_MINIO_ENDPOINT")
+
+ // Tests cannot run in parallel as the remote target endpoints would conflict
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) +
+ kOneWaySimpleResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 100000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) + `
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ priority = 50
+ delete_replication = false
+ delete_marker_replication = false
+ existing_object_replication = true
+ metadata_sync = false
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ host = local.second_minio_host
+ secure = false
+ bandwidth_limt = "150M"
+ health_check_period = "5m"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}`,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 50,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: false,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Minute * 5,
+ BandwidthLimit: 150000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) +
+ `
+resource "minio_s3_bucket_replication" "replication_in_b" {
+ bucket = minio_s3_bucket.my_bucket_in_a.bucket
+
+ rule {
+ enabled = false
+
+ delete_replication = false
+ delete_marker_replication = false
+ existing_object_replication = true
+ metadata_sync = false
+
+ target {
+ bucket = minio_s3_bucket.my_bucket_in_b.bucket
+ host = local.second_minio_host
+ secure = false
+ bandwidth_limt = "150M"
+ health_check_period = "5m"
+ access_key = minio_iam_service_account.replication_in_b.access_key
+ secret_key = minio_iam_service_account.replication_in_b.secret_key
+ }
+ }
+
+ depends_on = [
+ minio_s3_bucket_versioning.my_bucket_in_a,
+ minio_s3_bucket_versioning.my_bucket_in_b
+ ]
+}`,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: false,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Minute * 5,
+ BandwidthLimit: 150000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) +
+ kOneWaySimpleResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 100000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_b",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.0.priority", // This is omitted in our test case, so it gets automatically generated and thus mismatches
+ },
+ Config: kOneWaySimpleResource,
+ },
+ },
+ })
+}
+func TestAccS3BucketReplication_oneway_complex(t *testing.T) {
+ bucketName := acctest.RandomWithPrefix("tf-acc-test-a")
+ secondBucketName := acctest.RandomWithPrefix("tf-acc-test-b")
+ thirdBucketName := acctest.RandomWithPrefix("tf-acc-test-c")
+ fourthBucketName := acctest.RandomWithPrefix("tf-acc-test-d")
+ username := acctest.RandomWithPrefix("tf-acc-usr")
+
+ primaryMinioEndpoint := os.Getenv("MINIO_ENDPOINT")
+ secondaryMinioEndpoint := os.Getenv("SECOND_MINIO_ENDPOINT")
+ thirdMinioEndpoint := os.Getenv("THIRD_MINIO_ENDPOINT")
+ fourthMinioEndpoint := os.Getenv("FOURTH_MINIO_ENDPOINT")
+
+ // Tests cannot run in parallel as the remote target endpoints would conflict
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint, thirdMinioEndpoint, fourthMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_c", "thirdminio", thirdBucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_d", "fourthminio", fourthBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName, thirdBucketName, fourthBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 4) +
+ kOneWayComplexResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_all",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 10,
+
+ Prefix: "bar/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: false,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-west-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 100,
+
+ Prefix: "foo/",
+ Tags: map[string]string{},
+
+ DeleteReplication: false,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: thirdBucketName,
+ StorageClass: "",
+ Host: thirdMinioEndpoint,
+ Path: "/",
+ Region: "ap-south-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 60,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 200,
+
+ Prefix: "",
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: false,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: fourthBucketName,
+ StorageClass: "",
+ Host: fourthMinioEndpoint,
+ Path: "/",
+ Region: "us-west-2",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 1 * humanize.BigGByte.Int64(),
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_all",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.1.target.0.secret_key",
+ "rule.2.target.0.secret_key",
+ },
+ Config: kOneWayComplexResource,
+ },
+ },
+ })
+}
+
+func TestAccS3BucketReplication_twoway_simple(t *testing.T) {
+ bucketName := acctest.RandomWithPrefix("tf-acc-test-a")
+ secondBucketName := acctest.RandomWithPrefix("tf-acc-test-b")
+ username := acctest.RandomWithPrefix("tf-acc-usr")
+
+ primaryMinioEndpoint := os.Getenv("MINIO_ENDPOINT")
+ secondaryMinioEndpoint := os.Getenv("SECOND_MINIO_ENDPOINT")
+
+ // Tests cannot run in parallel as the remote target endpoints would conflict
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 2) +
+ kTwoWaySimpleResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_b",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 100,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Region: "eu-west-1",
+ Syncronous: true,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 100000000,
+ },
+ },
+ },
+ ),
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_a",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: true,
+ Priority: 100,
+
+ Prefix: "",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: bucketName,
+ StorageClass: "",
+ Host: primaryMinioEndpoint,
+ Region: "eu-north-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 120,
+ BandwidthLimit: 800000000,
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_b",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ },
+ Config: kTwoWaySimpleResource,
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_a",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ },
+ Config: kTwoWaySimpleResource,
+ },
+ },
+ })
+}
+func TestAccS3BucketReplication_twoway_complex(t *testing.T) {
+ bucketName := acctest.RandomWithPrefix("tf-acc-test-a")
+ secondBucketName := acctest.RandomWithPrefix("tf-acc-test-b")
+ thirdBucketName := acctest.RandomWithPrefix("tf-acc-test-c")
+ fourthBucketName := acctest.RandomWithPrefix("tf-acc-test-d")
+ username := acctest.RandomWithPrefix("tf-acc-usr")
+
+ primaryMinioEndpoint := os.Getenv("MINIO_ENDPOINT")
+ secondaryMinioEndpoint := os.Getenv("SECOND_MINIO_ENDPOINT")
+ thirdMinioEndpoint := os.Getenv("THIRD_MINIO_ENDPOINT")
+ fourthMinioEndpoint := os.Getenv("FOURTH_MINIO_ENDPOINT")
+
+ // Tests cannot run in parallel as the remote target endpoints would conflict
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketReplicationConfigLocals(primaryMinioEndpoint, secondaryMinioEndpoint, thirdMinioEndpoint, fourthMinioEndpoint) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_a", "minio", bucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_b", "secondminio", secondBucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_c", "thirdminio", thirdBucketName) +
+ testAccBucketReplicationConfigBucket("my_bucket_in_d", "fourthminio", fourthBucketName) +
+ testAccBucketReplicationConfigPolicy(bucketName, secondBucketName, thirdBucketName, fourthBucketName) +
+ testAccBucketReplicationConfigServiceAccount(username, 4) +
+ kTwoWayComplexResource,
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_bcd",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 3,
+
+ Prefix: "bar/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-west-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 2,
+
+ Prefix: "foo/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: thirdBucketName,
+ StorageClass: "",
+ Host: thirdMinioEndpoint,
+ Path: "/",
+ Region: "ap-south-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 60,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: fourthBucketName,
+ StorageClass: "",
+ Host: fourthMinioEndpoint,
+ Path: "/",
+ Region: "us-west-2",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 1 * humanize.BigGByte.Int64(),
+ },
+ },
+ },
+ ),
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_acd",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 3,
+
+ Prefix: "bar/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: thirdBucketName,
+ StorageClass: "",
+ Host: thirdMinioEndpoint,
+ Path: "/",
+ Region: "ap-south-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 2,
+
+ Prefix: "foo/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: fourthBucketName,
+ StorageClass: "",
+ Host: fourthMinioEndpoint,
+ Path: "/",
+ Region: "us-west-2",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 60,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: bucketName,
+ StorageClass: "",
+ Host: primaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-central-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 1 * humanize.BigGByte.Int64(),
+ },
+ },
+ },
+ ),
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_abd",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 3,
+
+ Prefix: "bar/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: fourthBucketName,
+ StorageClass: "",
+ Host: fourthMinioEndpoint,
+ Path: "/",
+ Region: "us-west-2",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 2,
+
+ Prefix: "foo/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: bucketName,
+ StorageClass: "",
+ Host: primaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-central-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 60,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-west-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 1 * humanize.BigGByte.Int64(),
+ },
+ },
+ },
+ ),
+ testAccCheckBucketHasReplication(
+ "minio_s3_bucket_replication.replication_in_abc",
+ []S3MinioBucketReplicationRule{
+ {
+ Enabled: false,
+ Priority: 3,
+
+ Prefix: "bar/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: bucketName,
+ StorageClass: "",
+ Host: primaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-central-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 2,
+
+ Prefix: "foo/",
+ Tags: map[string]string{},
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: true,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: secondBucketName,
+ StorageClass: "",
+ Host: secondaryMinioEndpoint,
+ Path: "/",
+ Region: "eu-west-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 60,
+ BandwidthLimit: 0,
+ },
+ },
+ {
+ Enabled: true,
+ Priority: 1,
+
+ Prefix: "",
+ Tags: map[string]string{
+ "foo": "bar",
+ },
+
+ DeleteReplication: true,
+ DeleteMarkerReplication: false,
+ ExistingObjectReplication: true,
+ MetadataSync: true,
+
+ Target: S3MinioBucketReplicationRuleTarget{
+ Bucket: thirdBucketName,
+ StorageClass: "",
+ Host: thirdMinioEndpoint,
+ Path: "/",
+ Region: "ap-south-1",
+ Syncronous: false,
+ Secure: false,
+ PathStyle: S3PathSyleAuto,
+ HealthCheckPeriod: time.Second * 30,
+ BandwidthLimit: 1 * humanize.BigGByte.Int64(),
+ },
+ },
+ },
+ ),
+ ),
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_bcd",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.1.target.0.secret_key",
+ "rule.2.target.0.secret_key",
+ // Priorities are ignored in this test case, as they get automatically generated and thus mismatch
+ "rule.0.priority",
+ "rule.1.priority",
+ "rule.2.priority",
+ },
+ Config: kTwoWayComplexResource,
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_acd",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.1.target.0.secret_key",
+ "rule.2.target.0.secret_key",
+ // Priorities are ignored in this test case, as they get automatically generated and thus mismatch
+ "rule.0.priority",
+ "rule.1.priority",
+ "rule.2.priority",
+ },
+ Config: kTwoWayComplexResource,
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_abd",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.1.target.0.secret_key",
+ "rule.2.target.0.secret_key",
+ // Priorities are ignored in this test case, as they get automatically generated and thus mismatch
+ "rule.0.priority",
+ "rule.1.priority",
+ "rule.2.priority",
+ },
+ Config: kTwoWayComplexResource,
+ },
+ {
+ ResourceName: "minio_s3_bucket_replication.replication_in_abc",
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{
+ "rule.0.target.0.secret_key",
+ "rule.1.target.0.secret_key",
+ "rule.2.target.0.secret_key",
+ // Priorities are ignored in this test case, as they get automatically generated and thus mismatch
+ "rule.0.priority",
+ "rule.1.priority",
+ "rule.2.priority",
+ },
+ Config: kTwoWayComplexResource,
+ },
+ },
+ })
+}
+
+var kMinioHostIdentifier = []string{
+ "primary",
+ "second",
+ "third",
+ "fourth",
+}
+
+var kMinioHostLetter = []string{
+ "a",
+ "b",
+ "c",
+ "d",
+}
+
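+// testAccBucketReplicationConfigLocals renders a locals block exposing each endpoint as
+// <identifier>_minio_host, in the order primary, second, third, fourth.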
+func testAccBucketReplicationConfigLocals(minioHost ...string) string {
+ var varBlock string
+ for i, val := range minioHost {
+ varBlock = varBlock + fmt.Sprintf(" %s_minio_host = %q\n", kMinioHostIdentifier[i], val)
+ }
+ return fmt.Sprintf(`
+locals {
+ %s
+}
+`, varBlock)
+}
+
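+// testAccBucketReplicationConfigBucket renders a bucket and its versioning configuration
+// (versioning is required for replication) against the given provider alias.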
+func testAccBucketReplicationConfigBucket(resourceName string, provider string, bucketName string) string {
+ return fmt.Sprintf(`
+resource "minio_s3_bucket" %q {
+ provider = %s
+ bucket = %q
+}
+
+resource "minio_s3_bucket_versioning" %q {
+ provider = %s
+ bucket = %q
+
+ versioning_configuration {
+ status = "Enabled"
+ }
+
+ depends_on = [
+ minio_s3_bucket.%s
+ ]
+}
+`, resourceName, provider, bucketName, resourceName, provider, bucketName, resourceName)
+}
+
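+// testAccBucketReplicationConfigServiceAccount renders, for each of the first `count` MinIO
+// instances, the IAM policy, user, policy attachment and service account used as replication
+// credentials.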
+func testAccBucketReplicationConfigServiceAccount(username string, count int) (varBlock string) {
+ for i := 0; i < count; i++ {
+ indentifier := kMinioHostIdentifier[i]
+ if i == 0 {
+ indentifier = "minio"
+ } else {
+ indentifier = indentifier + "minio"
+ }
+ letter := kMinioHostLetter[i]
+ varBlock = varBlock + fmt.Sprintf(`
+resource "minio_iam_policy" "replication_in_%s" {
+ provider = %s
+ name = "ReplicationToMyBucketPolicy"
+ policy = data.minio_iam_policy_document.replication_policy.json
+}
+
+resource "minio_iam_user" "replication_in_%s" {
+ provider = %s
+ name = %q
+ force_destroy = true
+}
+
+resource "minio_iam_user_policy_attachment" "replication_in_%s" {
+ provider = %s
+ user_name = minio_iam_user.replication_in_%s.name
+ policy_name = minio_iam_policy.replication_in_%s.id
+}
+
+resource "minio_iam_service_account" "replication_in_%s" {
+ provider = %s
+ target_user = minio_iam_user.replication_in_%s.name
+
+ depends_on = [
+ minio_iam_user_policy_attachment.replication_in_%s,
+ minio_iam_policy.replication_in_%s,
+ ]
+}
+
+`, letter, identifier, letter, identifier, username, letter, identifier, letter, letter, letter, identifier, letter, letter, letter)
+ }
+ return varBlock
+}
+
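+// testAccBucketReplicationConfigPolicy renders a minio_iam_policy_document granting the
+// permissions required to read replication configuration from, and replicate objects into,
+// the given buckets.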
+func testAccBucketReplicationConfigPolicy(bucketArn ...string) string {
+ bucketObjectArn := make([]string, len(bucketArn))
+ for i, bucket := range bucketArn {
+ bucketArn[i] = fmt.Sprintf("\"arn:aws:s3:::%s\"", bucket)
+ bucketObjectArn[i] = fmt.Sprintf("\"arn:aws:s3:::%s/*\"", bucket)
+ }
+ return fmt.Sprintf(`
+data "minio_iam_policy_document" "replication_policy" {
+ statement {
+ sid = "ReadBuckets"
+ effect = "Allow"
+ resources = ["arn:aws:s3:::*"]
+
+ actions = [
+ "s3:ListBucket",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicationOnBucket"
+ effect = "Allow"
+ resources = [%s]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ListBucket",
+ "s3:ListBucketMultipartUploads",
+ "s3:GetBucketLocation",
+ "s3:GetBucketVersioning",
+ "s3:GetBucketObjectLockConfiguration",
+ "s3:GetEncryptionConfiguration",
+ ]
+ }
+
+ statement {
+ sid = "EnableReplicatingDataIntoBucket"
+ effect = "Allow"
+ resources = [%s]
+
+ actions = [
+ "s3:GetReplicationConfiguration",
+ "s3:ReplicateTags",
+ "s3:AbortMultipartUpload",
+ "s3:GetObject",
+ "s3:GetObjectVersion",
+ "s3:GetObjectVersionTagging",
+ "s3:PutObject",
+ "s3:PutObjectRetention",
+ "s3:PutBucketObjectLockConfiguration",
+ "s3:PutObjectLegalHold",
+ "s3:DeleteObject",
+ "s3:ReplicateObject",
+ "s3:ReplicateDelete",
+ ]
+ }
+}
+`, strings.Join(bucketArn, ","), strings.Join(bucketObjectArn, ","))
+}
+
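+// testAccCheckBucketHasReplication verifies that the replication rules and remote targets
+// reported by the server match the expected configuration on the given resource, resolving
+// the right provider instance from the resource's provider address.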
+func testAccCheckBucketHasReplication(n string, config []S3MinioBucketReplicationRule) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ rs, ok := s.RootModule().Resources[n]
+ if !ok {
+ return fmt.Errorf("not found: %s", n)
+ }
+
+ if rs.Primary.ID == "" {
+ return fmt.Errorf("no ID is set")
+ }
+
+ var provider *S3MinioClient
+ switch rs.Provider {
+ case "registry.terraform.io/hashicorp/minio":
+ provider = testAccProvider.Meta().(*S3MinioClient)
+ case "registry.terraform.io/hashicorp/secondminio":
+ provider = testAccSecondProvider.Meta().(*S3MinioClient)
+ case "registry.terraform.io/hashicorp/thirdminio":
+ provider = testAccThirdProvider.Meta().(*S3MinioClient)
+ case "registry.terraform.io/hashicorp/fourthminio":
+ provider = testAccFourthProvider.Meta().(*S3MinioClient)
+ default:
+ return fmt.Errorf("Provider %q unknown", rs.Provider)
+ }
+
+ minioC := provider.S3Client
+ minioadm := provider.S3Admin
+ actualConfig, err := minioC.GetBucketReplication(context.Background(), rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error on GetBucketReplication: %v", err)
+ }
+
+ if len(actualConfig.Rules) != len(config) {
+ return fmt.Errorf("non-equivalent status error:\n\nexpected: %d\n\ngot: %d", len(actualConfig.Rules), len(config))
+ }
+
+ // Check computed fields
+ // for i, rule := range config {
+ // if id, ok := rs.Primary.Attributes[fmt.Sprintf("rule.%d.id", i)]; !ok || len(id) != 20 {
+ // return fmt.Errorf("Rule#%d doesn't have a valid ID: %q", i, id)
+ // }
+ // if arn, ok := rs.Primary.Attributes[fmt.Sprintf("rule.%d.arn", i)]; !ok || len(arn) != len(fmt.Sprintf("arn:minio:replication::xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:%s", rule.Target.Bucket)) {
+ // return fmt.Errorf("Rule#%d doesn't have a valid ARN:\n\nexpected: arn:minio:replication::xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx:%s\n\ngot: %v", i, rule.Target.Bucket, arn)
+ // }
+ // }
+
+ // Check bucket replication
+ actualReplicationConfigByPriority := map[int]replication.Rule{}
+ for _, rule := range actualConfig.Rules {
+ actualReplicationConfigByPriority[rule.Priority] = rule
+ }
+ for i, rule := range config {
+ existingRule, ok := actualReplicationConfigByPriority[rule.Priority]
+ if !ok {
+ return fmt.Errorf("Rule with priority %d not found. Available: %v", rule.Priority, actualReplicationConfigByPriority)
+ }
+ if (existingRule.Status == replication.Enabled) != rule.Enabled {
+ return fmt.Errorf("Mismatch status on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, (existingRule.Status == replication.Enabled), rule.Enabled)
+ }
+ if existingRule.Priority != rule.Priority {
+ return fmt.Errorf("Mismatch priority on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingRule.Priority, rule.Priority)
+ }
+ if (existingRule.DeleteMarkerReplication.Status == replication.Enabled) != rule.DeleteMarkerReplication {
+ return fmt.Errorf("Mismatch DeleteMarkerReplication on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, (existingRule.DeleteMarkerReplication.Status == replication.Enabled), rule.DeleteMarkerReplication)
+ }
+ if (existingRule.DeleteReplication.Status == replication.Enabled) != rule.DeleteReplication {
+ return fmt.Errorf("Mismatch DeleteReplication on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, (existingRule.DeleteReplication.Status == replication.Enabled), rule.DeleteReplication)
+ }
+ if (existingRule.SourceSelectionCriteria.ReplicaModifications.Status == replication.Enabled) != rule.MetadataSync {
+ return fmt.Errorf("Mismatch SourceSelectionCriteria on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, (existingRule.SourceSelectionCriteria.ReplicaModifications.Status == replication.Enabled), rule.MetadataSync)
+ }
+ if (existingRule.ExistingObjectReplication.Status == replication.Enabled) != rule.ExistingObjectReplication {
+ return fmt.Errorf("Mismatch ExistingObjectReplication on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, (existingRule.ExistingObjectReplication.Status == replication.Enabled), rule.ExistingObjectReplication)
+ }
+ if !strings.HasPrefix(existingRule.Destination.Bucket, fmt.Sprintf("arn:minio:replication:%s:", rule.Target.Region)) {
+ return fmt.Errorf("Mismatch ARN bucket prefix on res %q, rule#%d:\n\nexpected: arn:minio:replication:%s:\n\ngot: %v", n, i, rule.Target.Region, existingRule.Destination.Bucket)
+ }
+ if !strings.HasSuffix(existingRule.Destination.Bucket, ":"+rule.Target.Bucket) {
+ return fmt.Errorf("Mismatch Target bucket name on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingRule.Destination.Bucket, rule.Target.Bucket)
+ }
+ if existingRule.Destination.StorageClass != rule.Target.StorageClass {
+ return fmt.Errorf("Mismatch Target StorageClass on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingRule.Destination.StorageClass, rule.Target.StorageClass)
+ }
+ if existingRule.Prefix() != rule.Prefix {
+ return fmt.Errorf("Mismatch Prefix on res %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingRule.Prefix(), rule.Prefix)
+ }
+ // existingRule.Tags() renders the rule tags as an ampersand-separated list of k=v pairs;
+ // drop empty entries before comparing against the expected tag set
+ tags := []string{}
+ for _, kv := range strings.Split(existingRule.Tags(), "&") {
+ if kv == "" {
+ continue
+ }
+ tags = append(tags, kv)
+ }
+ if len(tags) != len(rule.Tags) {
+ return fmt.Errorf("Mismatch tags %q, rule#%d:\n\nexpected: %v (size %d)\n\ngot: %v (size %d)", n, i, tags, len(tags), rule.Tags, len(rule.Tags))
+ }
+ for _, kv := range tags {
+ val := strings.SplitN(kv, "=", 2)
+ k := val[0]
+ v := val[1]
+ if cv, ok := rule.Tags[k]; !ok || v != cv {
+ return fmt.Errorf("Mismatch tags %q, rule#%d:\n\nexpected: %s=%q\n\ngot: %s=%q (found: %t)", n, i, k, v, k, cv, ok)
+ }
+ }
+ }
+
+ // Check remote target
+ actualTargets, err := minioadm.ListRemoteTargets(context.Background(), rs.Primary.ID, "")
+ if err != nil {
+ return fmt.Errorf("error on ListRemoteTargets: %v", err)
+ }
+
+ if len(actualTargets) != len(config) {
+ return fmt.Errorf("non-equivalent status error:\n\nexpected: %d\n\ngot: %d", len(actualTargets), len(config))
+ }
+ actualRemoteTargetByArn := map[string]madmin.BucketTarget{}
+ for _, target := range actualTargets {
+ actualRemoteTargetByArn[target.Arn] = target
+ }
+ for i, rule := range config {
+ existingRule, ok := actualReplicationConfigByPriority[rule.Priority]
+ if !ok {
+ return fmt.Errorf("Rule with priority %d not found. Available: %v", rule.Priority, actualReplicationConfigByPriority)
+ }
+ existingTarget, ok := actualRemoteTargetByArn[existingRule.Destination.Bucket]
+ if !ok {
+ return fmt.Errorf("Target with ARN %q not found. Available: %v", existingRule.Destination.Bucket, actualRemoteTargetByArn)
+ }
+
+ if existingTarget.Endpoint != rule.Target.Host {
+ return fmt.Errorf("Mismatch endpoint %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.Endpoint, rule.Target.Host)
+ }
+ if existingTarget.Secure != rule.Target.Secure {
+ return fmt.Errorf("Mismatch Secure %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.Secure, rule.Target.Secure)
+ }
+ if existingTarget.BandwidthLimit != rule.Target.BandwidthLimit {
+ return fmt.Errorf("Mismatch BandwidthLimit %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.BandwidthLimit, rule.Target.BandwidthLimit)
+ }
+ if existingTarget.HealthCheckDuration != rule.Target.HealthCheckPeriod {
+ return fmt.Errorf("Mismatch HealthCheckDuration %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.HealthCheckDuration, rule.Target.HealthCheckPeriod)
+ }
+ bucket := rule.Target.Bucket
+ cleanPath := strings.TrimPrefix(strings.TrimPrefix(rule.Target.Path, "/"), ".")
+ if cleanPath != "" {
+ bucket = cleanPath + "/" + rule.Target.Bucket
+ }
+ if existingTarget.TargetBucket != bucket {
+ return fmt.Errorf("Mismatch TargetBucket %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.TargetBucket, bucket)
+ }
+ if existingTarget.ReplicationSync != rule.Target.Syncronous {
+ return fmt.Errorf("Mismatch synchronous mode %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.ReplicationSync, rule.Target.Syncronous)
+ }
+ if existingTarget.Region != rule.Target.Region {
+ return fmt.Errorf("Mismatch region %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.Region, rule.Target.Region)
+ }
+ if existingTarget.Path != rule.Target.PathStyle.String() {
+ return fmt.Errorf("Mismatch path style %q, rule#%d:\n\nexpected: %v\n\ngot: %v", n, i, existingTarget.Path, rule.Target.PathStyle.String())
+ }
+ // Asserting the exact AccessKey value is too painful. Furthermore, since MinIO validates the credentials before accepting a new remote target, checking the exact value would add little.
+ if len(existingTarget.Credentials.AccessKey) != 20 {
+ return fmt.Errorf("Mismatch AccessKey %q, rule#%d:\n\nexpected: 20-char string\n\ngot: %v", n, i, existingTarget.Credentials.AccessKey)
+ }
+ }
+
+ return nil
+ }
+}
diff --git a/minio/resource_minio_s3_bucket_test.go b/minio/resource_minio_s3_bucket_test.go
index 865391fb..1d1f683e 100644
--- a/minio/resource_minio_s3_bucket_test.go
+++ b/minio/resource_minio_s3_bucket_test.go
@@ -12,6 +12,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+ "github.com/minio/minio-go/v7"
)
func TestAccMinioS3Bucket_basic(t *testing.T) {
@@ -256,7 +257,7 @@ func TestAccMinioS3Bucket_PrivateBucketUnreadable(t *testing.T) {
preConfig := testAccMinioS3BucketConfigWithACL(ri, "private")
resourceName := "minio_s3_bucket.bucket"
- resource.ParallelTest(t, resource.TestCase{
+ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviders,
CheckDestroy: testAccCheckMinioS3BucketDestroy,
@@ -267,7 +268,7 @@ func TestAccMinioS3Bucket_PrivateBucketUnreadable(t *testing.T) {
testAccCheckMinioS3BucketExists(resourceName),
resource.TestCheckResourceAttr(
resourceName, "acl", "private"),
- testAccCheckBucketNotReadableAnonymously(resourceName),
+ testAccCheckBucketNotReadableAnonymously(ri),
),
},
},
@@ -308,9 +309,40 @@ func TestMinioS3BucketName(t *testing.T) {
}
}
-func testAccCheckMinioS3BucketDestroy(s *terraform.State) error {
- conn := testAccProvider.Meta().(*S3MinioClient).S3Client
+func testAccCheckMinioS3BucketDestroy(s *terraform.State) (err error) {
+
+ err = providerMinioS3BucketDestroy(testAccProvider.Meta().(*S3MinioClient).S3Client, s)
+ if err != nil {
+ return
+ }
+
+ if testAccSecondProvider.Meta() == nil {
+ return
+ }
+
+ err = providerMinioS3BucketDestroy(testAccSecondProvider.Meta().(*S3MinioClient).S3Client, s)
+ if err != nil {
+ return
+ }
+
+ if testAccThirdProvider.Meta() == nil {
+ return
+ }
+
+ err = providerMinioS3BucketDestroy(testAccThirdProvider.Meta().(*S3MinioClient).S3Client, s)
+ if err != nil {
+ return
+ }
+
+ if testAccFourthProvider.Meta() == nil {
+ return
+ }
+
+ err = providerMinioS3BucketDestroy(testAccFourthProvider.Meta().(*S3MinioClient).S3Client, s)
+ return
+}
+
+func providerMinioS3BucketDestroy(conn *minio.Client, s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
if rs.Type != "minio_s3_bucket" {
continue
@@ -409,7 +441,7 @@ func testAccBucketArn(randInt string) string {
}
func testAccBucketDomainName(randInt string) string {
- return fmt.Sprintf("http://localhost:9000/minio/%s", randInt)
+ return fmt.Sprintf("http://172.17.0.1:9000/minio/%s", randInt)
}
func testAccBucketACL(acl string) string {
@@ -504,7 +536,7 @@ func testAccCheckBucketNotReadableAnonymously(bucket string) resource.TestCheckF
return err
}
if resp.StatusCode != 403 {
- return fmt.Errorf("should not be able to list buckets")
+ return fmt.Errorf("should not be able to list buckets (Got a %d status)", resp.StatusCode)
}
return nil
}
diff --git a/minio/resource_minio_s3_bucket_versioning.go b/minio/resource_minio_s3_bucket_versioning.go
index f762b312..3d060242 100644
--- a/minio/resource_minio_s3_bucket_versioning.go
+++ b/minio/resource_minio_s3_bucket_versioning.go
@@ -72,7 +72,7 @@ func minioPutBucketVersioning(ctx context.Context, d *schema.ResourceData, meta
)
if err != nil {
- return NewResourceError("error putting bucket versioning configuration: %v", d.Id(), err)
+ return NewResourceError("error putting bucket versioning configuration", bucketVersioningConfig.MinioBucket, err)
}
d.SetId(bucketVersioningConfig.MinioBucket)
@@ -87,7 +87,7 @@ func minioReadBucketVersioning(ctx context.Context, d *schema.ResourceData, meta
versioningConfig, err := bucketVersioningConfig.MinioClient.GetBucketVersioning(ctx, d.Id())
if err != nil {
- return NewResourceError("failed to load bucket versioning", d.Id(), err)
+ return NewResourceError("failed to load bucket versioning", bucketVersioningConfig.MinioBucket, err)
}
config := make(map[string]interface{})
@@ -126,7 +126,7 @@ func minioDeleteBucketVersioning(ctx context.Context, d *schema.ResourceData, me
err := bucketVersioningConfig.MinioClient.SuspendVersioning(ctx, bucketVersioningConfig.MinioBucket)
if err != nil {
- return NewResourceError("error suspending bucket versioning: %s", bucketVersioningConfig.MinioBucket, err)
+ return NewResourceError("error suspending bucket versioning", bucketVersioningConfig.MinioBucket, err)
}
return nil
diff --git a/minio/resource_minio_s3_bucket_versioning_test.go b/minio/resource_minio_s3_bucket_versioning_test.go
index 7014c1d9..6a3adb6d 100644
--- a/minio/resource_minio_s3_bucket_versioning_test.go
+++ b/minio/resource_minio_s3_bucket_versioning_test.go
@@ -88,6 +88,30 @@ func TestAccS3BucketVersioning_update(t *testing.T) {
})
}
+func TestAccS3BucketVersioning_forceDestroy(t *testing.T) {
+ name := acctest.RandomWithPrefix("tf-version-force-destroy")
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+ ProviderFactories: testAccProviders,
+ CheckDestroy: testAccCheckMinioS3BucketDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccBucketVersioningObjectConfig(name, "Enabled"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccCheckMinioS3BucketExists("minio_s3_bucket.bucket"),
+ testAccCheckBucketHasVersioning(
+ "minio_s3_bucket_versioning.bucket",
+ S3MinioBucketVersioningConfiguration{
+ Status: "Enabled",
+ },
+ ),
+ ),
+ },
+ },
+ })
+}
+
func testAccBucketVersioningConfig(bucketName string, status string, prefixes []string, excludeFolders bool) string {
prefixSlice := []string{}
for _, v := range prefixes {
@@ -111,6 +135,22 @@ resource "minio_s3_bucket_versioning" "bucket" {
`, bucketName, status, strings.Join(prefixSlice, ", "), excludeFolders)
}
+func testAccBucketVersioningObjectConfig(bucketName string, status string) string {
+ return fmt.Sprintf(`
+resource "minio_s3_bucket" "bucket" {
+ bucket = "%s"
+ force_destroy = true
+}
+
+resource "minio_s3_bucket_versioning" "bucket" {
+ bucket = minio_s3_bucket.bucket.bucket
+ versioning_configuration {
+ status = "%s"
+ }
+}
+`, bucketName, status)
+}
+
func testAccCheckBucketHasVersioning(n string, config S3MinioBucketVersioningConfiguration) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
diff --git a/minio/resource_minio_service_account.go b/minio/resource_minio_service_account.go
index b7ccb8ed..ea83339b 100644
--- a/minio/resource_minio_service_account.go
+++ b/minio/resource_minio_service_account.go
@@ -175,6 +175,12 @@ func minioReadServiceAccount(ctx context.Context, d *schema.ResourceData, meta i
return NewResourceError("reading service account failed", d.Id(), err)
}
+ _ = d.Set("disable_user", output.AccountStatus == "off")
+
+ if err := d.Set("target_user", output.ParentUser); err != nil {
+ return NewResourceError("reading service account failed", d.Id(), err)
+ }
+
_ = d.Set("policy", output.Policy)
return nil
diff --git a/minio/resource_minio_service_account_test.go b/minio/resource_minio_service_account_test.go
index 7a014f81..1751a9bc 100644
--- a/minio/resource_minio_service_account_test.go
+++ b/minio/resource_minio_service_account_test.go
@@ -30,8 +30,15 @@ func TestServiceAccount_basic(t *testing.T) {
Check: resource.ComposeTestCheckFunc(
testAccCheckMinioServiceAccountExists(resourceName, &serviceAccount),
testAccCheckMinioServiceAccountAttributes(resourceName, targetUser, status),
+ resource.TestCheckResourceAttr(resourceName, "target_user", targetUser),
),
},
+ {
+ ResourceName: resourceName,
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"update_secret", "secret_key"},
+ },
},
})
}
@@ -130,6 +137,12 @@ func TestServiceAccount_Policy(t *testing.T) {
testAccCheckMinioServiceAccountExists(resourceName2, &serviceAccount),
),
},
+ {
+ ResourceName: resourceName2,
+ ImportState: true,
+ ImportStateVerify: true,
+ ImportStateVerifyIgnore: []string{"update_secret", "secret_key"},
+ },
},
})
}
diff --git a/minio/utils.go b/minio/utils.go
index 604ec311..8a0a1b6d 100644
--- a/minio/utils.go
+++ b/minio/utils.go
@@ -6,7 +6,9 @@ import (
"errors"
"hash/crc32"
"log"
+ "strings"
"sync"
+ "time"
"github.com/aws/aws-sdk-go/aws"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
@@ -135,3 +137,14 @@ func NewMutexKV() *MutexKV {
store: make(map[string]*sync.Mutex),
}
}
+
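+// shortDur formats a time.Duration without trailing zero units (e.g. 1h0m0s -> "1h", 5m0s -> "5m").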
+func shortDur(d time.Duration) string {
+ s := d.String()
+ if strings.HasSuffix(s, "m0s") {
+ s = s[:len(s)-2]
+ }
+ if strings.HasSuffix(s, "h0m") {
+ s = s[:len(s)-2]
+ }
+ return s
+}
diff --git a/utils/mc_test_env.sh b/utils/mc_test_env.sh
new file mode 100644
index 00000000..e381b65b
--- /dev/null
+++ b/utils/mc_test_env.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [ -n "${MINIO_USER}" ] && [ -n "${MINIO_PASSWORD}" ] && [ -n "${MINIO_ENDPOINT}" ]; then
+ echo 'MC configuration set for "a"'
+ export MC_HOST_a="http://${MINIO_USER}:${MINIO_PASSWORD}@${MINIO_ENDPOINT}"
+fi
+if [ -n "${SECOND_MINIO_USER}" ] && [ -n "${SECOND_MINIO_PASSWORD}" ] && [ -n "${SECOND_MINIO_ENDPOINT}" ]; then
+ echo 'MC configuration set for "b"'
+ export MC_HOST_b="http://${SECOND_MINIO_USER}:${SECOND_MINIO_PASSWORD}@${SECOND_MINIO_ENDPOINT}"
+fi
+if [ -n "${THIRD_MINIO_USER}" ] && [ -n "${THIRD_MINIO_PASSWORD}" ] && [ -n "${THIRD_MINIO_ENDPOINT}" ]; then
+ echo 'MC configuration set for "c"'
+ export MC_HOST_c="http://${THIRD_MINIO_USER}:${THIRD_MINIO_PASSWORD}@${THIRD_MINIO_ENDPOINT}"
+fi
+if [ -n "${FOURTH_MINIO_USER}" ] && [ -n "${FOURTH_MINIO_PASSWORD}" ] && [ -n "${FOURTH_MINIO_ENDPOINT}" ]; then
+ echo 'MC configuration set for "d"'
+ export MC_HOST_d="http://${FOURTH_MINIO_USER}:${FOURTH_MINIO_PASSWORD}@${FOURTH_MINIO_ENDPOINT}"
+fi
\ No newline at end of file
diff --git a/utils/minio_acc_env.sh b/utils/minio_acc_env.sh
new file mode 100644
index 00000000..7d57ed14
--- /dev/null
+++ b/utils/minio_acc_env.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+export TF_ACC=0
+export MINIO_ENDPOINT=`docker network inspect bridge | jq -r .[].IPAM.Config[].Gateway`:9000
+export MINIO_USER=minio
+export MINIO_PASSWORD=minio123
+export MINIO_ENABLE_HTTPS=false
+export SECOND_MINIO_ENDPOINT=`docker network inspect bridge | jq -r .[].IPAM.Config[].Gateway`:9002
+export SECOND_MINIO_USER=minio
+export SECOND_MINIO_PASSWORD=minio321
+export SECOND_MINIO_ENABLE_HTTPS=false
+export THIRD_MINIO_ENDPOINT=`docker network inspect bridge | jq -r .[].IPAM.Config[].Gateway`:9004
+export THIRD_MINIO_USER=minio
+export THIRD_MINIO_PASSWORD=minio456
+export THIRD_MINIO_ENABLE_HTTPS=false
+export FOURTH_MINIO_ENDPOINT=`docker network inspect bridge | jq -r .[].IPAM.Config[].Gateway`:9006
+export FOURTH_MINIO_USER=minio
+export FOURTH_MINIO_PASSWORD=minio654
+export FOURTH_MINIO_ENABLE_HTTPS=false
\ No newline at end of file