From b0165721ba2e2f7cefd8d1128dd27de7f3db7680 Mon Sep 17 00:00:00 2001
From: Xi Bai
Date: Tue, 19 Dec 2023 11:27:17 +0000
Subject: [PATCH] allow users to retain template configurations

---
 README.md                |   2 +
 cluster/eks.tf           |  21 ++-----
 cluster/terraform.tfvars |   3 +
 cluster/vpc.tf           |   4 --
 config/data.tf           |  17 +++++
 config/ebs.tf            |  47 +++-----------
 config/route53.tf        |  56 +----------------
 config/s3.tf             | 131 +++++++--------------------------------
 config/terraform.tfvars  |  11 ++++
 config/variables.tf      |   9 +--
 10 files changed, 76 insertions(+), 225 deletions(-)
 create mode 100644 cluster/terraform.tfvars
 create mode 100644 config/terraform.tfvars

diff --git a/README.md b/README.md
index 0f6a976..fca97c2 100644
--- a/README.md
+++ b/README.md
@@ -27,6 +27,8 @@ export TF_VAR_AWS_SESSION_TOKEN=$AWS_SESSION_TOKEN
 ## Workspaces
 The definition of resources required for running RADAR-base components is located in the `cluster` directory, while other optional resources are defined in the `config` directory. Please treat each directory as a separate workspace and perform terraform operations individually. The `cluster` resources need to be created and made fully available before you proceed with the creation of the `config` ones.
+
+To retain user-specific configurations for future infrastructure updates, modify `terraform.tfvars` within the workspace and push the change to your repository. If needed, additional variables defined in `variables.tf` can also be included there.
 
 | :information_source: Important Notice |
 |:----------------------------------------|
 |As a best practice, never save raw values of secret variables in your repository. Instead, always encrypt them before committing. If your cluster is no longer in use, run `terraform destroy` to delete all the associated resources and reduce your cloud spending. If you have resources created within `config`, run `terraform destroy` in that directory before running the counterpart in `cluster`.|
diff --git a/cluster/eks.tf b/cluster/eks.tf
index 6c7880f..18e9873 100644
--- a/cluster/eks.tf
+++ b/cluster/eks.tf
@@ -88,6 +88,11 @@ module "eks" {
         snapshotter : {
           forceEnable : false
         }
+      },
+      controller : {
+        volumeModificationFeature : {
+          enabled : true
+        }
       }
     })
   }
@@ -190,19 +195,3 @@ module "eks" {
 
   tags = merge(tomap({ "Name" : var.eks_cluster_name }), var.common_tags)
 }
-
-output "radar_base_eks_cluster_name" {
-  value = module.eks.cluster_name
-}
-
-output "radar_base_eks_cluser_endpoint" {
-  value = module.eks.cluster_endpoint
-}
-
-output "radar_base_eks_dmz_node_group_name" {
-  value = element(split(":", module.eks.eks_managed_node_groups.dmz.node_group_id), 1)
-}
-
-output "radar_base_eks_worker_node_group_name" {
-  value = element(split(":", module.eks.eks_managed_node_groups.worker.node_group_id), 1)
-}
diff --git a/cluster/terraform.tfvars b/cluster/terraform.tfvars
new file mode 100644
index 0000000..49406e2
--- /dev/null
+++ b/cluster/terraform.tfvars
@@ -0,0 +1,3 @@
+AWS_REGION             = "eu-west-2"
+environment            = "dev"
+eks_admins_group_users = []
\ No newline at end of file
diff --git a/cluster/vpc.tf b/cluster/vpc.tf
index 988a598..4936930 100644
--- a/cluster/vpc.tf
+++ b/cluster/vpc.tf
@@ -45,7 +45,3 @@ module "vpc" {
   default_security_group_tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-vpc-default-sg" }), var.common_tags)
   tags                        = merge(tomap({ "Name" : "${var.eks_cluster_name}-vpc" }), var.common_tags)
 }
-
-output "radar_base_vpc_public_subnets" {
-  value = module.vpc.public_subnets
-}
diff --git a/config/data.tf b/config/data.tf
index 27e5d69..df073a7 100644
--- a/config/data.tf
+++ b/config/data.tf
@@ -72,5 +72,22 @@ locals {
     gp2 = "radar-base-ebs-sc-gp2"
     gp3 = "radar-base-ebs-sc-gp3"
     io1 = "radar-base-ebs-sc-io1"
+    io2 = "radar-base-ebs-sc-io2"
   }
+
+  s3_bucket_names = {
+    intermediate_output_storage = "${var.eks_cluster_name}-intermediate-output-storage"
+    output_storage              = "${var.eks_cluster_name}-output-storage"
+    velero_backups              = "${var.eks_cluster_name}-velero-backups"
+  }
+
+  cname_prefixes = [
+    "alertmanager",
+    "dashboard",
+    "grafana",
+    "graylog",
+    "prometheus",
+    "s3",
+  ]
+
 }
diff --git a/config/ebs.tf b/config/ebs.tf
index 2111f35..b86f96b 100644
--- a/config/ebs.tf
+++ b/config/ebs.tf
@@ -1,52 +1,21 @@
-resource "kubectl_manifest" "ebs_sc_gp2" {
-  yaml_body = <<-YAML
-    apiVersion: storage.k8s.io/v1
-    kind: StorageClass
-    metadata:
-      name: ${local.storage_classes.gp2}
-    provisioner: ebs.csi.aws.com
-    volumeBindingMode: WaitForFirstConsumer
-    allowVolumeExpansion: true
-    reclaimPolicy: Retain
-    parameters:
-      type: gp2
-      fstype: ext4
-  YAML
-}
+resource "kubectl_manifest" "ebs_storage_classes" {
+  for_each = local.storage_classes
 
-resource "kubectl_manifest" "ebs_sc_gp3" {
   yaml_body = <<-YAML
     apiVersion: storage.k8s.io/v1
     kind: StorageClass
     metadata:
-      name: ${local.storage_classes.gp3}
+      name: ${each.value}
     provisioner: ebs.csi.aws.com
     volumeBindingMode: WaitForFirstConsumer
     allowVolumeExpansion: true
    reclaimPolicy: Retain
     parameters:
-      type: gp3
+      type: ${each.key}
       fstype: ext4
   YAML
 }
 
-resource "kubectl_manifest" "ebs_sc_io1" {
-  yaml_body = <<-YAML
-    apiVersion: storage.k8s.io/v1
-    kind: StorageClass
-    metadata:
-      name: ${local.storage_classes.io1}
-    provisioner: ebs.csi.aws.com
-    volumeBindingMode: WaitForFirstConsumer
-    allowVolumeExpansion: true
-    reclaimPolicy: Retain
-    parameters:
-      type: io1
-      iopsPerGB: "100"
-      fsType: ext4
-  YAML
-}
-
 resource "kubernetes_annotations" "unset_eks_default_gp2" {
   api_version = "storage.k8s.io/v1"
   kind        = "StorageClass"
@@ -73,9 +42,7 @@ resource "kubernetes_annotations" "set_defaut_storage_class" {
   }
 
   depends_on = [
-    kubectl_manifest.ebs_sc_gp2,
-    kubectl_manifest.ebs_sc_gp3,
-    kubectl_manifest.ebs_sc_io1,
+    kubectl_manifest.ebs_storage_classes,
     kubernetes_annotations.unset_eks_default_gp2,
   ]
 }
@@ -91,3 +58,7 @@ output "radar_base_ebs_storage_class_gp3" {
   value = local.storage_classes.gp3
 }
 
 output "radar_base_ebs_storage_class_io1" {
   value = local.storage_classes.io1
 }
+
+output "radar_base_ebs_storage_class_io2" {
+  value = local.storage_classes.io2
+}
\ No newline at end of file
diff --git a/config/route53.tf b/config/route53.tf
index 9092d30..b201089 100644
--- a/config/route53.tf
+++ b/config/route53.tf
@@ -15,61 +15,11 @@ resource "aws_route53_record" "main" {
   records = [aws_eip.cluster_loadbalancer_eip[0].public_dns]
 }
 
-resource "aws_route53_record" "alertmanager" {
-  count = var.enable_route53 ? 1 : 0
-
-  zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "alertmanager.${var.environment}.${var.domain_name}"
-  type    = "CNAME"
-  ttl     = 300
-  records = ["${var.environment}.${var.domain_name}"]
-}
-
-resource "aws_route53_record" "dashboard" {
-  count = var.enable_route53 ? 1 : 0
-
-  zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "dashboard.${var.environment}.${var.domain_name}"
-  type    = "CNAME"
-  ttl     = 300
-  records = ["${var.environment}.${var.domain_name}"]
-}
-
-resource "aws_route53_record" "grafana" {
-  count = var.enable_route53 ? 1 : 0
-
-  zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "grafana.${var.environment}.${var.domain_name}"
-  type    = "CNAME"
-  ttl     = 300
-  records = ["${var.environment}.${var.domain_name}"]
-}
-
-resource "aws_route53_record" "graylog" {
-  count = var.enable_route53 ? 1 : 0
-
-  zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "graylog.${var.environment}.${var.domain_name}"
-  type    = "CNAME"
-  ttl     = 300
-  records = ["${var.environment}.${var.domain_name}"]
-}
-
-resource "aws_route53_record" "prometheus" {
-  count = var.enable_route53 ? 1 : 0
-
-  zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "prometheus.${var.environment}.${var.domain_name}"
-  type    = "CNAME"
-  ttl     = 300
-  records = ["${var.environment}.${var.domain_name}"]
-}
-
-resource "aws_route53_record" "s3" {
-  count = var.enable_route53 ? 1 : 0
+resource "aws_route53_record" "this" {
+  for_each = toset([for prefix in local.cname_prefixes : prefix if var.enable_route53])
 
   zone_id = aws_route53_zone.primary[0].zone_id
-  name    = "s3.${var.environment}.${var.domain_name}"
+  name    = "${each.value}.${var.environment}.${var.domain_name}"
   type    = "CNAME"
   ttl     = 300
   records = ["${var.environment}.${var.domain_name}"]
diff --git a/config/s3.tf b/config/s3.tf
index 2444394..b2bfbbb 100644
--- a/config/s3.tf
+++ b/config/s3.tf
@@ -14,118 +14,37 @@ resource "aws_vpc_endpoint_route_table_association" "route_table_association" {
   vpc_endpoint_id = aws_vpc_endpoint.s3[0].id
 }
 
-resource "aws_s3_bucket" "intermediate_output_storage" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = "${var.eks_cluster_name}-intermediate-output-storage"
-
-  tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-intermediate-output-storage" }), var.common_tags)
-}
-
-resource "aws_s3_bucket_ownership_controls" "intermediate_output" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.intermediate_output_storage[0].id
-  rule {
-    object_ownership = "BucketOwnerPreferred"
-  }
-
-  depends_on = [aws_s3_bucket.intermediate_output_storage]
-}
-
-resource "aws_s3_bucket_acl" "intermediate_output" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.intermediate_output_storage[0].id
-  acl    = "private"
-
-  depends_on = [aws_s3_bucket_ownership_controls.intermediate_output]
-}
-
-resource "aws_s3_bucket" "output_storage" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = "${var.eks_cluster_name}-output-storage"
-
-  tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-output-storage" }), var.common_tags)
-}
-
-resource "aws_s3_bucket_ownership_controls" "output" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.output_storage[0].id
-  rule {
-    object_ownership = "BucketOwnerPreferred"
-  }
-
-  depends_on = [aws_s3_bucket.output_storage]
-}
-
-resource "aws_s3_bucket_acl" "output" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.output_storage[0].id
-  acl    = "private"
-
-  depends_on = [aws_s3_bucket_ownership_controls.output]
-}
-
-resource "aws_s3_bucket" "velero_backups" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = "${var.eks_cluster_name}-velero-backups"
+resource "aws_s3_bucket" "this" {
+  for_each = { for k, v in local.s3_bucket_names : k => v if var.enable_s3 }
 
-  tags = merge(tomap({ "Name" : "${var.eks_cluster_name}-velero-backups" }), var.common_tags)
+  bucket = each.value
+  tags   = merge(tomap({ "Name" : each.key }), var.common_tags)
 }
 
-resource "aws_s3_bucket_ownership_controls" "velero" {
-  count = var.enable_s3 ? 1 : 0
+resource "aws_s3_bucket_ownership_controls" "this" {
+  for_each = { for k, v in local.s3_bucket_names : k => v if var.enable_s3 }
 
-  bucket = aws_s3_bucket.velero_backups[0].id
+  bucket = aws_s3_bucket.this[each.key].id
   rule {
     object_ownership = "BucketOwnerPreferred"
   }
 
-  depends_on = [aws_s3_bucket.velero_backups]
+  depends_on = [aws_s3_bucket.this]
 }
 
-resource "aws_s3_bucket_acl" "velero" {
-  count = var.enable_s3 ? 1 : 0
+resource "aws_s3_bucket_acl" "this" {
+  for_each = { for k, v in local.s3_bucket_names : k => v if var.enable_s3 }
 
-  bucket = aws_s3_bucket.velero_backups[0].id
+  bucket = aws_s3_bucket.this[each.key].id
   acl    = "private"
 
-  depends_on = [aws_s3_bucket_ownership_controls.velero]
-}
-
-resource "aws_s3_bucket_server_side_encryption_configuration" "intermediate_output_storage_encryption" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.intermediate_output_storage[0].id
-
-  rule {
-    apply_server_side_encryption_by_default {
-      sse_algorithm = "AES256"
-    }
-  }
-}
-
-resource "aws_s3_bucket_server_side_encryption_configuration" "output_storage_encryption" {
-  count = var.enable_s3 ? 1 : 0
-
-  bucket = aws_s3_bucket.output_storage[0].id
-
-  rule {
-    apply_server_side_encryption_by_default {
-      sse_algorithm = "AES256"
-    }
-  }
+  depends_on = [aws_s3_bucket_ownership_controls.this]
 }
 
-resource "aws_s3_bucket_server_side_encryption_configuration" "velero_backups_encryption" {
-  count = var.enable_s3 ? 1 : 0
+resource "aws_s3_bucket_server_side_encryption_configuration" "this" {
+  for_each = { for k, v in local.s3_bucket_names : k => v if var.enable_s3 }
 
-  bucket = aws_s3_bucket.velero_backups[0].id
+  bucket = aws_s3_bucket.this[each.key].id
 
   rule {
     apply_server_side_encryption_by_default {
       sse_algorithm = "AES256"
     }
@@ -151,20 +70,12 @@ resource "aws_iam_policy" "s3_access" {
         "Action" : [
           "s3:ListBucket"
         ],
-        "Resource" : [
-          "arn:aws:s3:::${var.eks_cluster_name}-intermediate-output-storage",
-          "arn:aws:s3:::${var.eks_cluster_name}-output-storage",
-          "arn:aws:s3:::${var.eks_cluster_name}-velero-backups"
-        ]
+        "Resource" : [for bucket_name in local.s3_bucket_names : "arn:aws:s3:::${bucket_name}"]
       },
       {
        "Effect" : "Allow",
        "Action" : "s3:*Object",
-        "Resource" : [
-          "arn:aws:s3:::${var.eks_cluster_name}-intermediate-output-storage/*",
-          "arn:aws:s3:::${var.eks_cluster_name}-output-storage/*",
-          "arn:aws:s3:::${var.eks_cluster_name}-velero-backups/*"
-        ]
+        "Resource" : [for bucket_name in local.s3_bucket_names : "arn:aws:s3:::${bucket_name}/*"]
       }
     ]
   })
@@ -194,15 +105,15 @@ resource "aws_iam_user_policy_attachment" "s3_access" {
 }
 
 output "radar_base_s3_intermediate_output_bucket_name" {
-  value = var.enable_s3 ? aws_s3_bucket.intermediate_output_storage[0].bucket : null
+  value = var.enable_s3 ? local.s3_bucket_names["intermediate_output_storage"] : null
 }
 
 output "radar_base_s3_output_bucket_name" {
-  value = var.enable_s3 ? aws_s3_bucket.output_storage[0].bucket : null
+  value = var.enable_s3 ? local.s3_bucket_names["output_storage"] : null
 }
 
 output "radar_base_s3_velero_bucket_name" {
-  value = var.enable_s3 ? aws_s3_bucket.velero_backups[0].bucket : null
+  value = var.enable_s3 ? local.s3_bucket_names["velero_backups"] : null
 }
 
 output "radar_base_s3_access_key" {
@@ -212,4 +123,4 @@ output "radar_base_s3_access_key" {
 output "radar_base_s3_secret_key" {
   value     = var.enable_s3 ? aws_iam_access_key.s3_access[0].secret : null
   sensitive = true
-}
\ No newline at end of file
+}
diff --git a/config/terraform.tfvars b/config/terraform.tfvars
new file mode 100644
index 0000000..6345c91
--- /dev/null
+++ b/config/terraform.tfvars
@@ -0,0 +1,11 @@
+AWS_REGION           = "eu-west-2"
+environment          = "dev"
+domain_name          = "change-me-radar-base-dummy-domain.net"
+defaut_storage_class = "radar-base-ebs-sc-gp2"
+enable_karpenter     = false
+enable_msk           = false
+enable_rds           = false
+enable_route53       = false
+enable_ses           = false
+enable_s3            = false
+enable_eip           = false
diff --git a/config/variables.tf b/config/variables.tf
index 5ba42fe..f8f1d60 100644
--- a/config/variables.tf
+++ b/config/variables.tf
@@ -71,8 +71,8 @@ variable "defaut_storage_class" {
   default     = "radar-base-ebs-sc-gp2"
 
   validation {
-    condition     = var.defaut_storage_class == "radar-base-ebs-sc-gp2" || var.defaut_storage_class == "radar-base-ebs-sc-gp3" || var.defaut_storage_class == "radar-base-ebs-sc-io1"
-    error_message = "Invalid storage class. Allowed values are 'radar-base-ebs-sc-gp2', 'radar-base-ebs-sc-gp3' or 'radar-base-ebs-sc-io1'."
+    condition     = var.defaut_storage_class == "radar-base-ebs-sc-gp2" || var.defaut_storage_class == "radar-base-ebs-sc-gp3" || var.defaut_storage_class == "radar-base-ebs-sc-io1" || var.defaut_storage_class == "radar-base-ebs-sc-io2"
+    error_message = "Invalid storage class. Allowed values are 'radar-base-ebs-sc-gp2', 'radar-base-ebs-sc-gp3', 'radar-base-ebs-sc-io1' or 'radar-base-ebs-sc-io2'."
   }
 }
 
@@ -95,8 +95,9 @@ variable "karpenter_version" {
 variable "radar_postgres_password" {
   type        = string
   description = "Password for the PostgreSQL database used by Radar components"
-  default     = "change_me"
-  sensitive   = true
+  # Make sure to change the default value when var.enable_rds is set to "true"
+  default     = "change_me"
+  sensitive   = true
 }
 
 variable "enable_karpenter" {