dasmeta/terraform-aws-eks

Why

To spin up a complete EKS cluster with all necessary components: VPC, node groups, ingress controllers, logging/metrics addons and more (see the Modules section below for the full list).

How to run

*data "aws_availability_zones" "available" {}

*locals {
   cluster_endpoint_public_access = true
   cluster_enabled_log_types = ["audit"]
 vpc = {
   create = {
     name = "dev"
     availability_zones = data.aws_availability_zones.available.names
     private_subnets    = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
     public_subnets     = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
     cidr               = "172.16.0.0/16"
     public_subnet_tags = {
   "kubernetes.io/cluster/dev" = "shared"
   "kubernetes.io/role/elb"    = "1"
 }
 private_subnet_tags = {
   "kubernetes.io/cluster/dev"       = "shared"
   "kubernetes.io/role/internal-elb" = "1"
 }
   }
 }
  cluster_name = "your-cluster-name-goes-here"
 alb_log_bucket_name = "your-log-bucket-name-goes-here"

 fluent_bit_name = "fluent-bit"
 log_group_name  = "fluent-bit-cloudwatch-env"
*}

# (Basic usage with an already created VPC)
data "aws_availability_zones" "available" {}

locals {
  cluster_endpoint_public_access = true
  cluster_enabled_log_types      = ["audit"]

  vpc = {
    link = {
      id                 = "vpc-1234"
      private_subnet_ids = ["subnet-1", "subnet-2"]
    }
  }

  cluster_name        = "your-cluster-name-goes-here"
  alb_log_bucket_name = "your-log-bucket-name-goes-here"

  fluent_bit_name = "fluent-bit"
  log_group_name  = "fluent-bit-cloudwatch-env"
}

# Minimum
module "cluster_min" {
  source  = "dasmeta/eks/aws"
  version = "0.1.1"

  cluster_name = local.cluster_name
  users        = local.users

  vpc = {
    link = {
      id                 = "vpc-1234"
      private_subnet_ids = ["subnet-1", "subnet-2"]
    }
  }
}

# Max @TODO: the max param passing setup needs to be checked/fixed
module "cluster_max" {
  source  = "dasmeta/eks/aws"
  version = "0.1.1"

  ### VPC
  vpc = {
    create = {
      name               = "dev"
      availability_zones = data.aws_availability_zones.available.names
      private_subnets    = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
      public_subnets     = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
      cidr               = "172.16.0.0/16"
      public_subnet_tags = {
        "kubernetes.io/cluster/dev" = "shared"
        "kubernetes.io/role/elb"    = "1"
      }
      private_subnet_tags = {
        "kubernetes.io/cluster/dev"       = "shared"
        "kubernetes.io/role/internal-elb" = "1"
      }
    }
  }

  cluster_enabled_log_types      = local.cluster_enabled_log_types
  cluster_endpoint_public_access = local.cluster_endpoint_public_access

  ### EKS
  cluster_name    = local.cluster_name
  manage_aws_auth = true

  # IAM users with username and group. The default group is ["system:masters"].
  users = [
    {
      username = "devops1"
      group    = ["system:masters"]
    },
    {
      username = "devops2"
      group    = ["system:kube-scheduler"]
    },
    {
      username = "devops3"
    }
  ]

  # Use node_groups to create nodes in specific subnet zones (note: in this case the EC2 instances do not have a specific name).
  # Otherwise, use the worker_groups variable.
  node_groups = {
    example = {
      name        = "nodegroup"
      name-prefix = "nodegroup"
      additional_tags = {
        "Name"     = "node"
        "ExtraTag" = "ExtraTag"
      }

      instance_type          = "t3.xlarge"
      max_capacity           = 1
      disk_size              = 50
      create_launch_template = false
      subnet                 = ["subnet_id"]
    }
  }

  node_groups_default = {
    disk_size      = 50
    instance_types = ["t3.medium"]
  }

  worker_groups = {
    default = {
      name             = "nodes"
      instance_type    = "t3.xlarge"
      asg_max_size     = 3
      root_volume_size = 50
    }
  }

  workers_group_defaults = {
    launch_template_use_name_prefix = true
    launch_template_name            = "default"
    root_volume_type                = "gp2"
    root_volume_size                = 50
  }

  ### ALB-INGRESS-CONTROLLER
  alb_log_bucket_name = local.alb_log_bucket_name

  ### FLUENT-BIT
  fluent_bit_name = local.fluent_bit_name
  log_group_name  = local.log_group_name

  # Should be refactored to install from cluster: for prod it is done from metrics-server.tf
  ### METRICS-SERVER
  # enable_metrics_server = false
  metrics_server_name = "metrics-server"
}
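
The cluster outputs can be wired into the kubernetes/helm providers used for in-cluster releases. A minimal sketch, assuming the cluster_min module instance from above (verify against the module outputs whether cluster_certificate is already base64-decoded):

provider "kubernetes" {
  host                   = module.cluster_min.cluster_host
  cluster_ca_certificate = module.cluster_min.cluster_certificate # may need base64decode() depending on the output format
  token                  = module.cluster_min.cluster_token
}

provider "helm" {
  kubernetes {
    host                   = module.cluster_min.cluster_host
    cluster_ca_certificate = module.cluster_min.cluster_certificate
    token                  = module.cluster_min.cluster_token
  }
}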

Requirements

Name Version
terraform ~> 1.3
aws >= 3.31, < 5.0.0
helm >= 2.4.1
kubectl ~>1.14

Providers

Name Version
aws >= 3.31, < 5.0.0
helm >= 2.4.1
kubernetes n/a

Modules

Name Source Version
adot ./modules/adot n/a
alb-ingress-controller ./modules/aws-load-balancer-controller n/a
api-gw-controller ./modules/api-gw n/a
autoscaler ./modules/autoscaler n/a
cloudwatch-metrics ./modules/cloudwatch-metrics n/a
cw_alerts dasmeta/monitoring/aws//modules/alerts 1.3.5
ebs-csi ./modules/ebs-csi n/a
efs-csi-driver ./modules/efs-csi n/a
eks-cluster ./modules/eks n/a
external-dns ./modules/external-dns n/a
external-secrets ./modules/external-secrets n/a
fluent-bit ./modules/fluent-bit n/a
metrics-server ./modules/metrics-server n/a
nginx-ingress-controller ./modules/nginx-ingress-controller/ n/a
node-problem-detector ./modules/node-problem-detector n/a
olm ./modules/olm n/a
portainer ./modules/portainer n/a
priority_class ./modules/priority-class/ n/a
sso-rbac ./modules/sso-rbac n/a
vpc dasmeta/vpc/aws 1.0.1
weave-scope ./modules/weave-scope n/a

Resources

Name Type
helm_release.cert-manager resource
helm_release.kube-state-metrics resource
kubernetes_namespace.meta-system resource
aws_caller_identity.current data source
aws_region.current data source

Inputs

Name Description Type Default Required
account_id AWS Account Id to apply changes into string null no
additional_priority_classes Defines Priority Classes in Kubernetes, used to assign different levels of priority to pods. By default, this module creates three Priority Classes: 'high' (1000000), 'medium' (500000) and 'low' (250000). You can also provide a custom list of Priority Classes if needed.
list(object({
name = string
value = string # number in string form
}))
[] no
adot_config accept_namespace_regex defines the list of namespaces from which metrics will be exported, and additional_metrics defines additional metrics to export.
object({
accept_namespace_regex = optional(string, "(default|kube-system)")
additional_metrics = optional(list(string), [])
log_group_name = optional(string, "adot")
log_retention = optional(number, 14)
helm_values = optional(any, null)
logging_enable = optional(bool, false)
resources = optional(object({
limit = object({
cpu = optional(string, "200m")
memory = optional(string, "200Mi")
})
requests = object({
cpu = optional(string, "200m")
memory = optional(string, "200Mi")
})
}), {
limit = {
cpu = "200m"
memory = "200Mi"
}
requests = {
cpu = "200m"
memory = "200Mi"
}
})
})
{
"accept_namespace_regex": "(default|kube-system)"
}
no
adot_version The version of the AWS Distro for OpenTelemetry addon to use. string "v0.78.0-eksbuild.1" no
alarms Alarms are enabled by default; set the SNS topic name that alarms are sent to, and use custom_values to customize alarm thresholds.
object({
enabled = optional(bool, true)
sns_topic = string
custom_values = optional(any, {})
})
n/a yes
alb_log_bucket_name n/a string "" no
alb_log_bucket_path ALB-INGRESS-CONTROLLER string "" no
api_gateway_resources Nested map containing API, Stage, and VPC Link resources
list(object({
namespace = string
api = object({
name = string
protocolType = string
})
stages = optional(list(object({
name = string
namespace = string
apiRef_name = string
stageName = string
autoDeploy = bool
description = string
})))
vpc_links = optional(list(object({
name = string
namespace = string
})))
}))
[] no
api_gw_deploy_region Region in which the API gateway will be configured string "" no
autoscaler_image_patch The patch number of autoscaler image number 0 no
autoscaler_limits n/a
object({
cpu = string
memory = string
})
{
"cpu": "100m",
"memory": "600Mi"
}
no
autoscaler_requests n/a
object({
cpu = string
memory = string
})
{
"cpu": "100m",
"memory": "600Mi"
}
no
autoscaling Whether to enable autoscaling in EKS or not bool true no
bindings Variable which describes group and role binding
list(object({
group = string
namespace = string
roles = list(string)

}))
[] no
cluster_enabled_log_types A list of the desired control plane logs to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) list(string) [] no
cluster_endpoint_public_access n/a bool true no
cluster_name Name of the EKS cluster to create. string n/a yes
cluster_version Allows setting/changing the Kubernetes cluster version; the Kubernetes version needs to be updated at least once a year. Please check available versions here: https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html string "1.27" no
create Whether to create cluster and other resources or not bool true no
create_cert_manager If enabled it always gets deployed to the cert-manager namespace. bool false no
ebs_csi_version EBS CSI driver addon version string "v1.15.0-eksbuild.1" no
efs_id EFS filesystem id in AWS string null no
efs_storage_classes Additional storage class configurations: by default, 2 storage classes are created - efs-sc and efs-sc-root which has 0 uid. One can add another storage classes besides these 2.
list(object({
name : string
provisioning_mode : optional(string, "efs-ap")
file_system_id : string
directory_perms : optional(string, "755")
base_path : optional(string, "/")
uid : optional(number)
}))
[] no
enable_api_gw_controller Whether to enable the API-GW controller or not bool false no
enable_ebs_driver Whether to enable the EBS-CSI driver or not bool true no
enable_efs_driver Whether to install the EFS driver in EKS or not bool false no
enable_kube_state_metrics Enable kube-state-metrics bool false no
enable_metrics_server METRICS-SERVER bool false no
enable_node_problem_detector n/a bool true no
enable_olm To install OLM controller (experimental). bool false no
enable_portainer Enable Portainer provisioning or not bool false no
enable_sso_rbac Enable SSO RBAC integration or not bool false no
enable_waf_for_alb Enables WAF and WAF V2 addons for ALB bool false no
external_dns Allows installing the external-dns helm chart and related roles, which automatically create R53 records based on ingress/service domain/host configs
object({
enabled = optional(bool, false)
configs = optional(any, {})
})
{
"enabled": false
}
no
external_secrets_namespace The namespace of external-secret operator string "kube-system" no
fluent_bit_configs Fluent Bit configs
object({
enabled = optional(string, true)
fluent_bit_name = optional(string, "")
log_group_name = optional(string, "")
system_log_group_name = optional(string, "")
log_retention_days = optional(number, 90)
values_yaml = optional(string, "")
configs = optional(object({
inputs = optional(string, "")
filters = optional(string, "")
outputs = optional(string, "")
cloudwatch_outputs_enabled = optional(bool, true)
}), {})
drop_namespaces = optional(list(string), [])
log_filters = optional(list(string), [])
additional_log_filters = optional(list(string), [])
kube_namespaces = optional(list(string), [])
image_pull_secrets = optional(list(string), [])
})
{
"additional_log_filters": [
"ELB-HealthChecker",
"Amazon-Route53-Health-Check-Service"
],
"configs": {
"cloudwatch_outputs_enabled": true,
"filters": "",
"inputs": "",
"outputs": ""
},
"drop_namespaces": [
"kube-system",
"opentelemetry-operator-system",
"adot",
"cert-manager",
"opentelemetry.",
"meta.
"
],
"enabled": true,
"fluent_bit_name": "",
"image_pull_secrets": [],
"kube_namespaces": [
"kube.",
"meta.
",
"adot.",
"devops.
",
"cert-manager.",
"git.
",
"opentelemetry.",
"stakater.
",
"renovate.*"
],
"log_filters": [
"kube-probe",
"health",
"prometheus",
"liveness"
],
"log_group_name": "",
"log_retention_days": 90,
"system_log_group_name": "",
"values_yaml": ""
}
no
manage_aws_auth n/a bool true no
map_roles Additional IAM roles to add to the aws-auth configmap.
list(object({
rolearn = string
username = string
groups = list(string)
}))
[] no
metrics_exporter Metrics Exporter, can use cloudwatch or adot string "adot" no
metrics_server_name n/a string "metrics-server" no
nginx_ingress_controller_config Nginx ingress controller configs
object({
enabled = optional(bool, false)
name = optional(string, "nginx")
create_namespace = optional(bool, true)
namespace = optional(string, "ingress-nginx")
replicacount = optional(number, 3)
metrics_enabled = optional(bool, true)
})
{
"create_namespace": true,
"enabled": false,
"metrics_enabled": true,
"name": "nginx",
"namespace": "ingress-nginx",
"replicacount": 3
}
no
node_groups Map of EKS managed node group definitions to create any
{
"default": {
"desired_size": 2,
"iam_role_additional_policies": [
"arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
],
"instance_types": [
"t3.large"
],
"max_size": 4,
"min_size": 2
}
}
no
node_groups_default Map of EKS managed node group default configurations any
{
"disk_size": 50,
"iam_role_additional_policies": [
"arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
],
"instance_types": [
"t3.large"
]
}
no
node_security_group_additional_rules n/a any
{
"ingress_cluster_10250": {
"description": "Metric server to node groups",
"from_port": 10250,
"protocol": "tcp",
"self": true,
"to_port": 10250,
"type": "ingress"
},
"ingress_cluster_8443": {
"description": "Metric server to node groups",
"from_port": 8443,
"protocol": "tcp",
"source_cluster_security_group": true,
"to_port": 8443,
"type": "ingress"
}
}
no
portainer_config Portainer hostname and ingress config.
object({
host = optional(string, "portainer.dasmeta.com")
enable_ingress = optional(bool, true)
})
{} no
prometheus_metrics Prometheus Metrics any [] no
region AWS Region name. string null no
roles Describes which roles the user will have in K8s
list(object({
actions = list(string)
resources = list(string)
}))
[] no
scale_down_unneeded_time Scale-down unneeded time in minutes number 2 no
send_alb_logs_to_cloudwatch Whether to send ALB logs to CloudWatch or not. bool true no
users List of users to grant EKS cluster API access list(any) [] no
vpc VPC configuration for EKS; both creating a new VPC (create field) and linking an already created one (link field) are supported
object({
# for linking using existing vpc
link = optional(object({
id = string
private_subnet_ids = list(string) # please have the existing vpc public/private subnets(at least 2 needed) tagged with corresponding tags(look into create case subnet tags defaults)
}), { id = null, private_subnet_ids = null })
# for creating new vpc
create = optional(object({
name = string
availability_zones = list(string)
cidr = string
private_subnets = list(string)
public_subnets = list(string)
public_subnet_tags = optional(map(any), {}) # to pass additional tags for public subnet or override default ones. The default ones are: {"kubernetes.io/cluster/${var.cluster_name}" = "shared","kubernetes.io/role/elb" = 1}
private_subnet_tags = optional(map(any), {}) # to pass additional tags for the private subnets or override default ones. The default ones are: {"kubernetes.io/cluster/${var.cluster_name}" = "shared","kubernetes.io/role/internal-elb" = 1}
}), { name = null, availability_zones = null, cidr = null, private_subnets = null, public_subnets = null })
})
n/a yes
weave_scope_config Weave scope namespace configuration variables
object({
create_namespace = bool
namespace = string
annotations = map(string)
ingress_host = string
ingress_class = string
ingress_name = string
service_type = string
weave_helm_release_name = string
})
{
"annotations": {},
"create_namespace": true,
"ingress_class": "",
"ingress_host": "",
"ingress_name": "weave-ingress",
"namespace": "meta-system",
"service_type": "NodePort",
"weave_helm_release_name": "weave"
}
no
weave_scope_enabled Whether to enable Weave Scope or not bool false no
worker_groups Worker groups. any {} no
workers_group_defaults Worker group defaults. any
{
"launch_template_name": "default",
"launch_template_use_name_prefix": true,
"root_volume_size": 50,
"root_volume_type": "gp2"
}
no
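
For reference, a hedged sketch of how a few of the optional inputs above could be passed alongside the required cluster_name, vpc and alarms inputs; the values shown are illustrative, not defaults:

module "cluster" {
  source  = "dasmeta/eks/aws"
  version = "0.1.1"

  cluster_name = "dev"

  # alarms requires an SNS topic name that alerts are sent to
  alarms = {
    sns_topic = "your-sns-topic-name"
  }

  vpc = {
    link = {
      id                 = "vpc-1234"
      private_subnet_ids = ["subnet-1", "subnet-2"]
    }
  }

  # enable the nginx ingress controller (other fields keep their defaults)
  nginx_ingress_controller_config = {
    enabled = true
  }

  # install external-dns so R53 records get created from ingress/service hosts
  external_dns = {
    enabled = true
  }

  # ship logs to a dedicated log group and drop a noisy namespace
  fluent_bit_configs = {
    log_group_name  = "fluent-bit-cloudwatch-dev"
    drop_namespaces = ["kube-system"]
  }
}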

Outputs

Name Description
cluster_certificate EKS cluster certificate used for authentication/access in helm/kubectl/kubernetes providers
cluster_host EKS cluster host name used for authentication/access in helm/kubectl/kubernetes providers
cluster_iam_role_name n/a
cluster_id n/a
cluster_primary_security_group_id n/a
cluster_security_group_id n/a
cluster_token EKS cluster token used for authentication/access in helm/kubectl/kubernetes providers
eks_auth_configmap n/a
eks_module n/a
eks_oidc_root_ca_thumbprint Grab eks_oidc_root_ca_thumbprint from oidc_provider_arn.
map_user_data n/a
oidc_provider_arn n/a
role_arns n/a
role_arns_without_path n/a
vpc_cidr_block The cidr block of the vpc
vpc_default_security_group_id The ID of default security group created for vpc
vpc_id The newly created vpc id
vpc_nat_public_ips The list of elastic public IPs for vpc
vpc_private_subnets The newly created vpc private subnets IDs list
vpc_public_subnets The newly created vpc public subnets IDs list
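
The VPC outputs can be consumed by other resources or stacks. A small sketch, assuming the cluster_max instance from above (which creates the VPC); the aws_security_group resource and its name are illustrative assumptions:

# Expose the module-created VPC id for other stacks to consume.
output "eks_vpc_id" {
  value = module.cluster_max.vpc_id
}

# Place an additional security group into the module-created VPC.
resource "aws_security_group" "extra" {
  name_prefix = "extra-"
  vpc_id      = module.cluster_max.vpc_id
}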