refactor: Upgrade to v18 of EKS module #64
**README**

```diff
@@ -1,4 +1,4 @@
 # Learn Terraform - Provision an EKS Cluster

 This repo is a companion repo to the [Provision an EKS Cluster learn guide](https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster), containing
-Terraform configuration files to provision an EKS cluster on AWS.
+Terraform configuration files to provision an EKS cluster on AWS.
```
**EKS cluster module configuration**

```diff
@@ -1,38 +1,88 @@
 module "eks" {
-  source          = "terraform-aws-modules/eks/aws"
-  version         = "17.24.0"
+  source  = "terraform-aws-modules/eks/aws"
+  version = "18.23.0"

   cluster_name    = local.cluster_name
-  cluster_version = "1.20"
-  subnets         = module.vpc.private_subnets
+  cluster_version = "1.22"

-  vpc_id = module.vpc.vpc_id
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets

-  workers_group_defaults = {
-    root_volume_type = "gp2"
-  }
+  manage_aws_auth_configmap = true

-  worker_groups = [
-    {
-      name                          = "worker-group-1"
-      instance_type                 = "t2.small"
-      additional_userdata           = "echo foo bar"
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
-      asg_desired_capacity          = 2
-    },
-    {
-      name                          = "worker-group-2"
-      instance_type                 = "t2.medium"
-      additional_userdata           = "echo foo bar"
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
-      asg_desired_capacity          = 1
-    },
-  ]
-}
+  # Extend cluster security group rules
+  cluster_security_group_additional_rules = {
+    egress_nodes_ephemeral_ports_tcp = {
+      description                = "To node 1025-65535"
+      protocol                   = "tcp"
+      from_port                  = 1025
+      to_port                    = 65535
+      type                       = "egress"
+      source_node_security_group = true
+    }
+  }

-data "aws_eks_cluster" "cluster" {
-  name = module.eks.cluster_id
-}
+  # Extend node-to-node security group rules
+  node_security_group_additional_rules = {
+    ingress_self_all = {
+      description = "Node to node all ports/protocols"
+      protocol    = "-1"
+      from_port   = 0
+      to_port     = 0
+      type        = "ingress"
+      self        = true
+    }
+    egress_all = {
+      description      = "Node all egress"
+      protocol         = "-1"
+      from_port        = 0
+      to_port          = 0
+      type             = "egress"
+      cidr_blocks      = ["0.0.0.0/0"]
+      ipv6_cidr_blocks = ["::/0"]
+    }
+  }

-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
+  eks_managed_node_group_defaults = {
+    ami_type = "AL2_x86_64"
+
+    # We'll provide our own security groups
+    create_security_group = false
+  }
+
+  eks_managed_node_groups = {
+    one = {
+      instance_types = ["t3.small"]
+
+      min_size     = 1
+      max_size     = 3
+      desired_size = 2
+
+      pre_bootstrap_user_data = <<-EOT
+      echo 'foo bar'
+      EOT
+
+      vpc_security_group_ids = [
+        aws_security_group.node_group_one.id
+      ]
+    }
+
+    two = {
+      name = "node-group-2"
+
+      instance_types = ["t3.medium"]
+
+      min_size     = 1
+      max_size     = 2
+      desired_size = 1
+
+      pre_bootstrap_user_data = <<-EOT
+      echo 'foo bar'
+      EOT
+
+      vpc_security_group_ids = [
+        aws_security_group.node_group_two.id
+      ]
+    }
+  }
+}
```
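For readers following the upgrade: `manage_aws_auth_configmap = true` tells the v18 module to create and manage the `aws-auth` ConfigMap itself, which is why the PR also wires up the Kubernetes provider below. As a minimal sketch of what that enables, here is one way to grant an extra IAM role access through the module; the `aws_auth_roles` input exists in v18.20 and later, and the role ARN, username, and group here are hypothetical, not part of this PR:

```hcl
# Sketch only: the role ARN, username, and group are illustrative assumptions.
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "18.23.0"

  # ... cluster, networking, and node group arguments as in the diff above ...

  manage_aws_auth_configmap = true

  aws_auth_roles = [
    {
      rolearn  = "arn:aws:iam::111122223333:role/ci-deployer" # hypothetical role
      username = "ci-deployer"
      groups   = ["system:masters"]
    }
  ]
}
```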
**Kubernetes provider configuration**

```diff
@@ -1,12 +1,31 @@
 # Kubernetes provider
 # https://learn.hashicorp.com/terraform/kubernetes/provision-eks-cluster#optional-configure-terraform-kubernetes-provider
 # To learn how to schedule deployments and services using the provider, go here: https://learn.hashicorp.com/terraform/kubernetes/deploy-nginx-kubernetes

 # The Kubernetes provider is included in this file so the EKS module can complete successfully. Otherwise, it throws an error when creating `kubernetes_config_map.aws_auth`.
 # You should **not** schedule deployments and services in this workspace. This keeps workspaces modular (one for provision EKS, another for scheduling Kubernetes resources) as per best practices.

 provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
+  host                   = module.eks.cluster_endpoint
+  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+  exec {
+    api_version = "client.authentication.k8s.io/v1alpha1"
+    command     = "aws"
+    # This requires the awscli to be available locally where Terraform is executed
+    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
+  }
 }
+
+provider "aws" {
+  region = var.region
+}
+
+data "aws_availability_zones" "available" {}
+
+locals {
+  cluster_name = "education-eks-${random_string.suffix.result}"
+}
+
+resource "random_string" "suffix" {
+  length  = 8
+  special = false
+}
```

> **Review comment** on the `# Kubernetes provider` header: It's common practice to have a generic …

> **Review comment** on the `exec` block: The …
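A side note that may save readers some debugging, not part of the diff itself: Kubernetes removed the `client.authentication.k8s.io/v1alpha1` exec API in the 1.24-era tooling, and newer AWS CLI releases emit `v1beta1` tokens from `aws eks get-token`. A variant of the provider block under that assumption:

```hcl
# Variant sketch, assuming an AWS CLI recent enough to serve the v1beta1 token API.
provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # exec fetches a fresh token on every Terraform invocation, so it cannot go
    # stale mid-apply the way a token read once via aws_eks_cluster_auth can.
    args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }
}
```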
**Outputs**

```diff
@@ -1,26 +1,21 @@
 output "cluster_id" {
-  description = "EKS cluster ID."
+  description = "EKS cluster ID"
   value       = module.eks.cluster_id
 }

 output "cluster_endpoint" {
-  description = "Endpoint for EKS control plane."
+  description = "Endpoint for EKS control plane"
   value       = module.eks.cluster_endpoint
 }

 output "cluster_security_group_id" {
-  description = "Security group ids attached to the cluster control plane."
+  description = "Security group ids attached to the cluster control plane"
   value       = module.eks.cluster_security_group_id
 }

-output "kubectl_config" {
-  description = "kubectl config as generated by the module."
-  value       = module.eks.kubeconfig
-}
-
-output "config_map_aws_auth" {
-  description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = module.eks.config_map_aws_auth
+output "aws_auth_configmap_yaml" {
+  description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
+  value       = module.eks.aws_auth_configmap_yaml
 }

 output "region" {
```

> **Review comment** on `output "kubectl_config"`: This output (or an equivalent) is no longer provided in v18; users are able to retrieve this through the awscli: `aws eks update-kubeconfig --region <REGION> --name <CLUSTER_NAME>`
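If you want the rendered `aws-auth` ConfigMap on disk, for review or for applying out-of-band, a small illustrative sketch using the `hashicorp/local` provider; this resource is an assumption for demonstration, not part of this PR:

```hcl
# Illustrative only: persist the module-rendered aws-auth ConfigMap locally.
resource "local_file" "aws_auth" {
  filename = "${path.module}/aws-auth.yaml"
  content  = module.eks.aws_auth_configmap_yaml
}
```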
> **Review comment** on the added security group rules: The security group rules added here put the definition back inline with the v17 version. In v18, the security group rules were reduced to only the bare minimum required for a cluster to provision successfully, while allowing users to open or extend access as they see fit for their workload.
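Given that context, extending access in v18 is meant to be deliberate and scoped. A minimal sketch of opening one targeted port rather than all traffic; the rule key, description, and port below are illustrative assumptions, not part of this PR:

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "18.23.0"

  # ... other arguments as in the diff above ...

  # Sketch: a single scoped rule instead of the broad all-ports examples above.
  node_security_group_additional_rules = {
    ingress_cluster_to_node_webhook = {
      description                   = "Cluster API to node admission webhook"
      protocol                      = "tcp"
      from_port                     = 8443
      to_port                       = 8443
      type                          = "ingress"
      source_cluster_security_group = true
    }
  }
}
```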