From 6ca49acba8b4e9d8a1ae63336f8352097ab150fe Mon Sep 17 00:00:00 2001 From: Simon Gerber Date: Wed, 15 Jan 2025 16:28:53 +0100 Subject: [PATCH] Draft implementation for using instance pools --- control_plane.tf | 2 ++ infra.tf | 2 ++ modules/node-group/main.tf | 32 ++++++++++++++++++++++++++++---- modules/node-group/output.tf | 6 +++++- modules/node-group/variables.tf | 6 ++++++ variables.tf | 6 ++++++ worker.tf | 6 +++++- 7 files changed, 54 insertions(+), 6 deletions(-) diff --git a/control_plane.tf b/control_plane.tf index a9837b0..eb3c7a1 100644 --- a/control_plane.tf +++ b/control_plane.tf @@ -32,6 +32,8 @@ module "master" { deploy_target_id = var.deploy_target_id bootstrap_bucket = var.bootstrap_bucket + + use_instancepool = var.use_instancepools } resource "exoscale_domain_record" "etcd" { diff --git a/infra.tf b/infra.tf index 17df122..db31570 100644 --- a/infra.tf +++ b/infra.tf @@ -33,4 +33,6 @@ module "infra" { deploy_target_id = var.deploy_target_id bootstrap_bucket = var.bootstrap_bucket + + use_instancepool = var.use_instancepools } diff --git a/modules/node-group/main.tf b/modules/node-group/main.tf index ca31e43..7712af8 100644 --- a/modules/node-group/main.tf +++ b/modules/node-group/main.tf @@ -17,7 +17,7 @@ locals { // having to work around merge() being a shallow merge in the compute // instance resource. user_data = [ - for hostname in random_id.node_id[*].hex : + for hostname in(var.use_instancepool ? ["dummy"] : random_id.node_id[*].hex) : { "ignition" : { "version" : "3.1.0", @@ -42,7 +42,7 @@ locals { "storage" : { // concatenate the private network config (if requested) with the // `/etc/hostname` override. - "files" : concat( + "files" : var.use_instancepool ? [] : concat( var.use_privnet ? 
local.privnet_config_files : [], // override /etc/hostname with short hostname, this works around the // fact that we can't set a separate `name` and `display_name` for @@ -158,7 +158,7 @@ locals { } resource "random_id" "node_id" { - count = var.node_count + count = var.use_instancepool ? 0 : var.node_count prefix = "${var.role}-" byte_length = 2 } @@ -170,7 +170,7 @@ resource "exoscale_anti_affinity_group" "anti_affinity_group" { } resource "exoscale_compute_instance" "nodes" { - count = var.node_count + count = var.use_instancepool ? 0 : var.node_count name = "${random_id.node_id[count.index].hex}.${var.cluster_domain}" ssh_key = var.ssh_key_pair zone = var.region @@ -207,3 +207,27 @@ resource "exoscale_compute_instance" "nodes" { ] } } + +resource "exoscale_instance_pool" "nodes" { + count = var.use_instancepool ? local.anti_affinity_group_count : 0 + name = "${var.role}-${count.index}" + size = var.node_count + zone = var.region + key_pair = var.ssh_key_pair + template_id = var.template_id + + instance_prefix = var.role + instance_type = var.instance_type + + disk_size = local.disk_size + user_data = base64encode(jsonencode(local.user_data[0])) + + deploy_target_id = var.deploy_target_id + + security_group_ids = var.security_group_ids + + anti_affinity_group_ids = concat( + [exoscale_anti_affinity_group.anti_affinity_group[count.index].id], + var.additional_affinity_group_ids + ) +} diff --git a/modules/node-group/output.tf b/modules/node-group/output.tf index fc9d656..02d6850 100644 --- a/modules/node-group/output.tf +++ b/modules/node-group/output.tf @@ -1,3 +1,7 @@ +locals { + instance_pool_ips = var.use_instancepool ? flatten([for pool in exoscale_instance_pool.nodes : pool.instances[*].public_ip_address]) : [] + instance_ips = var.use_privnet ? exoscale_compute_instance.nodes[*].network_interface[0].ip_address : exoscale_compute_instance.nodes[*].public_ip_address +} output "ip_address" { - value = var.use_privnet ?
exoscale_compute_instance.nodes[*].network_interface[0].ip_address : exoscale_compute_instance.nodes[*].public_ip_address + value = var.use_instancepool ? local.instance_pool_ips : local.instance_ips } diff --git a/modules/node-group/variables.tf b/modules/node-group/variables.tf index d87695b..97c94f0 100644 --- a/modules/node-group/variables.tf +++ b/modules/node-group/variables.tf @@ -126,3 +126,9 @@ variable "affinity_group_capacity" { default = 0 description = "Capacity of the affinity group, e.g. when using dedicated hypervisors, default: 0 (unlimited)" } + +variable "use_instancepool" { + type = bool + description = "Use instancepool for this node group" + default = false +} diff --git a/variables.tf b/variables.tf index 9b2238d..c83aa6d 100644 --- a/variables.tf +++ b/variables.tf @@ -238,3 +238,9 @@ variable "additional_lb_security_group_ids" { default = [] description = "List of additional security group IDs to configure on the LBs" } + +variable "use_instancepools" { + type = bool + description = "Use instance pools for node groups" + default = false +} diff --git a/worker.tf b/worker.tf index 11d1bcc..8e5f1d9 100644 --- a/worker.tf +++ b/worker.tf @@ -35,6 +35,8 @@ module "worker" { deploy_target_id = var.deploy_target_id bootstrap_bucket = var.bootstrap_bucket + + use_instancepool = var.use_instancepools } // Additional worker groups. @@ -83,4 +85,6 @@ module "additional_worker" { deploy_target_id = var.deploy_target_id bootstrap_bucket = var.bootstrap_bucket + + use_instancepool = var.use_instancepools }