-
Notifications
You must be signed in to change notification settings - Fork 0
/
aks.tf
95 lines (81 loc) · 3.95 KB
/
aks.tf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# User-assigned managed identity for the AKS control plane.
# Created only when provisioning a new cluster; skipped when an existing
# cluster is being reused (var.use_existing_cluster).
resource "azurerm_user_assigned_identity" "cluster" {
  count = var.use_existing_cluster ? 0 : 1

  name                = var.name
  location            = var.location
  resource_group_name = var.resource_group_name
}
# Grant the cluster's managed identity "Network Contributor" on the VNet so
# the cluster can manage routes/IPs in its subnet. Skipped when reusing an
# existing cluster, mirroring the identity resource above.
# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#add-role-assignment-for-managed-identity
resource "azurerm_role_assignment" "network_contributor_cluster" {
  count = var.use_existing_cluster ? 0 : 1

  principal_id         = azurerm_user_assigned_identity.cluster[0].principal_id
  role_definition_name = "Network Contributor"
  scope                = var.vnet_id
}
# AKS cluster provisioned through the upstream Azure/aks community module,
# pinned to 9.1.0 (attribute names below must match that version's inputs).
# Created only when not reusing an existing cluster.
module "aks" {
  count   = var.use_existing_cluster ? 0 : 1
  source  = "Azure/aks/azurerm"
  version = "9.1.0"

  resource_group_name       = var.resource_group_name
  cluster_name              = var.name
  location                  = var.location
  prefix                    = "tfy"
  workload_identity_enabled = var.workload_identity_enabled
  # Placeholder pool name used while the default node pool is rotated in place.
  temporary_name_for_rotation     = "tmpdefault"
  log_analytics_workspace_enabled = var.log_analytics_workspace_enabled
  # agents_labels = {
  #   "truefoundry" : "essential"
  # }

  # Default (initial) node pool configuration.
  agents_pool_name = var.initial_node_pool_name
  agents_min_count = var.initial_node_pool_min_count
  agents_max_count = var.initial_node_pool_max_count
  # NOTE(review): "intial" is a typo inherited from the variable declaration
  # (presumably in variables.tf); fix it there first, then update this reference.
  agents_size           = var.intial_node_pool_instance_type
  agents_max_pods       = var.max_pods_per_node
  agents_pool_max_surge = var.initial_node_pool_max_surge
  agents_tags           = local.tags
  orchestrator_version  = var.orchestrator_version

  # autoscaler configuration — tuned for fast scale-down (30% utilization
  # threshold, short unneeded/unready windows)
  auto_scaler_profile_enabled                          = true
  auto_scaler_profile_expander                         = "random"
  auto_scaler_profile_max_graceful_termination_sec     = "180"
  auto_scaler_profile_max_node_provisioning_time       = "5m"
  auto_scaler_profile_max_unready_nodes                = 0
  auto_scaler_profile_scale_down_delay_after_add       = "2m"
  auto_scaler_profile_scale_down_delay_after_delete    = "30s"
  auto_scaler_profile_scale_down_unneeded              = "1m"
  auto_scaler_profile_scale_down_unready               = "2m"
  auto_scaler_profile_scale_down_utilization_threshold = "0.3"

  # cluster level configurations
  api_server_authorized_ip_ranges = var.allowed_ip_ranges
  # The VNet role assignment is created explicitly above
  # (azurerm_role_assignment.network_contributor_cluster), so the module's
  # built-in assignment is disabled to avoid a duplicate.
  create_role_assignment_network_contributor = false
  enable_auto_scaling                        = true
  enable_host_encryption                     = true
  identity_ids                               = [azurerm_user_assigned_identity.cluster[0].id]
  identity_type                              = "UserAssigned"
  kubernetes_version                         = var.kubernetes_version

  # storage — CSI driver toggles passed through from input variables
  storage_profile_blob_driver_enabled         = var.enable_blob_driver
  storage_profile_disk_driver_enabled         = var.enable_disk_driver
  storage_profile_disk_driver_version         = var.disk_driver_version
  storage_profile_file_driver_enabled         = var.enable_file_driver
  storage_profile_snapshot_controller_enabled = var.enable_snapshot_controller
  storage_profile_enabled                     = var.enable_storage_profile

  # network configuration
  network_plugin             = var.network_plugin
  vnet_subnet_id             = var.subnet_id
  net_profile_dns_service_ip = var.dns_ip
  net_profile_service_cidr   = var.service_cidr
  net_profile_pod_cidr       = var.pod_cidr
  # net_profile_docker_bridge_cidr = "10.244.0.10"

  node_pools          = local.node_pools
  oidc_issuer_enabled = var.oidc_issuer_enabled
  os_disk_size_gb     = var.disk_size

  # makes the initial node pool have a taint `CriticalAddonsOnly=true:NoSchedule`
  # helpful in scheduling important workloads
  # only_critical_addons_enabled = true
  private_cluster_enabled = var.private_cluster_enabled

  # rbac
  # NOTE(review): both AAD integration and Kubernetes RBAC are disabled here —
  # confirm this is intentional, as it leaves the cluster without in-cluster RBAC.
  rbac_aad                          = false
  role_based_access_control_enabled = false

  sku_tier = var.sku_tier
  tags     = local.tags
}