# main.tf, forked from Azure/terraform-azurerm-aks.
# The upstream repository was archived by its owner on May 18, 2023 and is now read-only.
data "azurerm_resource_group" "main" {
name = var.resource_group_name
}
moved {
from = module.ssh-key.tls_private_key.ssh
to = tls_private_key.ssh[0]
}
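# The moved block above remaps state from the old `ssh-key` child module to the
# in-module resource, so an already-generated key is kept on upgrade instead of
# being destroyed and recreated.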
resource "tls_private_key" "ssh" {
count = var.admin_username == null ? 0 : 1
algorithm = "RSA"
rsa_bits = 2048
}
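# A 2048-bit RSA key is generated only when an admin username is provided; it
# serves as the fallback SSH key for the `linux_profile` block below whenever
# `var.public_ssh_key` is empty.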
resource "azurerm_kubernetes_cluster" "main" {
location = coalesce(var.location, data.azurerm_resource_group.main.location)
name = var.cluster_name == null ? "${var.prefix}-aks" : var.cluster_name
resource_group_name = data.azurerm_resource_group.main.name
api_server_authorized_ip_ranges = var.api_server_authorized_ip_ranges
automatic_channel_upgrade = var.automatic_channel_upgrade
azure_policy_enabled = var.azure_policy_enabled
disk_encryption_set_id = var.disk_encryption_set_id
dns_prefix = var.prefix
http_application_routing_enabled = var.http_application_routing_enabled
kubernetes_version = var.kubernetes_version
local_account_disabled = var.local_account_disabled
node_resource_group = var.node_resource_group
oidc_issuer_enabled = var.oidc_issuer_enabled
open_service_mesh_enabled = var.open_service_mesh_enabled
private_cluster_enabled = var.private_cluster_enabled
private_cluster_public_fqdn_enabled = var.private_cluster_public_fqdn_enabled
private_dns_zone_id = var.private_dns_zone_id
public_network_access_enabled = var.public_network_access_enabled
role_based_access_control_enabled = var.role_based_access_control_enabled
sku_tier = var.sku_tier
tags = var.tags
workload_identity_enabled = var.workload_identity_enabled
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? [] : ["default_node_pool_manually_scaled"]
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
max_count = null
max_pods = var.agents_max_pods
min_count = null
node_count = var.agents_count
node_labels = var.agents_labels
node_taints = var.agents_taints
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = var.pod_subnet_id
scale_down_mode = var.scale_down_mode
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = var.vnet_subnet_id
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
}
}
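# Exactly one of the two default_node_pool blocks renders: the manually scaled
# variant above when autoscaling is off, or the autoscaled variant below (which
# sets max_count/min_count instead of a fixed node_count) when
# `var.enable_auto_scaling` is true.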
dynamic "default_node_pool" {
for_each = var.enable_auto_scaling == true ? ["default_node_pool_auto_scaled"] : []
content {
name = var.agents_pool_name
vm_size = var.agents_size
enable_auto_scaling = var.enable_auto_scaling
enable_host_encryption = var.enable_host_encryption
enable_node_public_ip = var.enable_node_public_ip
max_count = var.agents_max_count
max_pods = var.agents_max_pods
min_count = var.agents_min_count
node_labels = var.agents_labels
node_taints = var.agents_taints
only_critical_addons_enabled = var.only_critical_addons_enabled
orchestrator_version = var.orchestrator_version
os_disk_size_gb = var.os_disk_size_gb
os_disk_type = var.os_disk_type
os_sku = var.os_sku
pod_subnet_id = var.pod_subnet_id
scale_down_mode = var.scale_down_mode
tags = merge(var.tags, var.agents_tags)
temporary_name_for_rotation = var.temporary_name_for_rotation
type = var.agents_type
ultra_ssd_enabled = var.ultra_ssd_enabled
vnet_subnet_id = var.vnet_subnet_id
zones = var.agents_availability_zones
dynamic "kubelet_config" {
for_each = var.agents_pool_kubelet_configs
content {
allowed_unsafe_sysctls = kubelet_config.value.allowed_unsafe_sysctls
container_log_max_line = kubelet_config.value.container_log_max_line
container_log_max_size_mb = kubelet_config.value.container_log_max_size_mb
cpu_cfs_quota_enabled = kubelet_config.value.cpu_cfs_quota_enabled
cpu_cfs_quota_period = kubelet_config.value.cpu_cfs_quota_period
cpu_manager_policy = kubelet_config.value.cpu_manager_policy
image_gc_high_threshold = kubelet_config.value.image_gc_high_threshold
image_gc_low_threshold = kubelet_config.value.image_gc_low_threshold
pod_max_pid = kubelet_config.value.pod_max_pid
topology_manager_policy = kubelet_config.value.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = var.agents_pool_linux_os_configs
content {
swap_file_size_mb = linux_os_config.value.swap_file_size_mb
transparent_huge_page_defrag = linux_os_config.value.transparent_huge_page_defrag
transparent_huge_page_enabled = linux_os_config.value.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = linux_os_config.value.sysctl_configs == null ? [] : linux_os_config.value.sysctl_configs
content {
fs_aio_max_nr = sysctl_config.value.fs_aio_max_nr
fs_file_max = sysctl_config.value.fs_file_max
fs_inotify_max_user_watches = sysctl_config.value.fs_inotify_max_user_watches
fs_nr_open = sysctl_config.value.fs_nr_open
kernel_threads_max = sysctl_config.value.kernel_threads_max
net_core_netdev_max_backlog = sysctl_config.value.net_core_netdev_max_backlog
net_core_optmem_max = sysctl_config.value.net_core_optmem_max
net_core_rmem_default = sysctl_config.value.net_core_rmem_default
net_core_rmem_max = sysctl_config.value.net_core_rmem_max
net_core_somaxconn = sysctl_config.value.net_core_somaxconn
net_core_wmem_default = sysctl_config.value.net_core_wmem_default
net_core_wmem_max = sysctl_config.value.net_core_wmem_max
net_ipv4_ip_local_port_range_max = sysctl_config.value.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = sysctl_config.value.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = sysctl_config.value.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = sysctl_config.value.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = sysctl_config.value.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = sysctl_config.value.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = sysctl_config.value.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = sysctl_config.value.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = sysctl_config.value.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = sysctl_config.value.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = sysctl_config.value.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = sysctl_config.value.net_netfilter_nf_conntrack_max
vm_max_map_count = sysctl_config.value.vm_max_map_count
vm_swappiness = sysctl_config.value.vm_swappiness
vm_vfs_cache_pressure = sysctl_config.value.vm_vfs_cache_pressure
}
}
}
}
}
}
dynamic "aci_connector_linux" {
for_each = var.aci_connector_linux_enabled ? ["aci_connector_linux"] : []
content {
subnet_name = var.aci_connector_linux_subnet_name
}
}
dynamic "auto_scaler_profile" {
for_each = var.auto_scaler_profile_enabled ? ["default_auto_scaler_profile"] : []
content {
balance_similar_node_groups = var.auto_scaler_profile_balance_similar_node_groups
empty_bulk_delete_max = var.auto_scaler_profile_empty_bulk_delete_max
expander = var.auto_scaler_profile_expander
max_graceful_termination_sec = var.auto_scaler_profile_max_graceful_termination_sec
max_node_provisioning_time = var.auto_scaler_profile_max_node_provisioning_time
max_unready_nodes = var.auto_scaler_profile_max_unready_nodes
max_unready_percentage = var.auto_scaler_profile_max_unready_percentage
new_pod_scale_up_delay = var.auto_scaler_profile_new_pod_scale_up_delay
scale_down_delay_after_add = var.auto_scaler_profile_scale_down_delay_after_add
scale_down_delay_after_delete = local.auto_scaler_profile_scale_down_delay_after_delete
scale_down_delay_after_failure = var.auto_scaler_profile_scale_down_delay_after_failure
scale_down_unneeded = var.auto_scaler_profile_scale_down_unneeded
scale_down_unready = var.auto_scaler_profile_scale_down_unready
scale_down_utilization_threshold = var.auto_scaler_profile_scale_down_utilization_threshold
scan_interval = var.auto_scaler_profile_scan_interval
skip_nodes_with_local_storage = var.auto_scaler_profile_skip_nodes_with_local_storage
skip_nodes_with_system_pods = var.auto_scaler_profile_skip_nodes_with_system_pods
}
}
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.role_based_access_control_enabled && var.rbac_aad && var.rbac_aad_managed ? ["rbac"] : []
content {
admin_group_object_ids = var.rbac_aad_admin_group_object_ids
azure_rbac_enabled = var.rbac_aad_azure_rbac_enabled
managed = true
tenant_id = var.rbac_aad_tenant_id
}
}
dynamic "azure_active_directory_role_based_access_control" {
for_each = var.role_based_access_control_enabled && var.rbac_aad && !var.rbac_aad_managed ? ["rbac"] : []
content {
client_app_id = var.rbac_aad_client_app_id
managed = false
server_app_id = var.rbac_aad_server_app_id
server_app_secret = var.rbac_aad_server_app_secret
tenant_id = var.rbac_aad_tenant_id
}
}
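# The two azure_active_directory_role_based_access_control blocks above are
# mutually exclusive: the first renders for managed AAD integration
# (`rbac_aad_managed = true`), the second for the legacy client/server
# application flow.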
dynamic "identity" {
for_each = var.client_id == "" || var.client_secret == "" ? ["identity"] : []
content {
type = var.identity_type
identity_ids = var.identity_ids
}
}
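# Authentication is either managed identity or service principal, never both:
# the identity block above renders when `client_id`/`client_secret` are empty,
# and the service_principal block further below renders when both are set.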
dynamic "ingress_application_gateway" {
for_each = var.ingress_application_gateway_enabled ? ["ingress_application_gateway"] : []
content {
gateway_id = var.ingress_application_gateway_id
gateway_name = var.ingress_application_gateway_name
subnet_cidr = var.ingress_application_gateway_subnet_cidr
subnet_id = var.ingress_application_gateway_subnet_id
}
}
dynamic "key_management_service" {
for_each = var.kms_enabled ? ["key_management_service"] : []
content {
key_vault_key_id = var.kms_key_vault_key_id
key_vault_network_access = var.kms_key_vault_network_access
}
}
dynamic "key_vault_secrets_provider" {
for_each = var.key_vault_secrets_provider_enabled ? ["key_vault_secrets_provider"] : []
content {
secret_rotation_enabled = var.secret_rotation_enabled
secret_rotation_interval = var.secret_rotation_interval
}
}
dynamic "linux_profile" {
for_each = var.admin_username == null ? [] : ["linux_profile"]
content {
admin_username = var.admin_username
ssh_key {
key_data = replace(coalesce(var.public_ssh_key, tls_private_key.ssh[0].public_key_openssh), "\n", "")
}
}
}
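# `coalesce` falls through null and empty strings, so the generated key from
# `tls_private_key.ssh` is used whenever `var.public_ssh_key` is unset; the
# trailing newline that `public_key_openssh` emits is stripped out.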
dynamic "maintenance_window" {
for_each = var.maintenance_window != null ? ["maintenance_window"] : []
content {
dynamic "allowed" {
for_each = var.maintenance_window.allowed
content {
day = allowed.value.day
hours = allowed.value.hours
}
}
dynamic "not_allowed" {
for_each = var.maintenance_window.not_allowed
content {
end = not_allowed.value.end
start = not_allowed.value.start
}
}
}
}
dynamic "microsoft_defender" {
for_each = var.microsoft_defender_enabled ? ["microsoft_defender"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
}
}
dynamic "monitor_metrics" {
for_each = var.monitor_metrics != null ? ["monitor_metrics"] : []
content {
annotations_allowed = var.monitor_metrics.annotations_allowed
labels_allowed = var.monitor_metrics.labels_allowed
}
}
network_profile {
network_plugin = var.network_plugin
dns_service_ip = var.net_profile_dns_service_ip
load_balancer_sku = var.load_balancer_sku
network_policy = var.network_policy
outbound_type = var.net_profile_outbound_type
pod_cidr = var.net_profile_pod_cidr
service_cidr = var.net_profile_service_cidr
dynamic "load_balancer_profile" {
for_each = var.load_balancer_profile_enabled && var.load_balancer_sku == "standard" ? ["load_balancer_profile"] : []
content {
idle_timeout_in_minutes = var.load_balancer_profile_idle_timeout_in_minutes
managed_outbound_ip_count = var.load_balancer_profile_managed_outbound_ip_count
managed_outbound_ipv6_count = var.load_balancer_profile_managed_outbound_ipv6_count
outbound_ip_address_ids = var.load_balancer_profile_outbound_ip_address_ids
outbound_ip_prefix_ids = var.load_balancer_profile_outbound_ip_prefix_ids
outbound_ports_allocated = var.load_balancer_profile_outbound_ports_allocated
}
}
}
dynamic "oms_agent" {
for_each = var.log_analytics_workspace_enabled ? ["oms_agent"] : []
content {
log_analytics_workspace_id = local.log_analytics_workspace.id
}
}
dynamic "service_principal" {
for_each = var.client_id != "" && var.client_secret != "" ? ["service_principal"] : []
content {
client_id = var.client_id
client_secret = var.client_secret
}
}
dynamic "storage_profile" {
for_each = var.storage_profile_enabled ? ["storage_profile"] : []
content {
blob_driver_enabled = var.storage_profile_blob_driver_enabled
disk_driver_enabled = var.storage_profile_disk_driver_enabled
disk_driver_version = var.storage_profile_disk_driver_version
file_driver_enabled = var.storage_profile_file_driver_enabled
snapshot_controller_enabled = var.storage_profile_snapshot_controller_enabled
}
}
dynamic "web_app_routing" {
for_each = var.web_app_routing == null ? [] : ["web_app_routing"]
content {
dns_zone_id = var.web_app_routing.dns_zone_id
}
}
lifecycle {
ignore_changes = [kubernetes_version]
precondition {
condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type != "")
error_message = "Either `client_id` and `client_secret` or `identity_type` must be set."
}
precondition {
# Why not use `var.identity_ids != null && length(var.identity_ids) > 0`? Because boolean expressions in Terraform are not short-circuited, so even when `var.identity_ids` is null Terraform would still invoke the `length` function with null and raise an error. https://github.com/hashicorp/terraform/issues/24128
condition = (var.client_id != "" && var.client_secret != "") || (var.identity_type == "SystemAssigned") || (var.identity_ids == null ? false : length(var.identity_ids) > 0)
error_message = "If use identity and `UserAssigned` is set, an `identity_ids` must be set as well."
}
precondition {
condition = !(var.microsoft_defender_enabled && !var.log_analytics_workspace_enabled)
error_message = "Enabling Microsoft Defender requires that `log_analytics_workspace_enabled` be set to true."
}
precondition {
condition = !(var.load_balancer_profile_enabled && var.load_balancer_sku != "standard")
error_message = "Enabling load_balancer_profile requires that `load_balancer_sku` be set to `standard`"
}
precondition {
condition = local.automatic_channel_upgrade_check
error_message = "Either disable automatic upgrades, or specify `kubernetes_version` or `orchestrator_version` only up to the minor version when using `automatic_channel_upgrade=patch`. You don't need to specify `kubernetes_version` at all when using `automatic_channel_upgrade=stable|rapid|node-image`, where `orchestrator_version` always must be set to `null`."
}
precondition {
condition = var.role_based_access_control_enabled || !var.rbac_aad
error_message = "Enabling Azure Active Directory integration requires that `role_based_access_control_enabled` be set to true."
}
precondition {
condition = !(var.kms_enabled && var.identity_type != "UserAssigned")
error_message = "KMS etcd encryption doesn't work with system-assigned managed identity."
}
}
}
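# A minimal sketch of consuming this module (the `source` value is a
# placeholder; see variables.tf for the full set of inputs and their defaults):
#
# module "aks" {
#   source              = "<module source>"
#   resource_group_name = "existing-rg"
#   prefix              = "demo"
#   agents_size         = "Standard_D2s_v3"
#   agents_count        = 2
# }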
resource "null_resource" "kubernetes_version_keeper" {
triggers = {
version = var.kubernetes_version
}
}
resource "azapi_update_resource" "aks_cluster_post_create" {
type = "Microsoft.ContainerService/managedClusters@2023-01-02-preview"
body = jsonencode({
properties = {
kubernetesVersion = var.kubernetes_version
}
})
resource_id = azurerm_kubernetes_cluster.main.id
lifecycle {
ignore_changes = all
replace_triggered_by = [null_resource.kubernetes_version_keeper.id]
}
}
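# Upgrade flow: the cluster resource ignores `kubernetes_version` drift (see
# its lifecycle block), while the null_resource keeper changes whenever the
# variable changes and, via `replace_triggered_by`, forces the azapi patch
# above to re-run and roll the control plane to the requested version in place.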
resource "azurerm_kubernetes_cluster_node_pool" "node_pool" {
for_each = var.node_pools
kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id
name = each.value.name
vm_size = each.value.vm_size
capacity_reservation_group_id = each.value.capacity_reservation_group_id
custom_ca_trust_enabled = each.value.custom_ca_trust_enabled
enable_auto_scaling = each.value.enable_auto_scaling
enable_host_encryption = each.value.enable_host_encryption
enable_node_public_ip = each.value.enable_node_public_ip
eviction_policy = each.value.eviction_policy
fips_enabled = each.value.fips_enabled
host_group_id = each.value.host_group_id
kubelet_disk_type = each.value.kubelet_disk_type
max_count = each.value.max_count
max_pods = each.value.max_pods
message_of_the_day = each.value.message_of_the_day
min_count = each.value.min_count
mode = each.value.mode
node_count = each.value.node_count
node_labels = each.value.node_labels
node_public_ip_prefix_id = each.value.node_public_ip_prefix_id
node_taints = each.value.node_taints
orchestrator_version = each.value.orchestrator_version
os_disk_size_gb = each.value.os_disk_size_gb
os_disk_type = each.value.os_disk_type
os_sku = each.value.os_sku
os_type = each.value.os_type
pod_subnet_id = each.value.pod_subnet_id
priority = each.value.priority
proximity_placement_group_id = each.value.proximity_placement_group_id
scale_down_mode = each.value.scale_down_mode
spot_max_price = each.value.spot_max_price
tags = each.value.tags
ultra_ssd_enabled = each.value.ultra_ssd_enabled
vnet_subnet_id = each.value.vnet_subnet_id
workload_runtime = each.value.workload_runtime
zones = each.value.zones
dynamic "kubelet_config" {
for_each = each.value.kubelet_config == null ? [] : ["kubelet_config"]
content {
allowed_unsafe_sysctls = each.value.kubelet_config.allowed_unsafe_sysctls
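# Note: maps the pool input `container_log_max_files` onto the provider
# argument `container_log_max_line` (the default node pool above uses
# `container_log_max_line` directly).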
container_log_max_line = each.value.kubelet_config.container_log_max_files
container_log_max_size_mb = each.value.kubelet_config.container_log_max_size_mb
cpu_cfs_quota_enabled = each.value.kubelet_config.cpu_cfs_quota_enabled
cpu_cfs_quota_period = each.value.kubelet_config.cpu_cfs_quota_period
cpu_manager_policy = each.value.kubelet_config.cpu_manager_policy
image_gc_high_threshold = each.value.kubelet_config.image_gc_high_threshold
image_gc_low_threshold = each.value.kubelet_config.image_gc_low_threshold
pod_max_pid = each.value.kubelet_config.pod_max_pid
topology_manager_policy = each.value.kubelet_config.topology_manager_policy
}
}
dynamic "linux_os_config" {
for_each = each.value.linux_os_config == null ? [] : ["linux_os_config"]
content {
swap_file_size_mb = each.value.linux_os_config.swap_file_size_mb
transparent_huge_page_defrag = each.value.linux_os_config.transparent_huge_page_defrag
transparent_huge_page_enabled = each.value.linux_os_config.transparent_huge_page_enabled
dynamic "sysctl_config" {
for_each = each.value.linux_os_config.sysctl_config == null ? [] : ["sysctl_config"]
content {
fs_aio_max_nr = each.value.linux_os_config.sysctl_config.fs_aio_max_nr
fs_file_max = each.value.linux_os_config.sysctl_config.fs_file_max
fs_inotify_max_user_watches = each.value.linux_os_config.sysctl_config.fs_inotify_max_user_watches
fs_nr_open = each.value.linux_os_config.sysctl_config.fs_nr_open
kernel_threads_max = each.value.linux_os_config.sysctl_config.kernel_threads_max
net_core_netdev_max_backlog = each.value.linux_os_config.sysctl_config.net_core_netdev_max_backlog
net_core_optmem_max = each.value.linux_os_config.sysctl_config.net_core_optmem_max
net_core_rmem_default = each.value.linux_os_config.sysctl_config.net_core_rmem_default
net_core_rmem_max = each.value.linux_os_config.sysctl_config.net_core_rmem_max
net_core_somaxconn = each.value.linux_os_config.sysctl_config.net_core_somaxconn
net_core_wmem_default = each.value.linux_os_config.sysctl_config.net_core_wmem_default
net_core_wmem_max = each.value.linux_os_config.sysctl_config.net_core_wmem_max
net_ipv4_ip_local_port_range_max = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_max
net_ipv4_ip_local_port_range_min = each.value.linux_os_config.sysctl_config.net_ipv4_ip_local_port_range_min
net_ipv4_neigh_default_gc_thresh1 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh1
net_ipv4_neigh_default_gc_thresh2 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh2
net_ipv4_neigh_default_gc_thresh3 = each.value.linux_os_config.sysctl_config.net_ipv4_neigh_default_gc_thresh3
net_ipv4_tcp_fin_timeout = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_fin_timeout
net_ipv4_tcp_keepalive_intvl = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_intvl
net_ipv4_tcp_keepalive_probes = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_probes
net_ipv4_tcp_keepalive_time = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_keepalive_time
net_ipv4_tcp_max_syn_backlog = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_syn_backlog
net_ipv4_tcp_max_tw_buckets = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_max_tw_buckets
net_ipv4_tcp_tw_reuse = each.value.linux_os_config.sysctl_config.net_ipv4_tcp_tw_reuse
net_netfilter_nf_conntrack_buckets = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_buckets
net_netfilter_nf_conntrack_max = each.value.linux_os_config.sysctl_config.net_netfilter_nf_conntrack_max
vm_max_map_count = each.value.linux_os_config.sysctl_config.vm_max_map_count
vm_swappiness = each.value.linux_os_config.sysctl_config.vm_swappiness
vm_vfs_cache_pressure = each.value.linux_os_config.sysctl_config.vm_vfs_cache_pressure
}
}
}
}
dynamic "node_network_profile" {
for_each = each.value.node_network_profile == null ? [] : ["node_network_profile"]
content {
node_public_ip_tags = each.value.node_network_profile.node_public_ip_tags
}
}
dynamic "upgrade_settings" {
for_each = each.value.upgrade_settings == null ? [] : ["upgrade_settings"]
content {
max_surge = each.value.upgrade_settings.max_surge
}
}
dynamic "windows_profile" {
for_each = each.value.windows_profile == null ? [] : ["windows_profile"]
content {
outbound_nat_enabled = each.value.windows_profile.outbound_nat_enabled
}
}
depends_on = [azapi_update_resource.aks_cluster_post_create]
lifecycle {
precondition {
condition = var.agents_type == "VirtualMachineScaleSets"
error_message = "Multiple Node Pools are only supported when the Kubernetes Cluster is using Virtual Machine Scale Sets."
}
}
}
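# A sketch of one entry in `var.node_pools`, consumed by the node pool resource
# above (attribute names follow the references above; the authoritative object
# type, including which attributes are optional, lives in variables.tf):
#
# node_pools = {
#   workers = {
#     name       = "workers"
#     vm_size    = "Standard_D4s_v3"
#     node_count = 3
#     mode       = "User"
#   }
# }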
resource "azurerm_log_analytics_workspace" "main" {
count = local.create_analytics_workspace ? 1 : 0
location = coalesce(var.location, data.azurerm_resource_group.main.location)
name = var.cluster_log_analytics_workspace_name == null ? "${var.prefix}-workspace" : var.cluster_log_analytics_workspace_name
resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name)
retention_in_days = var.log_retention_in_days
sku = var.log_analytics_workspace_sku
tags = var.tags
}
locals {
azurerm_log_analytics_workspace_id = try(azurerm_log_analytics_workspace.main[0].id, null)
azurerm_log_analytics_workspace_name = try(azurerm_log_analytics_workspace.main[0].name, null)
}
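# try() degrades these locals to null when the workspace is not created by this
# module, instead of raising an index error on the empty resource list.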
resource "azurerm_log_analytics_solution" "main" {
count = local.create_analytics_solution ? 1 : 0
location = coalesce(var.location, data.azurerm_resource_group.main.location)
resource_group_name = coalesce(var.log_analytics_workspace_resource_group_name, var.resource_group_name)
solution_name = "ContainerInsights"
workspace_name = local.log_analytics_workspace.name
workspace_resource_id = local.log_analytics_workspace.id
tags = var.tags
plan {
product = "OMSGallery/ContainerInsights"
publisher = "Microsoft"
}
}
resource "azurerm_role_assignment" "acr" {
for_each = var.attached_acr_id_map
principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id
scope = each.value
role_definition_name = "AcrPull"
skip_service_principal_aad_check = true
}
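# The role assignment above grants the kubelet identity AcrPull on each
# attached registry. A sketch of the expected shape of
# `var.attached_acr_id_map` (keys are arbitrary labels, values are ACR
# resource IDs):
#
# attached_acr_id_map = {
#   main = "/subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.ContainerRegistry/registries/<registry>"
# }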
# The AKS cluster identity has the Contributor role on the AKS-managed node
# resource group (e.g. MC_myResourceGroup_myAKSCluster_eastus).
# However, when using a custom VNet, the AKS cluster identity also needs the
# Network Contributor role on the VNet subnets used by the system node pool
# and by any additional node pools.
# https://learn.microsoft.com/en-us/azure/aks/configure-kubenet#prerequisites
# https://learn.microsoft.com/en-us/azure/aks/configure-azure-cni#prerequisites
# https://github.com/Azure/terraform-azurerm-aks/issues/178
resource "azurerm_role_assignment" "network_contributor" {
for_each = var.create_role_assignment_network_contributor ? local.subnet_ids : []
principal_id = azurerm_kubernetes_cluster.main.kubelet_identity[0].object_id
scope = each.value
role_definition_name = "Network Contributor"
}