From ebfe7cdfd894ebe1b35e8cc5952142f16a0eecfe Mon Sep 17 00:00:00 2001
From: Anthony Howe
Date: Fri, 23 Apr 2021 16:00:14 -0400
Subject: [PATCH] fix formatting (#1167)

---
 src/terraform/examples/azcopysync/main.tf | 64 +--
 src/terraform/examples/centos-ci/main.tf | 78 +--
 src/terraform/examples/centos/cycle/main.tf | 38 +-
 src/terraform/examples/centos/main.tf | 104 ++--
 .../examples/centos/stockimage/main.tf | 120 ++---
 src/terraform/examples/centosgridgpu/main.tf | 70 +--
 src/terraform/examples/dnsserver/main.tf | 294 +++++------
 .../examples/dnsserver/standalone/main.tf | 88 ++--
 .../0.network/fourthregion/main.tf | 80 +--
 .../0.network/main.tf | 128 ++---
 .../4.windowsclient/main.tf | 82 ++--
 src/terraform/examples/hammerspace/main.tf | 240 ++++-----
 .../houdinienvironment/0.network/main.tf | 52 +-
 .../1.storage/blobstorage/main.tf | 36 +-
 .../1.storage/nfsfiler/main.tf | 68 +--
 .../2.windowsstockvm/main.tf | 34 +-
 .../4.rendernodes/vm/main.tf | 82 ++--
 .../4.rendernodes/vmss/main.tf | 104 ++--
 .../4.rendernodes/vmssephemeral/main.tf | 128 ++---
 .../examples/nfsfilerganesha/main.tf | 166 +++----
 src/terraform/examples/nfsfilermd/main.tf | 170 +++----
 src/terraform/examples/securedimage/main.tf | 164 +++----
 src/terraform/examples/vfxt/3-filers/main.tf | 330 ++++++-------
 .../examples/vfxt/HoudiniOptimized/main.tf | 252 +++++-----
 .../vfxt/azureblobfiler-zonal/main.tf | 200 ++++----
 .../examples/vfxt/azureblobfiler/main.tf | 194 ++++----
 .../collaboratingcloudworkstation/main.tf | 232 ++++-----
 .../isolatedcloudworkstation/main.tf | 232 ++++-----
 .../examples/vfxt/custom-vserver/main.tf | 240 ++++-----
 .../examples/vfxt/hammerspace/main.tf | 392 +++++++--------
 .../vfxt/netapp-across-region/main.tf | 228 ++++-----
 src/terraform/examples/vfxt/netapp/main.tf | 214 ++++----
 .../examples/vfxt/opencue/new-storage/main.tf | 396 +++++++--------
 .../opencue/pre-existing-azure-blob/main.tf | 400 +++++++--------
 .../examples/vfxt/pipeline/centos/main.tf | 88 ++--
 .../examples/vfxt/pipeline/ubuntu/main.tf | 88 ++--
 src/terraform/examples/vfxt/proxy/main.tf | 302 ++++++------
 src/terraform/examples/vfxt/run-local/main.tf | 104 ++--
 .../user-assigned-managed-identity/main.tf | 298 ++++++------
 .../vfxt/vdbench/azureblobfiler/main.tf | 252 +++++-----
 src/terraform/examples/vfxt/vdbench/main.tf | 74 +--
 src/terraform/examples/vfxt/vfxt-only/main.tf | 192 ++++----
 src/terraform/examples/vfxt/vmss/main.tf | 288 +++++------
 src/terraform/examples/vmss-rendering/main.tf | 96 ++--
 .../examples/windows/client-ps-cse/main.tf | 14 +-
 src/terraform/examples/windowsgridgpu/main.tf | 86 ++--
 src/terraform/examples/wireguard/main.tf | 456 +++++++++---------
 .../modules/cachewarmer_build/main.tf | 20 +-
 .../modules/cachewarmer_build/outputs.tf | 4 +-
 .../modules/cachewarmer_build/variables.tf | 18 +-
 .../cachewarmer_manager_install/main.tf | 30 +-
 .../cachewarmer_manager_install/outputs.tf | 4 +-
 .../cachewarmer_manager_install/variables.tf | 42 +-
 .../modules/cachewarmer_submitjob/main.tf | 18 +-
 .../modules/cachewarmer_submitjob/outputs.tf | 4 +-
 .../cachewarmer_submitjob/variables.tf | 28 +-
 .../cachewarmer_submitmultiplejobs/main.tf | 22 +-
 .../cachewarmer_submitmultiplejobs/outputs.tf | 4 +-
 .../variables.tf | 26 +-
 src/terraform/modules/centosgridgpu/main.tf | 36 +-
 .../modules/centosgridgpu/outputs.tf | 8 +-
 .../modules/centosgridgpu/variables.tf | 26 +-
 src/terraform/modules/controller3/main.tf | 76 +--
 src/terraform/modules/controller3/outputs.tf | 8 +-
 .../modules/controller3/variables.tf | 32 +-
src/terraform/modules/dnsserver/main.tf | 168 +++---- src/terraform/modules/dnsserver/outputs.tf | 8 +- .../anvil-run-once-configure/main.tf | 4 +- .../anvil-run-once-configure/variables.tf | 18 +- .../modules/hammerspace/anvil/main.tf | 18 +- .../modules/hammerspace/anvil/outputs.tf | 2 +- .../modules/hammerspace/anvil/variables.tf | 22 +- src/terraform/modules/hammerspace/dsx/main.tf | 18 +- .../modules/hammerspace/dsx/outputs.tf | 4 +- .../modules/hammerspace/dsx/variables.tf | 16 +- src/terraform/modules/jumpbox/main.tf | 56 +-- src/terraform/modules/jumpbox/outputs.tf | 6 +- src/terraform/modules/jumpbox/variables.tf | 24 +- src/terraform/modules/mount_nfs/main.tf | 16 +- src/terraform/modules/mount_nfs/outputs.tf | 2 +- src/terraform/modules/mount_nfs/variables.tf | 16 +- .../modules/nfs_filer_ganesha/variables.tf | 26 +- src/terraform/modules/nfs_filer_md/main.tf | 46 +- src/terraform/modules/nfs_filer_md/outputs.tf | 2 +- .../modules/nfs_filer_md/variables.tf | 28 +- src/terraform/modules/nfsbridge/main.tf | 38 +- src/terraform/modules/nfsbridge/outputs.tf | 8 +- src/terraform/modules/nfsbridge/variables.tf | 18 +- src/terraform/modules/opencue_config/main.tf | 18 +- .../modules/opencue_config/outputs.tf | 4 +- .../modules/opencue_config/variables.tf | 14 +- src/terraform/modules/proxy/main.tf | 44 +- src/terraform/modules/proxy/outputs.tf | 2 +- src/terraform/modules/proxy/variables.tf | 10 +- src/terraform/modules/render_network/main.tf | 180 +++---- .../modules/render_network/outputs.tf | 6 +- .../modules/render_network/variables.tf | 86 ++-- .../modules/render_network_secure/main.tf | 398 +++++++-------- .../modules/render_network_secure/outputs.tf | 2 +- .../render_network_secure/variables.tf | 70 +-- src/terraform/modules/vdbench_config/main.tf | 18 +- .../modules/vdbench_config/variables.tf | 18 +- src/terraform/modules/vmss_config/main.tf | 18 +- src/terraform/modules/vmss_config/outputs.tf | 4 +- .../modules/vmss_config/variables.tf | 16 +- .../modules/windowsgridgpu/outputs.tf | 8 +- .../modules/windowsgridgpu/variables.tf | 38 +- 107 files changed, 4980 insertions(+), 4982 deletions(-) diff --git a/src/terraform/examples/azcopysync/main.tf b/src/terraform/examples/azcopysync/main.tf index 4bfbd62e2..598f0f99d 100644 --- a/src/terraform/examples/azcopysync/main.tf +++ b/src/terraform/examples/azcopysync/main.tf @@ -1,16 +1,16 @@ // customize the simple VM by adjusting the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - vm_admin_password = "PASSWORD" - resource_group_name = "resource_group" - // set the following to true, otherwise use windows server - use_windows_desktop = false - - // provide a globally unique name - storage_account_name = "storageaccount" - container_name = "previz" + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + vm_admin_password = "PASSWORD" + resource_group_name = "resource_group" + // set the following to true, otherwise use windows server + use_windows_desktop = false + + // provide a globally unique name + storage_account_name = "storageaccount" + container_name = "previz" } terraform { @@ -67,10 +67,10 @@ resource "azurerm_subnet" "subnet" { } resource "azurerm_public_ip" "vm" { - name = "publicip" - resource_group_name = azurerm_resource_group.rg.name - location = azurerm_resource_group.rg.location - allocation_method = "Static" + name = "publicip" + resource_group_name = azurerm_resource_group.rg.name + location = 
azurerm_resource_group.rg.location + allocation_method = "Static" } resource "azurerm_network_interface" "nic" { @@ -103,28 +103,28 @@ resource "azurerm_windows_virtual_machine" "vm" { } dynamic "source_image_reference" { - for_each = local.use_windows_desktop == false ? ["MicrosoftWindowsServer"] : [] - content { - publisher = "MicrosoftWindowsServer" - offer = "WindowsServer" - sku = "2016-Datacenter" - version = "latest" - } + for_each = local.use_windows_desktop == false ? ["MicrosoftWindowsServer"] : [] + content { + publisher = "MicrosoftWindowsServer" + offer = "WindowsServer" + sku = "2016-Datacenter" + version = "latest" + } } dynamic "source_image_reference" { - for_each = local.use_windows_desktop == true ? ["MicrosoftWindowsDesktop"] : [] - content { - publisher = "MicrosoftWindowsDesktop" - offer = "Windows-10" - sku = "19h2-pro" - version = "latest" - } + for_each = local.use_windows_desktop == true ? ["MicrosoftWindowsDesktop"] : [] + content { + publisher = "MicrosoftWindowsDesktop" + offer = "Windows-10" + sku = "19h2-pro" + version = "latest" + } } } output "rdp_username" { - value = local.vm_admin_username + value = local.vm_admin_username } output "rdp_address" { @@ -132,9 +132,9 @@ output "rdp_address" { } output "storage_account_container_sas_command_prefix" { - value = "export SAS_PREFIX=https://${local.storage_account_name}.blob.core.windows.net/${local.container_name}?" + value = "export SAS_PREFIX=https://${local.storage_account_name}.blob.core.windows.net/${local.container_name}?" } output "storage_account_container_sas_command_suffix" { - value = "export SAS_SUFFIX=$(az storage container generate-sas --account-name ${local.storage_account_name} --https-only --permissions acdlrw --start 2020-04-06T00:00:00Z --expiry 2021-01-01T00:00:00Z --name ${local.container_name} --output tsv)" + value = "export SAS_SUFFIX=$(az storage container generate-sas --account-name ${local.storage_account_name} --https-only --permissions acdlrw --start 2020-04-06T00:00:00Z --expiry 2021-01-01T00:00:00Z --name ${local.container_name} --output tsv)" } diff --git a/src/terraform/examples/centos-ci/main.tf b/src/terraform/examples/centos-ci/main.tf index 260d29027..930b32de1 100644 --- a/src/terraform/examples/centos-ci/main.tf +++ b/src/terraform/examples/centos-ci/main.tf @@ -1,29 +1,29 @@ // customize the Secured VM by adjusting the following local variables locals { - // the region of the deployment - location = "westus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "PASSWORD" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- - resource_group_name = "centosresource_group" - vm_size = "Standard_D2s_v3" - - // network details - virtual_network_resource_group = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "render_clients2" - - # load the files as b64 - foreman_file_b64 = base64gzip(replace(file("${path.module}/20-foreman.cfg"),"\r","")) - example_file_b64 = base64gzip(replace(file("${path.module}/examplefile.txt"),"\r","")) - - # embed the files - cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { foreman_file = local.foreman_file_b64, example_file_b64 = local.example_file_b64}) + // the region of the deployment + location = "westus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "PASSWORD" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + resource_group_name = "centosresource_group" + vm_size = "Standard_D2s_v3" + + // network details + virtual_network_resource_group = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "render_clients2" + + # load the files as b64 + foreman_file_b64 = base64gzip(replace(file("${path.module}/20-foreman.cfg"), "\r", "")) + example_file_b64 = base64gzip(replace(file("${path.module}/examplefile.txt"), "\r", "")) + + # embed the files + cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { foreman_file = local.foreman_file_b64, example_file_b64 = local.example_file_b64 }) } terraform { @@ -70,35 +70,35 @@ resource "azurerm_linux_virtual_machine" "main" { network_interface_ids = [azurerm_network_interface.main.id] computer_name = "vm" size = local.vm_size - + # this encodes the payload - custom_data = base64encode(local.cloud_init_file) - - // if needed replace source image reference with the id + custom_data = base64encode(local.cloud_init_file) + + // if needed replace source image reference with the id // source_image_id = "some image id" source_image_reference { publisher = "OpenLogic" offer = "CentOS" sku = "7_9" version = "latest" - } + } // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" } - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } @@ -111,5 +111,5 @@ output "vm_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" +} diff --git a/src/terraform/examples/centos/cycle/main.tf b/src/terraform/examples/centos/cycle/main.tf index 80c6e1a6c..00bb3b0b8 100644 --- a/src/terraform/examples/centos/cycle/main.tf +++ b/src/terraform/examples/centos/cycle/main.tf @@ -2,7 +2,7 @@ locals { resource_group_name = "cycle_rg" // paste in the id of the full custom image - vm_size = "Standard_D4s_v3" + vm_size = "Standard_D4s_v3" vm_admin_username = "azureuser" // use either SSH Key data or admin password, if ssh_key_data is specified // then admin_password is ignored @@ -10,17 +10,17 @@ locals { // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 // populated where you are running terraform vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - + # you can choose to use the marketplace image or a manual install # 1. true - marketplace image - https://docs.microsoft.com/en-us/azure/cyclecloud/qs-install-marketplace # 2. false - manual install - this installs on a centos image: https://docs.microsoft.com/en-us/azure/cyclecloud/how-to/install-manual use_marketplace_image = false // replace below variables with the infrastructure variables from 0.network - location = "" + location = "" vnet_jumpbox_subnet_name = "" - vnet_name = "" - vnet_resource_group = "" + vnet_name = "" + vnet_resource_group = "" } terraform { @@ -43,19 +43,19 @@ resource "azurerm_resource_group" "vm" { } module "cyclecloud" { - source = "github.com/Azure/Avere/src/terraform/modules/cyclecloud" - resource_group_name = azurerm_resource_group.vm.name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = local.vm_size - use_marketplace = local.use_marketplace_image + source = "github.com/Azure/Avere/src/terraform/modules/cyclecloud" + resource_group_name = azurerm_resource_group.vm.name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = local.vm_size + use_marketplace = local.use_marketplace_image - // network details - virtual_network_resource_group = local.vnet_resource_group - virtual_network_name = local.vnet_name - virtual_network_subnet_name = local.vnet_jumpbox_subnet_name + // network details + virtual_network_resource_group = local.vnet_resource_group + virtual_network_name = local.vnet_name + virtual_network_subnet_name = local.vnet_jumpbox_subnet_name } output "nfs_username" { @@ -67,5 +67,5 @@ output "nfs_address" { } output "ssh_command" { - value = "ssh ${module.cyclecloud.admin_username}@${module.cyclecloud.primary_ip}" -} \ No newline at end of file + value = "ssh ${module.cyclecloud.admin_username}@${module.cyclecloud.primary_ip}" +} diff --git a/src/terraform/examples/centos/main.tf b/src/terraform/examples/centos/main.tf index 186cd1eef..6bd8292b4 100644 --- a/src/terraform/examples/centos/main.tf +++ b/src/terraform/examples/centos/main.tf @@ -1,49 +1,49 @@ // customize the Secured VM by adjusting the following local variables locals { - // the region of 
the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "PASSWORD" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - resource_group_name = "centosresource_group" - vm_size = "Standard_D2s_v3" - - // the below is the resource group and name of the previously created custom image - image_resource_group = "image_resource_group" - image_name = "image_name" - - // network details - virtual_network_resource_group = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "render_clients2" - - // update search domain with space separated list of search domains, leave blank to not set - search_domain = "" - - // this value for OS Disk resize must be between 20GB and 1023GB, - // after this you will need to repartition the disk - os_disk_size_gb = 32 - - script_file_b64 = base64gzip(replace(file("${path.module}/installnfs.sh"),"\r","")) - cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { install_script = local.script_file_b64, search_domain = local.search_domain}) + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "PASSWORD" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + resource_group_name = "centosresource_group" + vm_size = "Standard_D2s_v3" + + // the below is the resource group and name of the previously created custom image + image_resource_group = "image_resource_group" + image_name = "image_name" + + // network details + virtual_network_resource_group = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "render_clients2" + + // update search domain with space separated list of search domains, leave blank to not set + search_domain = "" + + // this value for OS Disk resize must be between 20GB and 1023GB, + // after this you will need to repartition the disk + os_disk_size_gb = 32 + + script_file_b64 = base64gzip(replace(file("${path.module}/installnfs.sh"), "\r", "")) + cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { install_script = local.script_file_b64, search_domain = local.search_domain }) } terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = "~>2.12.0" - } - } + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>2.12.0" + } + } } provider "azurerm" { - features {} + features {} } data "azurerm_subnet" "vnet" { @@ -53,8 +53,8 @@ data "azurerm_subnet" "vnet" { } data "azurerm_image" "custom_image" { - name = local.image_name - resource_group_name = local.image_resource_group + name = local.image_name + resource_group_name = local.image_resource_group } resource "azurerm_resource_group" "main" { @@ -83,29 +83,29 @@ resource "azurerm_linux_virtual_machine" "main" { size = local.vm_size custom_data = base64encode(local.cloud_init_file) source_image_id = data.azurerm_image.custom_image.id - + // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" 
disk_size_gb = local.os_disk_size_gb } - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "cse" { - name = "vm-cse" + name = "vm-cse" virtual_machine_id = azurerm_linux_virtual_machine.main.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" @@ -132,5 +132,5 @@ output "vm_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" +} diff --git a/src/terraform/examples/centos/stockimage/main.tf b/src/terraform/examples/centos/stockimage/main.tf index 67e20a686..d6883e92a 100644 --- a/src/terraform/examples/centos/stockimage/main.tf +++ b/src/terraform/examples/centos/stockimage/main.tf @@ -1,55 +1,55 @@ // customize the Secured VM by adjusting the following local variables locals { - // the region of the deployment - location = "eastus" - - // authentication details - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // leave ssh key data blank if you want to use a password - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - // VM details - resource_group_name = "centosresource_group" - unique_name = "vm" - vm_size = "Standard_D2s_v3" - - // virtual network information - virtual_network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "render_clients1" - - source_image_reference = local.source_image_reference_latest - - source_image_reference_latest = { - publisher = "OpenLogic" - offer = "CentOS" - sku = "7.7" - version = "latest" - } + // the region of the deployment + location = "eastus" + + // authentication details + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // leave ssh key data blank if you want to use a password + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ + // VM details + resource_group_name = "centosresource_group" + unique_name = "vm" + vm_size = "Standard_D2s_v3" + + // virtual network information + virtual_network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "render_clients1" + + source_image_reference = local.source_image_reference_latest + + source_image_reference_latest = { + publisher = "OpenLogic" + offer = "CentOS" + sku = "7.7" + version = "latest" + } - # even though it is deprecated, you can use the offer "CentOS-CI" for older Cent OS images - source_image_reference_7_4 = { - publisher = "OpenLogic" - offer = "CentOS-CI" - sku = "7-CI" - version = "7.4.20180417" - } + # even though it is deprecated, you can use the offer "CentOS-CI" for older Cent OS images + source_image_reference_7_4 = { + publisher = "OpenLogic" + offer = "CentOS-CI" + sku = "7-CI" + version = "7.4.20180417" + } } terraform { - required_providers { - azurerm = { - source = "hashicorp/azurerm" - version = "~>2.12.0" - } - } + required_providers { + azurerm = { + source = "hashicorp/azurerm" + version = "~>2.12.0" + } + } } provider "azurerm" { - features {} + features {} } resource "azurerm_resource_group" "main" { @@ -76,13 +76,13 @@ resource "azurerm_network_interface" "main" { } resource "azurerm_linux_virtual_machine" "main" { - name = local.unique_name - resource_group_name = azurerm_resource_group.main.name - location = azurerm_resource_group.main.location - network_interface_ids = [azurerm_network_interface.main.id] - computer_name = local.unique_name - size = local.vm_size - + name = local.unique_name + resource_group_name = azurerm_resource_group.main.name + location = azurerm_resource_group.main.location + network_interface_ids = [azurerm_network_interface.main.id] + computer_name = local.unique_name + size = local.vm_size + source_image_reference { publisher = local.source_image_reference.publisher offer = local.source_image_reference.offer @@ -92,21 +92,21 @@ resource "azurerm_linux_virtual_machine" "main" { // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } @@ -119,5 +119,5 @@ output "ip_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" +} diff --git a/src/terraform/examples/centosgridgpu/main.tf b/src/terraform/examples/centosgridgpu/main.tf index 3cc053349..598374b19 100644 --- a/src/terraform/examples/centosgridgpu/main.tf +++ b/src/terraform/examples/centosgridgpu/main.tf @@ -1,28 +1,28 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "westus2" - resource_group = "centosgridgpu" + // the region of the deployment + location = "westus2" + resource_group = "centosgridgpu" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - unique_name = "centosgridgpu" - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - vnet_name = "rendernetwork" - subnet_name = "jumpbox" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ unique_name = "centosgridgpu" + ssh_port = 22 - teradici_license_key = "" + // network details + network_resource_group_name = "network_resource_group" + vnet_name = "rendernetwork" + subnet_name = "jumpbox" - // update search domain with space separated list of search domains, leave blank to not set - search_domain = "" + teradici_license_key = "" + + // update search domain with space separated list of search domains, leave blank to not set + search_domain = "" } terraform { @@ -45,24 +45,24 @@ resource "azurerm_resource_group" "centosgridgpu" { } module "centosgridgpu" { - source = "github.com/Azure/Avere/src/terraform/modules/centosgridgpu" - resource_group_name = local.resource_group - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - ssh_port = local.ssh_port - search_domain = local.search_domain - teradici_license_key = local.teradici_license_key + source = "github.com/Azure/Avere/src/terraform/modules/centosgridgpu" + resource_group_name = local.resource_group + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + ssh_port = local.ssh_port + search_domain = local.search_domain + teradici_license_key = local.teradici_license_key - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = local.vnet_name - virtual_network_subnet_name = local.subnet_name + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = local.vnet_name + virtual_network_subnet_name = local.subnet_name - module_depends_on = [azurerm_resource_group.centosgridgpu.id] + module_depends_on = [azurerm_resource_group.centosgridgpu.id] } output "address" { value = module.centosgridgpu.address -} \ No newline at end of file +} diff --git a/src/terraform/examples/dnsserver/main.tf b/src/terraform/examples/dnsserver/main.tf index 9d7af706c..b80bcdadb 100644 --- a/src/terraform/examples/dnsserver/main.tf +++ b/src/terraform/examples/dnsserver/main.tf @@ -1,54 +1,54 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // dns settings - onprem_dns_servers = "169.254.169.254" - dnsserver_static_ip = "10.0.3.253" - onprem_filer_fqdn = "nfs1.rendering.com" - - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // dns settings + onprem_dns_servers = "169.254.169.254" + dnsserver_static_ip = "10.0.3.253" + onprem_filer_fqdn = "nfs1.rendering.com" + + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -67,12 +67,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -82,106 +82,106 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = 
local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - vserver_first_ip = "10.0.1.200" - vserver_ip_count = 12 - dns_server = local.onprem_dns_servers - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + vserver_first_ip = "10.0.1.200" + vserver_ip_count = 12 + dns_server = local.onprem_dns_servers + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export } -} + } +} module "dnsserver" { - source = "github.com/Azure/Avere/src/terraform/modules/dnsserver" - resource_group_name = local.network_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - // this is the address of the unbound dns server - private_ip_address = local.dnsserver_static_ip - - dns_server = local.onprem_dns_servers - avere_first_ip_addr = avere_vfxt.vfxt.vserver_first_ip - avere_ip_addr_count = avere_vfxt.vfxt.vserver_ip_count - avere_filer_fqdn = local.onprem_filer_fqdn - - // set the TTL - dns_max_ttl_seconds = 300 - - module_depends_on = [module.network.module_depends_on_ids] + source = "github.com/Azure/Avere/src/terraform/modules/dnsserver" + resource_group_name = local.network_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + // this is the address of the unbound dns server + private_ip_address = local.dnsserver_static_ip + + dns_server = local.onprem_dns_servers + avere_first_ip_addr = avere_vfxt.vfxt.vserver_first_ip + avere_ip_addr_count = avere_vfxt.vfxt.vserver_ip_count + avere_filer_fqdn = local.onprem_filer_fqdn + + // set the TTL + dns_max_ttl_seconds = 300 + + module_depends_on = [module.network.module_depends_on_ids] } @@ -202,17 +202,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = 
avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "unbound_dns_server_ip" { value = module.dnsserver.dnsserver_address -} \ No newline at end of file +} diff --git a/src/terraform/examples/dnsserver/standalone/main.tf b/src/terraform/examples/dnsserver/standalone/main.tf index 649eb2cce..d8dc51083 100644 --- a/src/terraform/examples/dnsserver/standalone/main.tf +++ b/src/terraform/examples/dnsserver/standalone/main.tf @@ -1,30 +1,30 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + ssh_port = 22 - // network details - network_resource_group_name = "network_resource_group" - vnet_name = "rendernetwork" - subnet_name = "cache" - - // dns settings - // A space separated list of dns servers to forward to - onprem_dns_servers = "169.254.169.254" - dnsserver_static_ip = "10.0.3.253" - onprem_filer_fqdn = "nfs1.rendering.com" - dns_max_ttl_seconds = 300 + // network details + network_resource_group_name = "network_resource_group" + vnet_name = "rendernetwork" + subnet_name = "cache" - avere_first_ip = "10.0.3.50" - avere_ip_addr_count = 3 + // dns settings + // A space separated list of dns servers to forward to + onprem_dns_servers = "169.254.169.254" + dnsserver_static_ip = "10.0.3.253" + onprem_filer_fqdn = "nfs1.rendering.com" + dns_max_ttl_seconds = 300 + + avere_first_ip = "10.0.3.50" + avere_ip_addr_count = 3 } terraform { @@ -42,31 +42,31 @@ provider "azurerm" { } module "dnsserver" { - source = "github.com/Azure/Avere/src/terraform/modules/dnsserver" - resource_group_name = local.network_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - ssh_port = local.ssh_port + source = "github.com/Azure/Avere/src/terraform/modules/dnsserver" + resource_group_name = local.network_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = local.vnet_name + virtual_network_subnet_name = local.subnet_name - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = local.vnet_name - virtual_network_subnet_name = local.subnet_name + // this is the address of the unbound dns server + private_ip_address = local.dnsserver_static_ip - // this is the address of the unbound dns server - 
private_ip_address = local.dnsserver_static_ip + dns_server = local.onprem_dns_servers + avere_first_ip_addr = local.avere_first_ip + avere_ip_addr_count = local.avere_ip_addr_count + avere_filer_fqdn = local.onprem_filer_fqdn - dns_server = local.onprem_dns_servers - avere_first_ip_addr = local.avere_first_ip - avere_ip_addr_count = local.avere_ip_addr_count - avere_filer_fqdn = local.onprem_filer_fqdn - - // set the TTL - dns_max_ttl_seconds = local.dns_max_ttl_seconds + // set the TTL + dns_max_ttl_seconds = local.dns_max_ttl_seconds } output "unbound_dns_server_ip" { value = module.dnsserver.dnsserver_address -} \ No newline at end of file +} diff --git a/src/terraform/examples/hammerspace-multi-region/0.network/fourthregion/main.tf b/src/terraform/examples/hammerspace-multi-region/0.network/fourthregion/main.tf index c405d4cf2..d629e8736 100644 --- a/src/terraform/examples/hammerspace-multi-region/0.network/fourthregion/main.tf +++ b/src/terraform/examples/hammerspace-multi-region/0.network/fourthregion/main.tf @@ -1,28 +1,28 @@ // customize the simple VM by editing the following local variables locals { - location4 = "australiaeast" - - // network details - network_rg4_name = "${local.resource_group_unique_prefix}netregion4" - - // paste the below settings from the output of the 0.network/main.tf - network-region1-vnet_id = "" - network-region1-vnet_name = "" - network-region2-vnet_id = "" - network-region2-vnet_name = "" - network-region3-vnet_id = "" - network-region3-vnet_name = "" - network_rg1_name = "" - network_rg2_name = "" - network_rg3_name = "" - resource_group_unique_prefix = "" - - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [22,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] - dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller + location4 = "australiaeast" + + // network details + network_rg4_name = "${local.resource_group_unique_prefix}netregion4" + + // paste the below settings from the output of the 0.network/main.tf + network-region1-vnet_id = "" + network-region1-vnet_name = "" + network-region2-vnet_id = "" + network-region2-vnet_name = "" + network-region3-vnet_id = "" + network-region3-vnet_name = "" + network_rg1_name = "" + network_rg2_name = "" + network_rg3_name = "" + resource_group_unique_prefix = "" + + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [22, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] + dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller } terraform { @@ -46,20 +46,20 @@ provider "azurerm" { // the render network module "network-region4" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_rg4_name - location = local.location4 - dns_servers = local.dns_servers - - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources - vnet_address_space = "10.3.0.0/16" - subnet_cloud_cache_address_prefix = "10.3.1.0/24" - subnet_cloud_filers_address_prefix = "10.3.2.128/25" - subnet_cloud_filers_ha_address_prefix = 
"10.3.2.0/25" - subnet_jumpbox_address_prefix = "10.3.3.0/24" - subnet_render_clients1_address_prefix = "10.3.4.0/23" - subnet_render_clients2_address_prefix = "10.3.6.0/23" + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_rg4_name + location = local.location4 + dns_servers = local.dns_servers + + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources + vnet_address_space = "10.3.0.0/16" + subnet_cloud_cache_address_prefix = "10.3.1.0/24" + subnet_cloud_filers_address_prefix = "10.3.2.128/25" + subnet_cloud_filers_ha_address_prefix = "10.3.2.0/25" + subnet_jumpbox_address_prefix = "10.3.3.0/24" + subnet_render_clients1_address_prefix = "10.3.4.0/23" + subnet_render_clients2_address_prefix = "10.3.6.0/23" } resource "azurerm_virtual_network_peering" "p1-4" { @@ -109,7 +109,7 @@ output "location4" { } output "network_rg4_name" { - value = "\"${local.network_rg4_name}\"" + value = "\"${local.network_rg4_name}\"" } output "network-region4-cloud_filers_ha_subnet_name" { @@ -129,5 +129,5 @@ output "network-region4-vnet_id" { } output "resource_group_unique_prefix" { - value = "\"${local.resource_group_unique_prefix}\"" -} \ No newline at end of file + value = "\"${local.resource_group_unique_prefix}\"" +} diff --git a/src/terraform/examples/hammerspace-multi-region/0.network/main.tf b/src/terraform/examples/hammerspace-multi-region/0.network/main.tf index 7ecc298c9..d1d5b14c9 100644 --- a/src/terraform/examples/hammerspace-multi-region/0.network/main.tf +++ b/src/terraform/examples/hammerspace-multi-region/0.network/main.tf @@ -1,23 +1,23 @@ // customize the simple VM by editing the following local variables locals { - // the region2 of the deployment - location1 = "westus2" - location2 = "westus" - location3 = "canadaeast" - - resource_group_unique_prefix = "" - - // network details - network_rg1_name = "${local.resource_group_unique_prefix}netregion1" - network_rg2_name = "${local.resource_group_unique_prefix}netregion2" - network_rg3_name = "${local.resource_group_unique_prefix}netregion3" - - # advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [22,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] - dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller + // the region2 of the deployment + location1 = "westus2" + location2 = "westus" + location3 = "canadaeast" + + resource_group_unique_prefix = "" + + // network details + network_rg1_name = "${local.resource_group_unique_prefix}netregion1" + network_rg2_name = "${local.resource_group_unique_prefix}netregion2" + network_rg3_name = "${local.resource_group_unique_prefix}netregion3" + + # advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [22, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] + dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller } terraform { @@ -43,56 +43,56 @@ provider "azurerm" { // the render network module "network-region1" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - 
resource_group_name = local.network_rg1_name - location = local.location1 - dns_servers = local.dns_servers - - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources - vnet_address_space = "10.0.0.0/16" - subnet_cloud_cache_address_prefix = "10.0.1.0/24" - subnet_cloud_filers_address_prefix = "10.0.2.128/25" - subnet_cloud_filers_ha_address_prefix = "10.0.2.0/25" - subnet_jumpbox_address_prefix = "10.0.3.0/24" - subnet_render_clients1_address_prefix = "10.0.4.0/23" - subnet_render_clients2_address_prefix = "10.0.6.0/23" + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_rg1_name + location = local.location1 + dns_servers = local.dns_servers + + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources + vnet_address_space = "10.0.0.0/16" + subnet_cloud_cache_address_prefix = "10.0.1.0/24" + subnet_cloud_filers_address_prefix = "10.0.2.128/25" + subnet_cloud_filers_ha_address_prefix = "10.0.2.0/25" + subnet_jumpbox_address_prefix = "10.0.3.0/24" + subnet_render_clients1_address_prefix = "10.0.4.0/23" + subnet_render_clients2_address_prefix = "10.0.6.0/23" } // the render network module "network-region2" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_rg2_name - location = local.location2 - dns_servers = local.dns_servers - - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources - vnet_address_space = "10.1.0.0/16" - subnet_cloud_cache_address_prefix = "10.1.1.0/24" - subnet_cloud_filers_address_prefix = "10.1.2.128/25" - subnet_cloud_filers_ha_address_prefix = "10.1.2.0/25" - subnet_jumpbox_address_prefix = "10.1.3.0/24" - subnet_render_clients1_address_prefix = "10.1.4.0/23" - subnet_render_clients2_address_prefix = "10.1.6.0/23" + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_rg2_name + location = local.location2 + dns_servers = local.dns_servers + + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources + vnet_address_space = "10.1.0.0/16" + subnet_cloud_cache_address_prefix = "10.1.1.0/24" + subnet_cloud_filers_address_prefix = "10.1.2.128/25" + subnet_cloud_filers_ha_address_prefix = "10.1.2.0/25" + subnet_jumpbox_address_prefix = "10.1.3.0/24" + subnet_render_clients1_address_prefix = "10.1.4.0/23" + subnet_render_clients2_address_prefix = "10.1.6.0/23" } // the render network module "network-region3" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_rg3_name - location = local.location3 - dns_servers = local.dns_servers - - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources - vnet_address_space = "10.2.0.0/16" - subnet_cloud_cache_address_prefix = "10.2.1.0/24" - subnet_cloud_filers_address_prefix = "10.2.2.128/25" - subnet_cloud_filers_ha_address_prefix = "10.2.2.0/25" - subnet_jumpbox_address_prefix = "10.2.3.0/24" - subnet_render_clients1_address_prefix = "10.2.4.0/23" - subnet_render_clients2_address_prefix = "10.2.6.0/23" + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_rg3_name + location = local.location3 + dns_servers = local.dns_servers + + open_external_ports = local.open_external_ports + open_external_sources = 
local.open_external_sources + vnet_address_space = "10.2.0.0/16" + subnet_cloud_cache_address_prefix = "10.2.1.0/24" + subnet_cloud_filers_address_prefix = "10.2.2.128/25" + subnet_cloud_filers_ha_address_prefix = "10.2.2.0/25" + subnet_jumpbox_address_prefix = "10.2.3.0/24" + subnet_render_clients1_address_prefix = "10.2.4.0/23" + subnet_render_clients2_address_prefix = "10.2.6.0/23" } resource "azurerm_virtual_network_peering" "p1-2" { @@ -150,11 +150,11 @@ output "location3" { } output "resource_group_unique_prefix" { - value = "\"${local.resource_group_unique_prefix}\"" + value = "\"${local.resource_group_unique_prefix}\"" } output "network_rg1_name" { - value = "\"${local.network_rg1_name}\"" + value = "\"${local.network_rg1_name}\"" } output "network-region1-cloud_filers_ha_subnet_name" { @@ -186,7 +186,7 @@ output "network-region1-vnet_id" { } output "network_rg2_name" { - value = "\"${local.network_rg2_name}\"" + value = "\"${local.network_rg2_name}\"" } output "network-region2-cloud_filers_ha_subnet_name" { @@ -206,7 +206,7 @@ output "network-region2-vnet_id" { } output "network_rg3_name" { - value = "\"${local.network_rg3_name}\"" + value = "\"${local.network_rg3_name}\"" } output "network-region3-cloud_filers_ha_subnet_name" { diff --git a/src/terraform/examples/hammerspace-multi-region/4.windowsclient/main.tf b/src/terraform/examples/hammerspace-multi-region/4.windowsclient/main.tf index 7d5dd4eda..c32119460 100644 --- a/src/terraform/examples/hammerspace-multi-region/4.windowsclient/main.tf +++ b/src/terraform/examples/hammerspace-multi-region/4.windowsclient/main.tf @@ -1,40 +1,40 @@ // customize the simple VM by adjusting the following local variables locals { - resource_group_name = "houdini_vm_rg" - unique_name = "unique" - // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine - vmPrefix = local.unique_name - // paste in the id of the full custom image - source_image_id = "" - // can be any of the following None, Windows_Client and Windows_Server - license_type = "None" - vm_size = "Standard_D4s_v3" - add_public_ip = true - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - - // replace below variables with the infrastructure variables from 0.network - location = "" - vnet_render_clients1_subnet_id = "" - - // update the below with information about the domain - ad_domain = "" // example "rendering.com" - // leave blank to add machine to default location - ou_path = "" - ad_username = "" - ad_password = "" - - // update if you need to change the RDP port - rdp_port = 3389 - - // the following are the arguments to be passed to the custom script - windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " - - // load the powershell file, you can substitute kv pairs as you need them, but - // use arguments where possible - powershell_script = file("${path.module}/../../setupMachine.ps1") + resource_group_name = "houdini_vm_rg" + unique_name = "unique" + // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine + vmPrefix = local.unique_name + // paste in the id of the full custom 
image + source_image_id = "" + // can be any of the following None, Windows_Client and Windows_Server + license_type = "None" + vm_size = "Standard_D4s_v3" + add_public_ip = true + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + + // replace below variables with the infrastructure variables from 0.network + location = "" + vnet_render_clients1_subnet_id = "" + + // update the below with information about the domain + ad_domain = "" // example "rendering.com" + // leave blank to add machine to default location + ou_path = "" + ad_username = "" + ad_password = "" + + // update if you need to change the RDP port + rdp_port = 3389 + + // the following are the arguments to be passed to the custom script + windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " + + // load the powershell file, you can substitute kv pairs as you need them, but + // use arguments where possible + powershell_script = file("${path.module}/../../setupMachine.ps1") } terraform { @@ -57,10 +57,10 @@ resource "azurerm_resource_group" "win" { } resource "azurerm_public_ip" "vm" { - name = "${local.unique_name}-publicip" - location = local.location - resource_group_name = azurerm_resource_group.win.name - allocation_method = "Static" + name = "${local.unique_name}-publicip" + location = local.location + resource_group_name = azurerm_resource_group.win.name + allocation_method = "Static" count = local.add_public_ip ? 1 : 0 } @@ -127,5 +127,5 @@ output "username" { } output "vm_address" { - value = "${local.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = local.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address +} diff --git a/src/terraform/examples/hammerspace/main.tf b/src/terraform/examples/hammerspace/main.tf index bd1bea7fe..03811596a 100644 --- a/src/terraform/examples/hammerspace/main.tf +++ b/src/terraform/examples/hammerspace/main.tf @@ -1,70 +1,70 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - admin_username = "azureuser" - admin_password = "ReplacePassword$" - - unique_name = "hammerspace1" - hammerspace_image_id = "" - use_highly_available = false - anvil_configuration = local.use_highly_available ? 
"High Availability" : "Standalone" - - // add a globally uniquename - storage_account_name = "REPLACE_WITH_GLOBALLY_UNIQUE_NAME" - storage_container_name = "hammerspace" - - // virtual network and subnet details - virtual_network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - ha_subnet_name = "cloud_filers_ha" - data_subnet_name = "cloud_filers" - data_subnet_mask_bits = 25 - anvil_data_cluster_ip = "10.0.2.110" // leave blank to be dynamic - dsx_instance_count = 1 - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - anvil_instance_type = "Standard_F16s_v2" - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - dsx_instance_type = "Standard_F16s_v2" - - // storage_account_type = "Standard_LRS" - // storage_account_type = "StandardSSD_LRS" - storage_account_type = "Premium_LRS" - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - metadata_disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - // disk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - // metadata_disk_size_gb = 32767 // P80, E80, S80 - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - // disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - datadisk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - // data_disk_size_gb = 32767 // P80, E80, S80 - - // the nfs export path exported from hammerspace - nfs_export_path = "/data" + // the region of the deployment + location = "eastus" + admin_username = "azureuser" + admin_password = "ReplacePassword$" + + unique_name = "hammerspace1" + hammerspace_image_id = "" + use_highly_available = false + anvil_configuration = local.use_highly_available ? 
"High Availability" : "Standalone" + + // add a globally uniquename + storage_account_name = "REPLACE_WITH_GLOBALLY_UNIQUE_NAME" + storage_container_name = "hammerspace" + + // virtual network and subnet details + virtual_network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + ha_subnet_name = "cloud_filers_ha" + data_subnet_name = "cloud_filers" + data_subnet_mask_bits = 25 + anvil_data_cluster_ip = "10.0.2.110" // leave blank to be dynamic + dsx_instance_count = 1 + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + anvil_instance_type = "Standard_F16s_v2" + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + dsx_instance_type = "Standard_F16s_v2" + + // storage_account_type = "Standard_LRS" + // storage_account_type = "StandardSSD_LRS" + storage_account_type = "Premium_LRS" + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + metadata_disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + // disk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + // metadata_disk_size_gb = 32767 // P80, E80, S80 + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + // disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + datadisk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + // data_disk_size_gb = 32767 // P80, E80, S80 + + // the nfs export path exported from hammerspace + nfs_export_path = "/data" } terraform { @@ -82,8 +82,8 @@ provider "azurerm" { } resource "azurerm_resource_group" "nfsfiler" { - name = local.filer_resource_group_name - location = local.location + name = local.filer_resource_group_name + location = local.location } resource "azurerm_storage_account" "storage" { @@ -103,81 +103,81 @@ resource "azurerm_storage_container" "blob_container" { // the ephemeral filer module "anvil" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - hammerspace_image_id = local.hammerspace_image_id - unique_name = local.unique_name - admin_username = local.admin_username - admin_password = local.admin_password - anvil_configuration = local.anvil_configuration - anvil_instance_type = local.anvil_instance_type - virtual_network_resource_group = local.virtual_network_resource_group_name - virtual_network_name = local.virtual_network_name - virtual_network_ha_subnet_name = local.ha_subnet_name - virtual_network_data_subnet_name = local.data_subnet_name - virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits - anvil_data_cluster_ip = local.anvil_data_cluster_ip - anvil_metadata_disk_storage_type = 
local.storage_account_type - anvil_metadata_disk_size = local.metadata_disk_size_gb - - module_depends_on = [azurerm_resource_group.nfsfiler.id] + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + hammerspace_image_id = local.hammerspace_image_id + unique_name = local.unique_name + admin_username = local.admin_username + admin_password = local.admin_password + anvil_configuration = local.anvil_configuration + anvil_instance_type = local.anvil_instance_type + virtual_network_resource_group = local.virtual_network_resource_group_name + virtual_network_name = local.virtual_network_name + virtual_network_ha_subnet_name = local.ha_subnet_name + virtual_network_data_subnet_name = local.data_subnet_name + virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits + anvil_data_cluster_ip = local.anvil_data_cluster_ip + anvil_metadata_disk_storage_type = local.storage_account_type + anvil_metadata_disk_size = local.metadata_disk_size_gb + + module_depends_on = [azurerm_resource_group.nfsfiler.id] } // the ephemeral filer module "dsx" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/dsx" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - hammerspace_image_id = local.hammerspace_image_id - unique_name = local.unique_name - admin_username = local.admin_username - admin_password = local.admin_password - dsx_instance_count = local.dsx_instance_count - dsx_instance_type = local.dsx_instance_type - virtual_network_resource_group = local.virtual_network_resource_group_name - virtual_network_name = local.virtual_network_name - virtual_network_data_subnet_name = local.data_subnet_name - virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits - anvil_password = module.anvil.web_ui_password - anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip - anvil_domain = module.anvil.anvil_domain - dsx_data_disk_storage_type = local.storage_account_type - dsx_data_disk_size = local.datadisk_size_gb + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/dsx" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + hammerspace_image_id = local.hammerspace_image_id + unique_name = local.unique_name + admin_username = local.admin_username + admin_password = local.admin_password + dsx_instance_count = local.dsx_instance_count + dsx_instance_type = local.dsx_instance_type + virtual_network_resource_group = local.virtual_network_resource_group_name + virtual_network_name = local.virtual_network_name + virtual_network_data_subnet_name = local.data_subnet_name + virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits + anvil_password = module.anvil.web_ui_password + anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip + anvil_domain = module.anvil.anvil_domain + dsx_data_disk_storage_type = local.storage_account_type + dsx_data_disk_size = local.datadisk_size_gb } module "anvil_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil-run-once-configure" - anvil_arm_virtual_machine_id = length(module.anvil.arm_virtual_machine_ids) == 0 ? 
"" : module.anvil.arm_virtual_machine_ids[0] - anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip - web_ui_password = module.anvil.web_ui_password - dsx_count = local.dsx_instance_count - nfs_export_path = local.nfs_export_path - anvil_hostname = length(module.anvil.anvil_host_names) == 0 ? "" : module.anvil.anvil_host_names[0] - - module_depends_on = module.anvil.module_depends_on_ids + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil-run-once-configure" + anvil_arm_virtual_machine_id = length(module.anvil.arm_virtual_machine_ids) == 0 ? "" : module.anvil.arm_virtual_machine_ids[0] + anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip + web_ui_password = module.anvil.web_ui_password + dsx_count = local.dsx_instance_count + nfs_export_path = local.nfs_export_path + anvil_hostname = length(module.anvil.anvil_host_names) == 0 ? "" : module.anvil.anvil_host_names[0] + + module_depends_on = module.anvil.module_depends_on_ids } output "hammerspace_username" { - value = module.anvil.admin_username + value = module.anvil.admin_username } output "hammerspace_webui_username" { - value = module.anvil.web_ui_username + value = module.anvil.web_ui_username } output "hammerspace_webui_password" { - value = module.anvil.web_ui_password + value = module.anvil.web_ui_password } output "anvil_data_cluster_ip" { - value = module.anvil.anvil_data_cluster_ip + value = module.anvil.anvil_data_cluster_ip } output "nfs_mountable_ips" { - value = module.dsx.dsx_ip_addresses + value = module.dsx.dsx_ip_addresses } output "nfs_export_path" { - value = local.nfs_export_path -} \ No newline at end of file + value = local.nfs_export_path +} diff --git a/src/terraform/examples/houdinienvironment/0.network/main.tf b/src/terraform/examples/houdinienvironment/0.network/main.tf index f8595641f..fd0d5cfae 100644 --- a/src/terraform/examples/houdinienvironment/0.network/main.tf +++ b/src/terraform/examples/houdinienvironment/0.network/main.tf @@ -1,17 +1,17 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "westus2" - - // network details - network_resource_group_name = "houdini_network_rg" - - # advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [22,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] - dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller + // the region of the deployment + location = "westus2" + + // network details + network_resource_group_name = "houdini_network_rg" + + # advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [22, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] + dns_servers = null // set this to the dc, for example ["10.0.3.254"] could be use for domain controller } terraform { @@ -30,20 +30,20 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location - dns_servers = local.dns_servers - - open_external_ports = local.open_external_ports - open_external_sources = 
local.open_external_sources - vnet_address_space = "10.0.0.0/16" - subnet_cloud_cache_address_prefix = "10.0.1.0/24" - subnet_cloud_filers_address_prefix = "10.0.2.0/25" - subnet_cloud_filers_ha_address_prefix = "10.0.2.128/25" - subnet_jumpbox_address_prefix = "10.0.3.0/24" - subnet_render_clients1_address_prefix = "10.0.4.0/23" - subnet_render_clients2_address_prefix = "10.0.6.0/23" + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location + dns_servers = local.dns_servers + + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources + vnet_address_space = "10.0.0.0/16" + subnet_cloud_cache_address_prefix = "10.0.1.0/24" + subnet_cloud_filers_address_prefix = "10.0.2.0/25" + subnet_cloud_filers_ha_address_prefix = "10.0.2.128/25" + subnet_jumpbox_address_prefix = "10.0.3.0/24" + subnet_render_clients1_address_prefix = "10.0.4.0/23" + subnet_render_clients2_address_prefix = "10.0.6.0/23" } output "location" { diff --git a/src/terraform/examples/houdinienvironment/1.storage/blobstorage/main.tf b/src/terraform/examples/houdinienvironment/1.storage/blobstorage/main.tf index d74828ec3..215766dd7 100644 --- a/src/terraform/examples/houdinienvironment/1.storage/blobstorage/main.tf +++ b/src/terraform/examples/houdinienvironment/1.storage/blobstorage/main.tf @@ -1,18 +1,18 @@ // customize the simple VM by editing the following local variables locals { - // storage details - storage_resource_group_name = "houdini_storage_rg" - storage_account_name = "houdinistgacct" + // storage details + storage_resource_group_name = "houdini_storage_rg" + storage_account_name = "houdinistgacct" - // replace below variables with the infrastructure variables from 1.base_infrastructure - location = "" - vnet_cloud_cache_subnet_id = "" - vnet_cloud_cache_subnet_name = "" - vnet_jumpbox_subnet_id = "" - vnet_jumpbox_subnet_name = "" - vnet_name = "" - vnet_render_clients1_subnet_id = "" - vnet_resource_group = "" + // replace below variables with the infrastructure variables from 1.base_infrastructure + location = "" + vnet_cloud_cache_subnet_id = "" + vnet_cloud_cache_subnet_name = "" + vnet_jumpbox_subnet_id = "" + vnet_jumpbox_subnet_name = "" + vnet_name = "" + vnet_render_clients1_subnet_id = "" + vnet_resource_group = "" } terraform { @@ -41,12 +41,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - local.vnet_cloud_cache_subnet_id, - // need for the controller to create the container - local.vnet_jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + local.vnet_cloud_cache_subnet_id, + // need for the controller to create the container + local.vnet_jumpbox_subnet_id, + ] + default_action = "Deny" } } diff --git a/src/terraform/examples/houdinienvironment/1.storage/nfsfiler/main.tf b/src/terraform/examples/houdinienvironment/1.storage/nfsfiler/main.tf index ad2132d2f..cc729c16d 100644 --- a/src/terraform/examples/houdinienvironment/1.storage/nfsfiler/main.tf +++ b/src/terraform/examples/houdinienvironment/1.storage/nfsfiler/main.tf @@ -1,29 +1,29 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = 
"ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + // the region of the deployment + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - // nfs filer details - storage_resource_group_name = "houdini_storage_rg" - // more filer sizes listed at https://github.com/Azure/Avere/tree/main/src/terraform/modules/nfs_filer - filer_size = "Standard_D2s_v3" + // nfs filer details + storage_resource_group_name = "houdini_storage_rg" + // more filer sizes listed at https://github.com/Azure/Avere/tree/main/src/terraform/modules/nfs_filer + filer_size = "Standard_D2s_v3" - // replace below variables with the infrastructure variables from 1.base_infrastructure - location = "" - vnet_cloud_cache_subnet_id = "" - vnet_cloud_cache_subnet_name = "" - vnet_cloud_filers_subnet_name = "" - vnet_jumpbox_subnet_id = "" - vnet_jumpbox_subnet_name = "" - vnet_name = "" - vnet_render_clients1_subnet_id = "" - vnet_resource_group = "" + // replace below variables with the infrastructure variables from 1.base_infrastructure + location = "" + vnet_cloud_cache_subnet_id = "" + vnet_cloud_cache_subnet_name = "" + vnet_cloud_filers_subnet_name = "" + vnet_jumpbox_subnet_id = "" + vnet_jumpbox_subnet_name = "" + vnet_name = "" + vnet_render_clients1_subnet_id = "" + vnet_resource_group = "" } terraform { @@ -47,19 +47,19 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = local.filer_size - unique_name = "nasfiler1" + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = local.filer_size + unique_name = "nasfiler1" - // network details - virtual_network_resource_group = local.vnet_resource_group - virtual_network_name = local.vnet_name - virtual_network_subnet_name = local.vnet_cloud_filers_subnet_name + // network details + virtual_network_resource_group = local.vnet_resource_group + virtual_network_name = local.vnet_name + virtual_network_subnet_name = local.vnet_cloud_filers_subnet_name } output "filer_username" { diff --git a/src/terraform/examples/houdinienvironment/2.windowsstockvm/main.tf b/src/terraform/examples/houdinienvironment/2.windowsstockvm/main.tf index c65307fed..3bc6fd233 100644 --- a/src/terraform/examples/houdinienvironment/2.windowsstockvm/main.tf +++ b/src/terraform/examples/houdinienvironment/2.windowsstockvm/main.tf @@ -1,8 +1,8 @@ // customize the simple VM by adjusting the following local variables locals { resource_group_name = "houdini_windows_vms_rg" - unique_name = "unique" - vm_size = "Standard_D4s_v3" + unique_name = "unique" + vm_size = "Standard_D4s_v3" # choose one of the following windows 
versions source_image_reference = local.windows_server_2016 #source_image_reference = local.windows_server_2019 @@ -12,7 +12,7 @@ locals { license_type = "None" # license_type = "Windows_Client" # license_type = "Windows_Server" - add_public_ip = true + add_public_ip = true vm_admin_username = "azureuser" // use either SSH Key data or admin password, if ssh_key_data is specified // then admin_password is ignored @@ -20,20 +20,20 @@ locals { // network, set static and IP if using a DC use_static_private_ip_address = false - private_ip_address = "" // for example "10.0.3.254" could be use for domain controller + private_ip_address = "" // for example "10.0.3.254" could be use for domain controller // replace below variables with the infrastructure variables from 0.network - location = "" + location = "" vnet_render_clients1_subnet_id = "" - + // replace below variables with the cache variables from the nfs filer or cache (in case of blob backed storage) mount_addresses = [] - mount_path = "" + mount_path = "" // advanced scenarios: the below variables rarely need to change mount_address_csv = join(",", tolist(local.mount_addresses)) - target_path = "c:\\\\cloudcache" - rdp_port = 3389 + target_path = "c:\\\\cloudcache" + rdp_port = 3389 // the following are the arguments to be passed to the custom script windows_custom_script_arguments = "$arguments = ' -MountAddressesCSV ''${local.mount_address_csv}'' -MountPath ''${local.mount_path}'' -TargetPath ''${local.target_path}'' -RDPPort ${local.rdp_port} ' ; " @@ -62,7 +62,7 @@ locals { offer = "Windows-10" sku = "20h2-pro" #sku = "20h1-entn" // uncomment for 2004 - version = "latest" + version = "latest" } } terraform { @@ -85,10 +85,10 @@ resource "azurerm_resource_group" "win" { } resource "azurerm_public_ip" "vm" { - name = "${local.unique_name}-publicip" - location = local.location - resource_group_name = azurerm_resource_group.win.name - allocation_method = "Static" + name = "${local.unique_name}-publicip" + location = local.location + resource_group_name = azurerm_resource_group.win.name + allocation_method = "Static" count = local.add_public_ip ? 1 : 0 } @@ -118,7 +118,7 @@ resource "azurerm_windows_virtual_machine" "vm" { size = local.vm_size network_interface_ids = [azurerm_network_interface.vm.id] license_type = local.license_type - + os_disk { name = "${local.unique_name}-osdisk" caching = "ReadWrite" @@ -162,5 +162,5 @@ output "username" { } output "vm_address" { - value = "${local.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = local.add_public_ip ? 
azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address +} diff --git a/src/terraform/examples/houdinienvironment/4.rendernodes/vm/main.tf b/src/terraform/examples/houdinienvironment/4.rendernodes/vm/main.tf index 7d5dd4eda..c32119460 100644 --- a/src/terraform/examples/houdinienvironment/4.rendernodes/vm/main.tf +++ b/src/terraform/examples/houdinienvironment/4.rendernodes/vm/main.tf @@ -1,40 +1,40 @@ // customize the simple VM by adjusting the following local variables locals { - resource_group_name = "houdini_vm_rg" - unique_name = "unique" - // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine - vmPrefix = local.unique_name - // paste in the id of the full custom image - source_image_id = "" - // can be any of the following None, Windows_Client and Windows_Server - license_type = "None" - vm_size = "Standard_D4s_v3" - add_public_ip = true - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - - // replace below variables with the infrastructure variables from 0.network - location = "" - vnet_render_clients1_subnet_id = "" - - // update the below with information about the domain - ad_domain = "" // example "rendering.com" - // leave blank to add machine to default location - ou_path = "" - ad_username = "" - ad_password = "" - - // update if you need to change the RDP port - rdp_port = 3389 - - // the following are the arguments to be passed to the custom script - windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " - - // load the powershell file, you can substitute kv pairs as you need them, but - // use arguments where possible - powershell_script = file("${path.module}/../../setupMachine.ps1") + resource_group_name = "houdini_vm_rg" + unique_name = "unique" + // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine + vmPrefix = local.unique_name + // paste in the id of the full custom image + source_image_id = "" + // can be any of the following None, Windows_Client and Windows_Server + license_type = "None" + vm_size = "Standard_D4s_v3" + add_public_ip = true + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + + // replace below variables with the infrastructure variables from 0.network + location = "" + vnet_render_clients1_subnet_id = "" + + // update the below with information about the domain + ad_domain = "" // example "rendering.com" + // leave blank to add machine to default location + ou_path = "" + ad_username = "" + ad_password = "" + + // update if you need to change the RDP port + rdp_port = 3389 + + // the following are the arguments to be passed to the custom script + windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " + + // load the powershell file, you can substitute kv pairs as you need 
them, but + // use arguments where possible + powershell_script = file("${path.module}/../../setupMachine.ps1") } terraform { @@ -57,10 +57,10 @@ resource "azurerm_resource_group" "win" { } resource "azurerm_public_ip" "vm" { - name = "${local.unique_name}-publicip" - location = local.location - resource_group_name = azurerm_resource_group.win.name - allocation_method = "Static" + name = "${local.unique_name}-publicip" + location = local.location + resource_group_name = azurerm_resource_group.win.name + allocation_method = "Static" count = local.add_public_ip ? 1 : 0 } @@ -127,5 +127,5 @@ output "username" { } output "vm_address" { - value = "${local.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = local.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm.ip_configuration[0].private_ip_address +} diff --git a/src/terraform/examples/houdinienvironment/4.rendernodes/vmss/main.tf b/src/terraform/examples/houdinienvironment/4.rendernodes/vmss/main.tf index 0c66216ea..4b81de163 100644 --- a/src/terraform/examples/houdinienvironment/4.rendernodes/vmss/main.tf +++ b/src/terraform/examples/houdinienvironment/4.rendernodes/vmss/main.tf @@ -1,46 +1,46 @@ // customize the VMSS by editing the following local variables locals { - vmss_resource_group_name = "houdini_vmss_rg" - unique_name = "unique" - // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine - vmPrefix = local.unique_name - // paste in the id of the full custom image - source_image_id = "" - // can be any of the following None, Windows_Client and Windows_Server - license_type = "None" - vm_count = 2 - vmss_size = "Standard_D4s_v3" - // Specify to use 'Regular' or 'Low' - vmss_priority = "Regular" - // Only used if "Low" is set. 
Specify "Delete" or "Deallocate" - vmss_spot_eviction_policy = "Delete" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - - // replace below variables with the infrastructure variables from 0.network - location = "" - vnet_render_clients1_subnet_id = "" - - // update the below with information about the domain - ad_domain = "" // example "rendering.com" - // leave blank to add machine to default location - ou_path = "" - ad_username = "" - ad_password = "" - - // update if you need to change the RDP port - rdp_port = 3389 - - nssm_path = "https://nssm.cc/release/nssm-2.24.zip" - - // the following are the arguments to be passed to the custom script - windows_custom_script_arguments = "$arguments = ' -NSSMPath ''${local.nssm_path}'' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " - - // load the powershell file, you can substitute kv pairs as you need them, but - // use arguments where possible - powershell_script = file("${path.module}/../../setupMachine.ps1") + vmss_resource_group_name = "houdini_vmss_rg" + unique_name = "unique" + // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine + vmPrefix = local.unique_name + // paste in the id of the full custom image + source_image_id = "" + // can be any of the following None, Windows_Client and Windows_Server + license_type = "None" + vm_count = 2 + vmss_size = "Standard_D4s_v3" + // Specify to use 'Regular' or 'Low' + vmss_priority = "Regular" + // Only used if "Low" is set. Specify "Delete" or "Deallocate" + vmss_spot_eviction_policy = "Delete" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + + // replace below variables with the infrastructure variables from 0.network + location = "" + vnet_render_clients1_subnet_id = "" + + // update the below with information about the domain + ad_domain = "" // example "rendering.com" + // leave blank to add machine to default location + ou_path = "" + ad_username = "" + ad_password = "" + + // update if you need to change the RDP port + rdp_port = 3389 + + nssm_path = "https://nssm.cc/release/nssm-2.24.zip" + + // the following are the arguments to be passed to the custom script + windows_custom_script_arguments = "$arguments = ' -NSSMPath ''${local.nssm_path}'' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " + + // load the powershell file, you can substitute kv pairs as you need them, but + // use arguments where possible + powershell_script = file("${path.module}/../../setupMachine.ps1") } terraform { @@ -78,16 +78,16 @@ resource "azurerm_virtual_machine_scale_set" "vmss" { priority = local.vmss_priority eviction_policy = local.vmss_priority == "Low" ? 
local.vmss_spot_eviction_policy : null // avoid overprovision as it can create race conditions with render managers - overprovision = false + overprovision = false // avoid use of zones so you get maximum spread of machines, and have > 100 nodes single_placement_group = false sku { - name = local.vmss_size - tier = "Standard" + name = local.vmss_size + tier = "Standard" capacity = local.vm_count } - + os_profile { computer_name_prefix = local.unique_name admin_username = local.vm_admin_username @@ -99,7 +99,7 @@ resource "azurerm_virtual_machine_scale_set" "vmss" { id = local.source_image_id } - storage_profile_os_disk { + storage_profile_os_disk { caching = "ReadWrite" managed_disk_type = "Standard_LRS" create_option = "FromImage" @@ -146,9 +146,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" +} diff --git a/src/terraform/examples/houdinienvironment/4.rendernodes/vmssephemeral/main.tf b/src/terraform/examples/houdinienvironment/4.rendernodes/vmssephemeral/main.tf index 7735f9dae..7a5ead2a0 100644 --- a/src/terraform/examples/houdinienvironment/4.rendernodes/vmssephemeral/main.tf +++ b/src/terraform/examples/houdinienvironment/4.rendernodes/vmssephemeral/main.tf @@ -1,49 +1,49 @@ // customize the VMSS by editing the following local variables locals { - vmss_resource_group_name = "houdini_vmss_rg" - unique_name = "unique" - // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine - vmPrefix = local.unique_name - // paste in the id of the full custom image - source_image_id = "" - // can be any of the following None, Windows_Client and Windows_Server - license_type = "None" - vm_count = 2 - vmss_size = "Standard_D8s_v3" - // Specify to use 'Regular' or 'Spot' - vmss_priority = "Spot" - // Only used if "SPOT" is set. 
Specify "Delete" or "Deallocate" - vmss_spot_eviction_policy = "Delete" - use_ephemeral_os_disk = true - // customize the os disk size if needed - os_disk_size_gb = null - // Standard_LRS, StandardSSD_LRS, Premium_LRS and UltraSSD_LRS - managed_disk_type = "Standard_LRS" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - - // replace below variables with the infrastructure variables from 0.network - location = "" - vnet_render_clients1_subnet_id = "" - - // update the below with information about the domain - ad_domain = "" // example "rendering.com" - // leave blank to add machine to default location - ou_path = "" - ad_username = "" - ad_password = "" - - // update if you need to change the RDP port - rdp_port = 3389 - - // the following are the arguments to be passed to the custom script - windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " - - // load the powershell file, you can substitute kv pairs as you need them, but - // use arguments where possible - powershell_script = file("${path.module}/../../setupMachine.ps1") + vmss_resource_group_name = "houdini_vmss_rg" + unique_name = "unique" + // leave blank to not rename VM, otherwise it will be named "VMPREFIX-OCTET3-OCTET4" where the octets are from the IPv4 address of the machine + vmPrefix = local.unique_name + // paste in the id of the full custom image + source_image_id = "" + // can be any of the following None, Windows_Client and Windows_Server + license_type = "None" + vm_count = 2 + vmss_size = "Standard_D8s_v3" + // Specify to use 'Regular' or 'Spot' + vmss_priority = "Spot" + // Only used if "SPOT" is set. 
Specify "Delete" or "Deallocate" + vmss_spot_eviction_policy = "Delete" + use_ephemeral_os_disk = true + // customize the os disk size if needed + os_disk_size_gb = null + // Standard_LRS, StandardSSD_LRS, Premium_LRS and UltraSSD_LRS + managed_disk_type = "Standard_LRS" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + + // replace below variables with the infrastructure variables from 0.network + location = "" + vnet_render_clients1_subnet_id = "" + + // update the below with information about the domain + ad_domain = "" // example "rendering.com" + // leave blank to add machine to default location + ou_path = "" + ad_username = "" + ad_password = "" + + // update if you need to change the RDP port + rdp_port = 3389 + + // the following are the arguments to be passed to the custom script + windows_custom_script_arguments = "$arguments = ' -RenameVMPrefix ''${local.vmPrefix}'' -ADDomain ''${local.ad_domain}'' -OUPath ''${local.ou_path}'' ''${local.ad_username}'' -DomainPassword ''${local.ad_password}'' -RDPPort ${local.rdp_port} ' ; " + + // load the powershell file, you can substitute kv pairs as you need them, but + // use arguments where possible + powershell_script = file("${path.module}/../../setupMachine.ps1") } terraform { @@ -78,27 +78,27 @@ resource "azurerm_windows_virtual_machine_scale_set" "vmss" { resource_group_name = azurerm_resource_group.vmss.name location = azurerm_resource_group.vmss.location - sku = local.vmss_size - instances = local.vm_count - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password + sku = local.vmss_size + instances = local.vm_count + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password - custom_data = base64gzip(local.powershell_script) - source_image_id = local.source_image_id + custom_data = base64gzip(local.powershell_script) + source_image_id = local.source_image_id # use low-priority with Delete. Stop Deallocate will be incompatible with OS Ephemeral disks - priority = local.vmss_priority - eviction_policy = local.vmss_priority == "Spot" ? local.vmss_spot_eviction_policy : null + priority = local.vmss_priority + eviction_policy = local.vmss_priority == "Spot" ? local.vmss_spot_eviction_policy : null // avoid overprovision as it can create race conditions with render managers - overprovision = false + overprovision = false // avoid use of zones so you get maximum spread of machines, and have > 100 nodes single_placement_group = false // avoid use of zones so you get maximum spread of machines zone_balance = false - zones = [] + zones = [] // avoid use proximity groups so you get maximum spread of machines // proximity_placement_group_id - + os_disk { storage_account_type = local.managed_disk_type caching = local.use_ephemeral_os_disk == true ? "ReadOnly" : "ReadWrite" @@ -107,14 +107,14 @@ resource "azurerm_windows_virtual_machine_scale_set" "vmss" { dynamic "diff_disk_settings" { for_each = local.use_ephemeral_os_disk == true ? 
[local.use_ephemeral_os_disk] : [] content { - option = "Local" + option = "Local" } } } network_interface { - name = "vminic-${local.unique_name}" - primary = true + name = "vminic-${local.unique_name}" + primary = true enable_accelerated_networking = false ip_configuration { @@ -154,9 +154,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_windows_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_windows_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" +} diff --git a/src/terraform/examples/nfsfilerganesha/main.tf b/src/terraform/examples/nfsfilerganesha/main.tf index 80f935852..ae153c08d 100644 --- a/src/terraform/examples/nfsfilerganesha/main.tf +++ b/src/terraform/examples/nfsfilerganesha/main.tf @@ -1,57 +1,57 @@ variable "offline_mode" { description = "Toggles the offline mode where offline mode destroys the VM and moves the disk to cool storage." - default = false + default = false } // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- - unique_name = "cloudnfsfiler" - - // virtual network and subnet details - network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - subnet_name = "cloud_filers" - filer_private_ip_address = null - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - vm_size = "Standard_DS14_v2" - - // storage_account_type = "Standard_LRS" - // storage_account_type = "StandardSSD_LRS" - storage_account_type = "Premium_LRS" - - // set to true to preserve the disk, but destroy the VM - offline_mode = var.offline_mode - offline_storage_account_type = "Standard_LRS" - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - // disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - // disk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - disk_size_gb = 32767 // P80, E80, S80 - nfs_export_path = "/data" - caching = local.disk_size_gb > 4095 ? "None" : "ReadWrite" + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + unique_name = "cloudnfsfiler" + + // virtual network and subnet details + network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + subnet_name = "cloud_filers" + filer_private_ip_address = null + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + vm_size = "Standard_DS14_v2" + + // storage_account_type = "Standard_LRS" + // storage_account_type = "StandardSSD_LRS" + storage_account_type = "Premium_LRS" + + // set to true to preserve the disk, but destroy the VM + offline_mode = var.offline_mode + offline_storage_account_type = "Standard_LRS" + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + // disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + // disk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + disk_size_gb = 32767 // P80, E80, S80 + nfs_export_path = "/data" + caching = local.disk_size_gb > 4095 ? 
"None" : "ReadWrite" } terraform { @@ -69,56 +69,56 @@ provider "azurerm" { } resource "azurerm_resource_group" "nfsfiler" { - name = local.filer_resource_group_name - location = local.location + name = local.filer_resource_group_name + location = local.location } resource "azurerm_managed_disk" "nfsfiler" { - name = "${local.unique_name}-disk1" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - storage_account_type = local.offline_mode ? local.offline_storage_account_type : local.storage_account_type - create_option = "Empty" - disk_size_gb = local.disk_size_gb + name = "${local.unique_name}-disk1" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + storage_account_type = local.offline_mode ? local.offline_storage_account_type : local.storage_account_type + create_option = "Empty" + disk_size_gb = local.disk_size_gb } // the ephemeral filer module "nfsfiler" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer_ganesha" - deploy_vm = !local.offline_mode - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = local.vm_size - unique_name = local.unique_name - caching = local.caching - enable_root_login = false - deploy_diagnostic_tools = false - - // disk and export details - managed_disk_id = azurerm_managed_disk.nfsfiler.id - nfs_export_path = local.nfs_export_path - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = local.virtual_network_name - virtual_network_subnet_name = local.subnet_name - private_ip_address = local.filer_private_ip_address + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer_ganesha" + deploy_vm = !local.offline_mode + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = local.vm_size + unique_name = local.unique_name + caching = local.caching + enable_root_login = false + deploy_diagnostic_tools = false + + // disk and export details + managed_disk_id = azurerm_managed_disk.nfsfiler.id + nfs_export_path = local.nfs_export_path + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = local.virtual_network_name + virtual_network_subnet_name = local.subnet_name + private_ip_address = local.filer_private_ip_address } output "nfsfiler_username" { - value = module.nfsfiler.admin_username + value = module.nfsfiler.admin_username } output "nfsfiler_address" { - value = module.nfsfiler.primary_ip + value = module.nfsfiler.primary_ip } output "ssh_string" { - value = module.nfsfiler.ssh_string + value = module.nfsfiler.ssh_string } output "list_disks_az_cli" { - value = "az disk list --query \"[?resourceGroup=='${upper(azurerm_resource_group.nfsfiler.name)}'].id\"" -} \ No newline at end of file + value = "az disk list --query \"[?resourceGroup=='${upper(azurerm_resource_group.nfsfiler.name)}'].id\"" +} diff --git a/src/terraform/examples/nfsfilermd/main.tf b/src/terraform/examples/nfsfilermd/main.tf index e3c84e10f..f1f2818f9 100644 --- a/src/terraform/examples/nfsfilermd/main.tf +++ 
b/src/terraform/examples/nfsfilermd/main.tf @@ -1,58 +1,58 @@ variable "offline_mode" { description = "Toggles the offline mode where offline mode destroys the VM and moves the disk to cool storage." - default = false + default = false } // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - unique_name = "cloudnfsfiler" - - // virtual network and subnet details - network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - subnet_name = "cloud_filers" - filer_private_ip_address = null - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - vm_size = "Standard_DS14_v2" - - // storage_account_type = "Standard_LRS" - // storage_account_type = "StandardSSD_LRS" - storage_account_type = "Premium_LRS" - - // set to true to preserve the disk, but destroy the VM - offline_mode = var.offline_mode - offline_storage_account_type = "Standard_LRS" - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - // disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - // disk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - disk_size_gb = 32767 // P80, E80, S80 - nfs_export_path = "/data" - nfs_export_options = "*(rw,sync,no_root_squash)" - caching = local.disk_size_gb > 4095 ? "None" : "ReadWrite" + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ + unique_name = "cloudnfsfiler" + + // virtual network and subnet details + network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + subnet_name = "cloud_filers" + filer_private_ip_address = null + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + vm_size = "Standard_DS14_v2" + + // storage_account_type = "Standard_LRS" + // storage_account_type = "StandardSSD_LRS" + storage_account_type = "Premium_LRS" + + // set to true to preserve the disk, but destroy the VM + offline_mode = var.offline_mode + offline_storage_account_type = "Standard_LRS" + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + // disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + // disk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + disk_size_gb = 32767 // P80, E80, S80 + nfs_export_path = "/data" + nfs_export_options = "*(rw,sync,no_root_squash)" + caching = local.disk_size_gb > 4095 ? "None" : "ReadWrite" } terraform { @@ -70,57 +70,57 @@ provider "azurerm" { } resource "azurerm_resource_group" "nfsfiler" { - name = local.filer_resource_group_name - location = local.location + name = local.filer_resource_group_name + location = local.location } resource "azurerm_managed_disk" "nfsfiler" { - name = "${local.unique_name}-disk1" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - storage_account_type = local.offline_mode ? local.offline_storage_account_type : local.storage_account_type - create_option = "Empty" - disk_size_gb = local.disk_size_gb + name = "${local.unique_name}-disk1" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + storage_account_type = local.offline_mode ? 
local.offline_storage_account_type : local.storage_account_type + create_option = "Empty" + disk_size_gb = local.disk_size_gb } // the ephemeral filer module "nfsfiler" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer_md" - deploy_vm = !local.offline_mode - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = local.vm_size - unique_name = local.unique_name - caching = local.caching - enable_root_login = false - deploy_diagnostic_tools = false - - // disk and export details - managed_disk_id = azurerm_managed_disk.nfsfiler.id - nfs_export_path = local.nfs_export_path - nfs_export_options = local.nfs_export_options - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = local.virtual_network_name - virtual_network_subnet_name = local.subnet_name - private_ip_address = local.filer_private_ip_address + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer_md" + deploy_vm = !local.offline_mode + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = local.vm_size + unique_name = local.unique_name + caching = local.caching + enable_root_login = false + deploy_diagnostic_tools = false + + // disk and export details + managed_disk_id = azurerm_managed_disk.nfsfiler.id + nfs_export_path = local.nfs_export_path + nfs_export_options = local.nfs_export_options + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = local.virtual_network_name + virtual_network_subnet_name = local.subnet_name + private_ip_address = local.filer_private_ip_address } output "nfsfiler_username" { - value = module.nfsfiler.admin_username + value = module.nfsfiler.admin_username } output "nfsfiler_address" { - value = module.nfsfiler.primary_ip + value = module.nfsfiler.primary_ip } output "ssh_string" { - value = module.nfsfiler.ssh_string + value = module.nfsfiler.ssh_string } output "list_disks_az_cli" { - value = "az disk list --query \"[?resourceGroup=='${upper(azurerm_resource_group.nfsfiler.name)}'].id\"" -} \ No newline at end of file + value = "az disk list --query \"[?resourceGroup=='${upper(azurerm_resource_group.nfsfiler.name)}'].id\"" +} diff --git a/src/terraform/examples/securedimage/main.tf b/src/terraform/examples/securedimage/main.tf index 0b590f7fb..db5840b0b 100644 --- a/src/terraform/examples/securedimage/main.tf +++ b/src/terraform/examples/securedimage/main.tf @@ -1,25 +1,25 @@ // customize the Secured VM by adjusting the following local variables locals { - // The secure channel consists of SSH from a single Source IP Address - // If you do not have VPN or express route, get your external IP address - // from http://www.myipaddress.com/ - source_ssh_ip_address = "169.254.169.254" - - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - - // per ISE, only SSH keys and not passwords may be used - vm_ssh_key_data = "ssh-rsa AAAAB3...." 
- resource_group_name = "resource_group" - vm_size = "Standard_D2s_v3" - // this value for OS Disk resize must be between 20GB and 1023GB, - // after this you will need to repartition the disk - os_disk_size_gb = 128 - - // the below is the resource group and name of the previously created custom image - image_resource_group = "image_resource_group" - image_name = "image_name" + // The secure channel consists of SSH from a single Source IP Address + // If you do not have VPN or express route, get your external IP address + // from http://www.myipaddress.com/ + source_ssh_ip_address = "169.254.169.254" + + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + + // per ISE, only SSH keys and not passwords may be used + vm_ssh_key_data = "ssh-rsa AAAAB3...." + resource_group_name = "resource_group" + vm_size = "Standard_D2s_v3" + // this value for OS Disk resize must be between 20GB and 1023GB, + // after this you will need to repartition the disk + os_disk_size_gb = 128 + + // the below is the resource group and name of the previously created custom image + image_resource_group = "image_resource_group" + image_name = "image_name" } terraform { @@ -37,70 +37,70 @@ provider "azurerm" { } data "azurerm_resource_group" "main" { - name = local.resource_group_name + name = local.resource_group_name } data "azurerm_image" "custom_image" { - name = local.image_name - resource_group_name = local.image_resource_group + name = local.image_name + resource_group_name = local.image_resource_group } resource "azurerm_network_security_group" "ssh_nsg" { - name = "ssh_nsg" - location = data.azurerm_resource_group.main.location - resource_group_name = data.azurerm_resource_group.main.name - - // the following security rule only allows incoming traffic from the source ip - // address. - // As machines are added to this VNET, a rule that allows VNET to VNET - // could be added for VMs to communicate with each other. - security_rule { - name = "SSH" - // priorities are between 100 and 4096 an may not overlap. - // A priority of 100 ensures this rule is hit first. - priority = 100 // priorities are between 100 and 4096 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "22" - source_address_prefix = "${local.source_ssh_ip_address}/32" - destination_address_prefix = "*" - } + name = "ssh_nsg" + location = data.azurerm_resource_group.main.location + resource_group_name = data.azurerm_resource_group.main.name - security_rule { - name = "noinbound" - priority = 101 // priorities are between 100 and 4096 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + // the following security rule only allows incoming traffic from the source ip + // address. + // As machines are added to this VNET, a rule that allows VNET to VNET + // could be added for VMs to communicate with each other. + security_rule { + name = "SSH" + // priorities are between 100 and 4096 and may not overlap. + // A priority of 100 ensures this rule is hit first. + priority = 100 // priorities are between 100 and 4096 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "${local.source_ssh_ip_address}/32" + destination_address_prefix = "*" + } - // the following security rule deny's all outbound traffic from any IP - // within the VNET.
- // As machines are added to this VNET, a rule that allows VNET to VNET - // could be added for VMs to communicate with each other. - security_rule { - name = "notrafficout" - // priorities are between 100 and 4096 an may not overlap. - // A priority of 100 ensures this rule is hit first. - priority = 100 - direction = "Outbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "noinbound" + priority = 101 // priorities are between 100 and 4096 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + // the following security rule denies all outbound traffic from any IP + // within the VNET. + // As machines are added to this VNET, a rule that allows VNET to VNET + // could be added for VMs to communicate with each other. + security_rule { + name = "notrafficout" + // priorities are between 100 and 4096 and may not overlap. + // A priority of 100 ensures this rule is hit first. + priority = 100 + direction = "Outbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } } resource "azurerm_virtual_network" "main" { - name = "virtualnetwork" + name = "virtualnetwork" // The /29 is the smallest possible VNET in Azure, 5 addresses are reserved for Azure // and 3 are available for use. address_space = ["10.0.0.0/29"] @@ -115,10 +115,10 @@ } resource "azurerm_public_ip" "vm" { - name = "publicip" - location = data.azurerm_resource_group.main.location - resource_group_name = data.azurerm_resource_group.main.name - allocation_method = "Static" + name = "publicip" + location = data.azurerm_resource_group.main.location + resource_group_name = data.azurerm_resource_group.main.name + allocation_method = "Static" } resource "azurerm_network_interface" "main" { @@ -143,10 +143,10 @@ resource "azurerm_linux_virtual_machine" "main" { size = local.vm_size admin_username = local.vm_admin_username source_image_id = data.azurerm_image.custom_image.id - + // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" disk_size_gb = local.os_disk_size_gb @@ -154,7 +154,7 @@ // per ISE, only SSH keys and not passwords may be used admin_ssh_key { - username = local.vm_admin_username + username = local.vm_admin_username public_key = local.vm_ssh_key_data } } @@ -168,5 +168,5 @@ output "jumpbox_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.vm.ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.vm.ip_address}" +} diff --git a/src/terraform/examples/vfxt/3-filers/main.tf b/src/terraform/examples/vfxt/3-filers/main.tf index f9128c52e..310dc324b 100644 --- a/src/terraform/examples/vfxt/3-filers/main.tf +++ b/src/terraform/examples/vfxt/3-filers/main.tf @@ -1,55 +1,55 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password
= "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - tags = null // local.example_tags - - example_tags = { - Movie = "some movie", - Artist = "some artist", - "Project Name" = "some name", - } + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + tags = null // local.example_tags + + example_tags = { + Movie = "some movie", + Artist = "some artist", + "Project Name" = "some name", + } - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -68,12 +68,12 @@ provider 
"azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -83,135 +83,135 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } module "nasfiler2" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler2" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler2" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } module "nasfiler3" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler3" - - // network details - virtual_network_resource_group = 
local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler3" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] - - tags = local.tags + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] + + tags = local.tags } resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - tags = local.tags - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } - /* add additional junctions by adding another junction block shown below + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + tags = local.tags + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export + } + /* add additional junctions by adding another junction block shown below junction { namespace_path = "/nfsdata2" core_filer_export = "/data2" } */ - } + } - core_filer { - name = "nfs2" - fqdn_or_primary_ip = module.nasfiler2.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs2data" - core_filer_export = module.nasfiler2.core_filer_export - } + core_filer { + name = "nfs2" + fqdn_or_primary_ip = module.nasfiler2.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs2data" + core_filer_export = module.nasfiler2.core_filer_export } + } - core_filer { - name = "nfs3" - fqdn_or_primary_ip = module.nasfiler3.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs3data" - core_filer_export = module.nasfiler3.core_filer_export - } + core_filer { + name = "nfs3" + fqdn_or_primary_ip = module.nasfiler3.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs3data" + core_filer_export = module.nasfiler3.core_filer_export } + } } output "controller_username" { @@ -223,13 +223,13 @@ 
output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/HoudiniOptimized/main.tf b/src/terraform/examples/vfxt/HoudiniOptimized/main.tf index e655f0712..901ed63f1 100644 --- a/src/terraform/examples/vfxt/HoudiniOptimized/main.tf +++ b/src/terraform/examples/vfxt/HoudiniOptimized/main.tf @@ -1,47 +1,47 @@ // customize the simple VM by adjusting the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "PASSWORD" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "PASSWORD" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache policies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -60,12 +60,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -75,94 +75,94 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data =
local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - global_custom_settings = [ - "vcm.disableReadAhead AB 1", - "cluster.ctcConnMult CE 24", - "cluster.CtcBackEndTimeout KO 220000000", - "cluster.NfsBackEndTimeout VO 100000000", - "cluster.NfsFrontEndCwnd EK 1", + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + global_custom_settings = [ + "vcm.disableReadAhead AB 1", + "cluster.ctcConnMult CE 24", + "cluster.CtcBackEndTimeout KO 220000000", + "cluster.NfsBackEndTimeout VO 100000000", + "cluster.NfsFrontEndCwnd EK 1", + ] + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + custom_settings = [ + "client_rt_preferred FE 524288", + "client_wt_preferred NO 524288", + "nfsConnMult YW 20", + "autoWanOptimize YF 2", + "always_forward OZ 1", ] - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - custom_settings = [ - "client_rt_preferred FE 524288", - "client_wt_preferred NO 524288", - "nfsConnMult YW 20", - "autoWanOptimize YF 2", - "always_forward OZ 1", - ] - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } - /* add additional junctions by adding another junction block shown below + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export + } + /* add additional junctions by adding another junction block shown below junction { namespace_path = "/nfsdata2" core_filer_export = "/data2" } */ - } + } } output "controller_username" { @@ -174,13 +174,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/azureblobfiler-zonal/main.tf b/src/terraform/examples/vfxt/azureblobfiler-zonal/main.tf index 506f2a6b5..dcc27d9f6 100644 --- a/src/terraform/examples/vfxt/azureblobfiler-zonal/main.tf +++ b/src/terraform/examples/vfxt/azureblobfiler-zonal/main.tf @@ -1,45 +1,45 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - 
vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // storage details - storage_resource_group_name = "storage_resource_group" - // choose from ZRS / GZRS / RAGZRS - storage_zone_redundancy_type = "ZRS" - storage_account_name = "storageaccountname" - avere_storage_container_name = "vfxt" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = false - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - namespace_path = "/storagevfxt" - - // advanced scenarios: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: in addition to storage account put the custom image resource group here - alternative_resource_groups = [local.storage_resource_group_name] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // storage details + storage_resource_group_name = "storage_resource_group" + // choose from ZRS / GZRS / RAGZRS + storage_zone_redundancy_type = "ZRS" + storage_account_name = "storageaccountname" + avere_storage_container_name = "vfxt" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = false + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + namespace_path = "/storagevfxt" + + // advanced scenarios: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: in addition to storage account put the custom image resource group here + alternative_resource_groups = [local.storage_resource_group_name] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -57,12 +57,12 @@ provider "azurerm" { } // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "storage" { @@ -77,12 +77,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = local.storage_zone_redundancy_type network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - // need for the controller to create the container - module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + // need for the controller to create the container + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -91,54 +91,54 @@ resource "azurerm_storage_account" "storage" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = 
"github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - use_availability_zones = true - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - azure_storage_filer { - account_name = azurerm_storage_account.storage.name - container_name = local.avere_storage_container_name - junction_namespace_path = local.namespace_path - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + use_availability_zones = true + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + azure_storage_filer { + account_name = azurerm_storage_account.storage.name + container_name = local.avere_storage_container_name + junction_namespace_path = local.namespace_path + } } output "controller_username" { @@ -150,17 +150,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "mount_namespace_path" { - value = local.namespace_path -} \ No newline at end of file + value = local.namespace_path +} diff --git a/src/terraform/examples/vfxt/azureblobfiler/main.tf b/src/terraform/examples/vfxt/azureblobfiler/main.tf index 2773817bb..4878b2d78 100644 --- a/src/terraform/examples/vfxt/azureblobfiler/main.tf +++ b/src/terraform/examples/vfxt/azureblobfiler/main.tf @@ -1,43 +1,43 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // storage details - storage_resource_group_name = "storage_resource_group" - storage_account_name = "storageaccount" - avere_storage_container_name = "vfxt" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - namespace_path = "/storagevfxt" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: in addition to storage account put the custom image resource group here - alternative_resource_groups = [local.storage_resource_group_name] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // storage details + storage_resource_group_name = "storage_resource_group" + storage_account_name = "storageaccount" + avere_storage_container_name = "vfxt" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + namespace_path = "/storagevfxt" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: in addition to storage account put the custom image resource group here + alternative_resource_groups = [local.storage_resource_group_name] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -56,12 +56,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource 
"azurerm_resource_group" "storage" { @@ -76,12 +76,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - // need for the controller to create the container - module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + // need for the controller to create the container + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -90,53 +90,53 @@ resource "azurerm_storage_account" "storage" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - azure_storage_filer { - account_name = azurerm_storage_account.storage.name - container_name = local.avere_storage_container_name - junction_namespace_path = local.namespace_path - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + azure_storage_filer { + account_name = azurerm_storage_account.storage.name + container_name = local.avere_storage_container_name + junction_namespace_path = local.namespace_path + } } output "controller_username" { @@ -148,17 +148,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "mount_namespace_path" { - value = local.namespace_path -} \ No newline at end of file + value = local.namespace_path +} diff --git a/src/terraform/examples/vfxt/cloudworkstation/collaboratingcloudworkstation/main.tf b/src/terraform/examples/vfxt/cloudworkstation/collaboratingcloudworkstation/main.tf index 6e47bf786..a68a5d52c 100644 --- a/src/terraform/examples/vfxt/cloudworkstation/collaboratingcloudworkstation/main.tf +++ b/src/terraform/examples/vfxt/cloudworkstation/collaboratingcloudworkstation/main.tf @@ -1,50 +1,50 @@ // customize the simple VM by 
editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - // "Isolated Cloud Workstation" - // "Collaborating Cloud Workstation" - cache_policy = "Collaborating Cloud Workstation" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + // "Isolated Cloud Workstation" + // "Collaborating Cloud Workstation" + cache_policy = "Collaborating Cloud Workstation" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -63,12 +63,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -78,81 +78,81 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = 
local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } - /* add additional junctions by adding another junction block shown below + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export + } + /* add additional junctions by adding another junction block shown below junction { namespace_path = "/nfsdata2" core_filer_export = "/data2" } */ - } -} + } +} output "controller_username" { value = module.vfxtcontroller.controller_username @@ -163,13 +163,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/cloudworkstation/isolatedcloudworkstation/main.tf b/src/terraform/examples/vfxt/cloudworkstation/isolatedcloudworkstation/main.tf index 565c1365c..4a44ebfe3 100644 --- a/src/terraform/examples/vfxt/cloudworkstation/isolatedcloudworkstation/main.tf +++ b/src/terraform/examples/vfxt/cloudworkstation/isolatedcloudworkstation/main.tf @@ -1,50 +1,50 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - // "Isolated Cloud Workstation" - // "Collaborating Cloud Workstation" - cache_policy = "Isolated Cloud Workstation" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + // "Isolated Cloud Workstation" + // "Collaborating Cloud Workstation" + cache_policy = "Isolated Cloud Workstation" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -63,12 +63,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -78,81 +78,81 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = 
local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } - /* add additional junctions by adding another junction block shown below + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export + } + /* add additional junctions by adding another junction block shown below junction { namespace_path = "/nfsdata2" core_filer_export = "/data2" } */ - } -} + } +} output "controller_username" { value = module.vfxtcontroller.controller_username @@ -163,13 +163,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/custom-vserver/main.tf b/src/terraform/examples/vfxt/custom-vserver/main.tf index db8294ae6..120332ee9 100644 --- a/src/terraform/examples/vfxt/custom-vserver/main.tf +++ b/src/terraform/examples/vfxt/custom-vserver/main.tf @@ -1,52 +1,52 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - vfxt_subnet_range = "10.0.1.0/24" - vserver_first_ip = "10.0.1.200" - vserver_ip_count = 32 - - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + vfxt_subnet_range = "10.0.1.0/24" + vserver_first_ip = "10.0.1.200" + vserver_ip_count = 32 + + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -65,13 +65,13 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location - subnet_cloud_cache_address_prefix = local.vfxt_subnet_range + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location + subnet_cloud_cache_address_prefix = local.vfxt_subnet_range - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -81,78 +81,78 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = 
"github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - vserver_first_ip = local.vserver_first_ip - vserver_ip_count = local.vserver_ip_count - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + vserver_first_ip = local.vserver_first_ip + vserver_ip_count = local.vserver_ip_count + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export } -} + } +} output "filer_address" { value = module.nasfiler1.primary_ip @@ -171,13 +171,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/hammerspace/main.tf b/src/terraform/examples/vfxt/hammerspace/main.tf index a55161eae..37b58849a 100644 --- a/src/terraform/examples/vfxt/hammerspace/main.tf +++ b/src/terraform/examples/vfxt/hammerspace/main.tf @@ -1,93 +1,93 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - unique_name = "hammerspace1" - hammerspace_image_id = "" - use_highly_available = false - anvil_configuration = local.use_highly_available ? 
"High Availability" : "Standalone" - data_subnet_mask_bits = 25 - anvil_data_cluster_ip = "10.0.2.110" // leave blank to be dynamic - dsx_instance_count = 1 - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - anvil_instance_type = "Standard_F16s_v2" - // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes - // vm_size = "Standard_F16s_v2" - // vm_size = "Standard_F32s_v2" - // vm_size = "Standard_F48s_v2" - dsx_instance_type = "Standard_F16s_v2" - - // storage_account_type = "Standard_LRS" - // storage_account_type = "StandardSSD_LRS" - storage_account_type = "Premium_LRS" - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - metadata_disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - // disk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - // metadata_disk_size_gb = 32767 // P80, E80, S80 - - // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ - // disk_size_gb = 127 // P10, E10, S10 - // disk_size_gb = 255 // P15, E15, S15 - // disk_size_gb = 511 // P20, E20, S20 - // disk_size_gb = 1023 // P30, E30, S30 - // disk_size_gb = 2047 // P40, E40, S40 - datadisk_size_gb = 4095 // P50, E50, S50 - // disk_size_gb = 8191 // P60, E60, S60 - // disk_size_gb = 16383 // P70, E70, S70 - // data_disk_size_gb = 32767 // P80, E80, S80 - - hammerspace_filer_nfs_export_path = "/assets" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - namespace_path = "/assets" - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + unique_name = "hammerspace1" + hammerspace_image_id = "" + use_highly_available = false + anvil_configuration = local.use_highly_available ? "High Availability" : "Standalone" + data_subnet_mask_bits = 25 + anvil_data_cluster_ip = "10.0.2.110" // leave blank to be dynamic + dsx_instance_count = 1 + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + anvil_instance_type = "Standard_F16s_v2" + // More sizes found here: https://docs.microsoft.com/en-us/azure/virtual-machines/sizes + // vm_size = "Standard_F16s_v2" + // vm_size = "Standard_F32s_v2" + // vm_size = "Standard_F48s_v2" + dsx_instance_type = "Standard_F16s_v2" + + // storage_account_type = "Standard_LRS" + // storage_account_type = "StandardSSD_LRS" + storage_account_type = "Premium_LRS" + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + metadata_disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + // disk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + // metadata_disk_size_gb = 32767 // P80, E80, S80 + + // more disk sizes and pricing found here: https://azure.microsoft.com/en-us/pricing/details/managed-disks/ + // disk_size_gb = 127 // P10, E10, S10 + // disk_size_gb = 255 // P15, E15, S15 + // disk_size_gb = 511 // P20, E20, S20 + // disk_size_gb = 1023 // P30, E30, S30 + // disk_size_gb = 2047 // P40, E40, S40 + datadisk_size_gb = 4095 // P50, E50, S50 + // disk_size_gb = 8191 // P60, E60, S60 + // disk_size_gb = 16383 // P70, E70, S70 + // data_disk_size_gb = 32767 // P80, E80, S80 + + hammerspace_filer_nfs_export_path = "/assets" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + namespace_path = "/assets" + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -106,12 +106,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = 
"github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -121,118 +121,118 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "anvil" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - hammerspace_image_id = local.hammerspace_image_id - unique_name = local.unique_name - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - anvil_configuration = local.anvil_configuration - anvil_instance_type = local.anvil_instance_type - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_ha_subnet_name = module.network.cloud_filers_ha_subnet_name - virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name - virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits - anvil_data_cluster_ip = local.anvil_data_cluster_ip - anvil_metadata_disk_storage_type = local.storage_account_type - anvil_metadata_disk_size = local.metadata_disk_size_gb - - module_depends_on = concat(module.network.module_depends_on_ids, [azurerm_resource_group.nfsfiler.id]) + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + hammerspace_image_id = local.hammerspace_image_id + unique_name = local.unique_name + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + anvil_configuration = local.anvil_configuration + anvil_instance_type = local.anvil_instance_type + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_ha_subnet_name = module.network.cloud_filers_ha_subnet_name + virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name + virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits + anvil_data_cluster_ip = local.anvil_data_cluster_ip + anvil_metadata_disk_storage_type = local.storage_account_type + anvil_metadata_disk_size = local.metadata_disk_size_gb + + module_depends_on = concat(module.network.module_depends_on_ids, [azurerm_resource_group.nfsfiler.id]) } // the ephemeral filer module "dsx" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/dsx" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - hammerspace_image_id = local.hammerspace_image_id - unique_name = local.unique_name - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - dsx_instance_count = local.dsx_instance_count - dsx_instance_type = local.dsx_instance_type - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name - virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits - anvil_password = module.anvil.web_ui_password - anvil_data_cluster_ip = 
module.anvil.anvil_data_cluster_ip - anvil_domain = module.anvil.anvil_domain - dsx_data_disk_storage_type = local.storage_account_type - dsx_data_disk_size = local.datadisk_size_gb - - module_depends_on = concat(module.network.module_depends_on_ids, [azurerm_resource_group.nfsfiler.id]) + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/dsx" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + hammerspace_image_id = local.hammerspace_image_id + unique_name = local.unique_name + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + dsx_instance_count = local.dsx_instance_count + dsx_instance_type = local.dsx_instance_type + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_data_subnet_name = module.network.cloud_filers_subnet_name + virtual_network_data_subnet_mask_bits = local.data_subnet_mask_bits + anvil_password = module.anvil.web_ui_password + anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip + anvil_domain = module.anvil.anvil_domain + dsx_data_disk_storage_type = local.storage_account_type + dsx_data_disk_size = local.datadisk_size_gb + + module_depends_on = concat(module.network.module_depends_on_ids, [azurerm_resource_group.nfsfiler.id]) } module "anvil_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil-run-once-configure" - anvil_arm_virtual_machine_id = length(module.anvil.arm_virtual_machine_ids) == 0 ? "" : module.anvil.arm_virtual_machine_ids[0] - anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip - web_ui_password = module.anvil.web_ui_password - dsx_count = local.dsx_instance_count - nfs_export_path = local.hammerspace_filer_nfs_export_path - anvil_hostname = length(module.anvil.anvil_host_names) == 0 ? "" : module.anvil.anvil_host_names[0] - - module_depends_on = module.anvil.module_depends_on_ids + source = "github.com/Azure/Avere/src/terraform/modules/hammerspace/anvil-run-once-configure" + anvil_arm_virtual_machine_id = length(module.anvil.arm_virtual_machine_ids) == 0 ? "" : module.anvil.arm_virtual_machine_ids[0] + anvil_data_cluster_ip = module.anvil.anvil_data_cluster_ip + web_ui_password = module.anvil.web_ui_password + dsx_count = local.dsx_instance_count + nfs_export_path = local.hammerspace_filer_nfs_export_path + anvil_hostname = length(module.anvil.anvil_host_names) == 0 ? 
"" : module.anvil.anvil_host_names[0] + + module_depends_on = module.anvil.module_depends_on_ids } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - enable_nlm = false - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller, module.anvil_configure.module_depends_on_id] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = join(" ", module.dsx.dsx_ip_addresses) - cache_policy = local.cache_policy - junction { - namespace_path = local.namespace_path - core_filer_export = local.hammerspace_filer_nfs_export_path - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + enable_nlm = false + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller, module.anvil_configure.module_depends_on_id] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = join(" ", module.dsx.dsx_ip_addresses) + cache_policy = local.cache_policy + junction { + namespace_path = local.namespace_path + core_filer_export = local.hammerspace_filer_nfs_export_path } -} + } +} output "hammerspace_filer_addresses" { value = module.dsx.dsx_ip_addresses @@ -247,11 +247,11 @@ output "hammerspace_webui_address" { } output "hammerspace_webui_username" { - value = module.anvil.web_ui_username + value = module.anvil.web_ui_username } output "hammerspace_webui_password" { - value = module.anvil.web_ui_password + value = module.anvil.web_ui_password } output "controller_username" { @@ -263,17 +263,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "mount_namespace_path" { - value = local.namespace_path -} \ No newline at end of file + value = local.namespace_path +} diff --git a/src/terraform/examples/vfxt/netapp-across-region/main.tf b/src/terraform/examples/vfxt/netapp-across-region/main.tf index 583333f67..e5e486cf8 100644 --- a/src/terraform/examples/vfxt/netapp-across-region/main.tf +++ b/src/terraform/examples/vfxt/netapp-across-region/main.tf @@ -1,55 +1,55 @@ // customize the simple VM by editing the following local variables locals { - // the region of the main deployment - location = "eastus" - network_resource_group_name = "network_resource_group" - - // netapp filer details - filer_location = "westus2" - filer_resource_group_name = "filer_resource_group" - netapp_account_name = "netappaccount" - export_path = "data" - // possible values are Standard, Premium, Ultra - service_level = "Premium" - pool_size_in_tb = 4 - volume_storage_quota_in_gb = 100 - - // vnet to vnet settings - vpngw_generation = "Generation1" // generation and sku defined in https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-about-vpngateways#benchmark - vpngw_sku = "VpnGw2" - shared_key = "5v2ty45bt171p53c5h4r3dk4y" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = 
"VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // controller details - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // controller details - controller_add_public_ip = true - - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the main deployment + location = "eastus" + network_resource_group_name = "network_resource_group" + + // netapp filer details + filer_location = "westus2" + filer_resource_group_name = "filer_resource_group" + netapp_account_name = "netappaccount" + export_path = "data" + // possible values are Standard, Premium, Ultra + service_level = "Premium" + pool_size_in_tb = 4 + volume_storage_quota_in_gb = 100 + + // vnet to vnet settings + vpngw_generation = "Generation1" // generation and sku defined in https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-about-vpngateways#benchmark + vpngw_sku = "VpnGw2" + shared_key = "5v2ty45bt171p53c5h4r3dk4y" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // controller details + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // controller details + controller_add_public_ip = true + + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -71,12 +71,12 @@ provider "azurerm" { //////////////////////////////////////////////////////////////// module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_subnet" "rendergwsubnet" { @@ -94,10 +94,10 @@ resource "azurerm_resource_group" "nfsfiler" { } resource "azurerm_virtual_network" "filervnet" { - name = "filervnet" - address_space = ["192.168.0.0/22"] - location = azurerm_resource_group.nfsfiler.location - resource_group_name = azurerm_resource_group.nfsfiler.name + name = "filervnet" + address_space = ["192.168.0.0/22"] + location = azurerm_resource_group.nfsfiler.location + resource_group_name = azurerm_resource_group.nfsfiler.name } resource "azurerm_subnet" "filergwsubnet" { @@ -155,10 +155,10 @@ resource "azurerm_netapp_volume" "netappvolume" { storage_quota_in_gb = local.volume_storage_quota_in_gb export_policy_rule { - rule_index = 1 - allowed_clients = ["0.0.0.0/0"] + rule_index = 1 + allowed_clients = ["0.0.0.0/0"] protocols_enabled = ["NFSv3"] - unix_read_write = true + unix_read_write = true } } @@ -252,89 +252,89 @@ resource "azurerm_virtual_network_gateway_connection" "render_to_filer" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name } resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller, azurerm_virtual_network_gateway_connection.render_to_filer, azurerm_virtual_network_gateway_connection.filer_to_render] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = join(" ", tolist(azurerm_netapp_volume.netappvolume.mount_ip_addresses)) - cache_policy = local.cache_policy - junction { - namespace_path = "/${local.export_path}" - core_filer_export = "/${local.export_path}" - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller, azurerm_virtual_network_gateway_connection.render_to_filer, azurerm_virtual_network_gateway_connection.filer_to_render] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = join(" ", tolist(azurerm_netapp_volume.netappvolume.mount_ip_addresses)) + cache_policy = local.cache_policy + junction { + namespace_path = "/${local.export_path}" + core_filer_export = "/${local.export_path}" } + } } output "netapp_region" { - value = local.filer_location + value = local.filer_location } output "netapp_addresses" { - value = azurerm_netapp_volume.netappvolume.mount_ip_addresses + value = azurerm_netapp_volume.netappvolume.mount_ip_addresses } output "netapp_export" { - value = local.export_path + value = local.export_path } output "controller_username" { - value = module.vfxtcontroller.controller_username + value = module.vfxtcontroller.controller_username } output "controller_address" { - value = module.vfxtcontroller.controller_address + value = module.vfxtcontroller.controller_address } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 
${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "vfxt_region" { - value = local.location + value = local.location } output "vfxt_management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "vfxt_mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "vfxt_export_path" { - value = "/${local.export_path}" -} \ No newline at end of file + value = "/${local.export_path}" +} diff --git a/src/terraform/examples/vfxt/netapp/main.tf b/src/terraform/examples/vfxt/netapp/main.tf index 124997d2a..1d195a504 100644 --- a/src/terraform/examples/vfxt/netapp/main.tf +++ b/src/terraform/examples/vfxt/netapp/main.tf @@ -1,52 +1,52 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // netapp filer details - netapp_resource_group_name = "netapp_resource_group" - export_path = "data" - // possible values are Standard, Premium, Ultra - service_level = "Premium" - pool_size_in_tb = 4 - volume_storage_quota_in_gb = 100 - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // netapp filer details + netapp_resource_group_name = "netapp_resource_group" + export_path = "data" + // possible values are Standard, Premium, Ultra + service_level = "Premium" + pool_size_in_tb = 4 + volume_storage_quota_in_gb = 100 + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -65,12 +65,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_subnet" "netapp" { @@ -129,74 +129,74 @@ resource "azurerm_netapp_volume" "netappvolume" { storage_quota_in_gb = local.volume_storage_quota_in_gb export_policy_rule { - rule_index = 1 - allowed_clients = [module.network.vnet_address_space] + rule_index = 1 + allowed_clients = [module.network.vnet_address_space] protocols_enabled = ["NFSv3"] - unix_read_write = true + unix_read_write = true } } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port - // network details - virtual_network_resource_group = 
local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name - module_depends_on = [module.network.vnet_id] + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = join(" ", tolist(azurerm_netapp_volume.netappvolume.mount_ip_addresses)) - cache_policy = local.cache_policy - junction { - namespace_path = "/${local.export_path}" - core_filer_export = "/${local.export_path}" - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = join(" ", tolist(azurerm_netapp_volume.netappvolume.mount_ip_addresses)) + cache_policy = local.cache_policy + junction { + namespace_path = "/${local.export_path}" + core_filer_export = "/${local.export_path}" } -} + } +} output "netapp_export_path" { - value = local.export_path + value = local.export_path } output "netapp_addresses" { - value = azurerm_netapp_volume.netappvolume.mount_ip_addresses + value = azurerm_netapp_volume.netappvolume.mount_ip_addresses } output "controller_username" { @@ -208,17 +208,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "vfxt_management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "vfxt_mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "vfxt_export_path" { - value = "/${local.export_path}" -} \ No newline at end of file + value = "/${local.export_path}" +} diff --git a/src/terraform/examples/vfxt/opencue/new-storage/main.tf b/src/terraform/examples/vfxt/opencue/new-storage/main.tf index a3c427d8c..2305cba68 100644 --- a/src/terraform/examples/vfxt/opencue/new-storage/main.tf +++ b/src/terraform/examples/vfxt/opencue/new-storage/main.tf @@ -1,50 +1,50 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - network_resource_group_name = "opencue_network_rg" - storage_resource_group_name = "opencue_storage_rg" - vfxt_resource_group_name = "opencue_vfxt_rg" - cuebot_resource_group_name = "opencue_cuebot_rg" - vmss_resource_group_name = "opencue_vmss_rg" - - // CueBot VM details - cuebot_name = "cuebot" - cuebot_vm_size = "Standard_D2s_v3" // Min 6GB RAM required by cuebot - vm_admin_password = "Password1234!" - vm_admin_username = "azureuser" - vm_ssh_key_data = "ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // storage details - storage_account_name = "opencuetest1234" - avere_storage_container_name = "opencue" - nfs_export_path = "/opencue-demo" - - // vfxt details - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - - # download the latest moana island scene from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html - # and upload to an azure storage blob and put the URL below - - // vmss details - unique_name = "vmss" - vmss_priority = "Low" - vm_count = 2 - vmss_size = "Standard_D2s_v3" - mount_target = "/nfs" - opencue_env_vars = "CUE_FS_ROOT=${local.mount_target}/opencue-demo" - - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + network_resource_group_name = "opencue_network_rg" + storage_resource_group_name = "opencue_storage_rg" + vfxt_resource_group_name = "opencue_vfxt_rg" + cuebot_resource_group_name = "opencue_cuebot_rg" + vmss_resource_group_name = "opencue_vmss_rg" + + // CueBot VM details + cuebot_name = "cuebot" + cuebot_vm_size = "Standard_D2s_v3" // Min 6GB RAM required by cuebot + vm_admin_password = "Password1234!" + vm_admin_username = "azureuser" + vm_ssh_key_data = "ssh-rsa AAAAB3...." + ssh_port = 22 + + // storage details + storage_account_name = "opencuetest1234" + avere_storage_container_name = "opencue" + nfs_export_path = "/opencue-demo" + + // vfxt details + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + + # download the latest moana island scene from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html + # and upload to an azure storage blob and put the URL below + + // vmss details + unique_name = "vmss" + vmss_priority = "Low" + vm_count = 2 + vmss_size = "Standard_D2s_v3" + mount_target = "/nfs" + opencue_env_vars = "CUE_FS_ROOT=${local.mount_target}/opencue-demo" + + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -63,12 +63,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource 
"azurerm_resource_group" "storage" { @@ -83,12 +83,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - // need for the controller to create the container - module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + // need for the controller to create the container + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -97,172 +97,172 @@ resource "azurerm_storage_account" "storage" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } # // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = module.network.vnet_resource_group - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - - # node_size = "unsupported_test_SKU" - # node_cache_size = 1024 - - azure_storage_filer { - account_name = local.storage_account_name - container_name = local.avere_storage_container_name - custom_settings = [] - junction_namespace_path = local.nfs_export_path - } -} + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = module.network.vnet_resource_group + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + + # node_size = "unsupported_test_SKU" + # node_cache_size = 1024 + + azure_storage_filer { + account_name = local.storage_account_name + container_name = local.avere_storage_container_name + custom_settings = [] + junction_namespace_path = local.nfs_export_path + } +} resource "azurerm_resource_group" "cuebot_rg" { - name = local.cuebot_resource_group_name - location = local.location + name = local.cuebot_resource_group_name + location = local.location } resource "azurerm_public_ip" "cuebot_public_ip" { - name = "${local.cuebot_name}-public-ip" - resource_group_name = local.cuebot_resource_group_name - location = local.location - allocation_method = "Static" + name = "${local.cuebot_name}-public-ip" + resource_group_name = local.cuebot_resource_group_name + location = local.location + allocation_method = "Static" - depends_on = [azurerm_resource_group.cuebot_rg] + depends_on = [azurerm_resource_group.cuebot_rg] } resource "azurerm_network_interface" "cuebot_nic" { - name = "${local.cuebot_name}-nic" - location = local.location - resource_group_name = local.cuebot_resource_group_name - - ip_configuration { - name = "cuebotconfiguration" - subnet_id = module.network.jumpbox_subnet_id - private_ip_address_allocation = "Dynamic" - public_ip_address_id = azurerm_public_ip.cuebot_public_ip.id - } + name = "${local.cuebot_name}-nic" + location = local.location + resource_group_name = local.cuebot_resource_group_name + + ip_configuration { + name = "cuebotconfiguration" + subnet_id = 
module.network.jumpbox_subnet_id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.cuebot_public_ip.id + } - depends_on = [module.network, azurerm_public_ip.cuebot_public_ip] + depends_on = [module.network, azurerm_public_ip.cuebot_public_ip] } resource "azurerm_virtual_machine" "cuebot" { - name = local.cuebot_name - location = local.location - resource_group_name = local.cuebot_resource_group_name - vm_size = local.cuebot_vm_size - network_interface_ids = [azurerm_network_interface.cuebot_nic.id] - - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-LTS" - version = "latest" - } - storage_os_disk { - name = "${local.cuebot_name}-osdisk" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "Standard_LRS" - } - dynamic "os_profile" { - for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [null] - content { - computer_name = local.cuebot_name - admin_username = local.vm_admin_username - admin_password = os_profile.value - custom_data = templatefile("${path.module}/cloud-init.yml", {namespace_path = local.nfs_export_path, ssh_port = local.ssh_port, cache_ip = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0]})//local.cuebot_vm_cloud_init - } + name = local.cuebot_name + location = local.location + resource_group_name = local.cuebot_resource_group_name + vm_size = local.cuebot_vm_size + network_interface_ids = [azurerm_network_interface.cuebot_nic.id] + + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } + storage_os_disk { + name = "${local.cuebot_name}-osdisk" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + dynamic "os_profile" { + for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [null] + content { + computer_name = local.cuebot_name + admin_username = local.vm_admin_username + admin_password = os_profile.value + custom_data = templatefile("${path.module}/cloud-init.yml", { namespace_path = local.nfs_export_path, ssh_port = local.ssh_port, cache_ip = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] }) //local.cuebot_vm_cloud_init } - // dynamic block when password is specified - dynamic "os_profile_linux_config" { - for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [] - content { - disable_password_authentication = false - } + } + // dynamic block when password is specified + dynamic "os_profile_linux_config" { + for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [] + content { + disable_password_authentication = false } - // dynamic block when SSH key is specified - dynamic "os_profile_linux_config" { - for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] - content { - disable_password_authentication = true - ssh_keys { - key_data = local.vm_ssh_key_data - path = "/home/${local.vm_admin_username}/.ssh/authorized_keys" - } - } + } + // dynamic block when SSH key is specified + dynamic "os_profile_linux_config" { + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + disable_password_authentication = true + ssh_keys { + key_data = local.vm_ssh_key_data + path = "/home/${local.vm_admin_username}/.ssh/authorized_keys" + } } + } - depends_on = [avere_vfxt.vfxt] + depends_on = [avere_vfxt.vfxt] } // the opencue module module "opencue_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/opencue_config" - - node_address = module.vfxtcontroller.controller_address - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] - nfs_export_path = local.nfs_export_path - ssh_port = local.ssh_port + source = "github.com/Azure/Avere/src/terraform/modules/opencue_config" + + node_address = module.vfxtcontroller.controller_address + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] + nfs_export_path = local.nfs_export_path + ssh_port = local.ssh_port } // the VMSS module module "vmss" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" - - resource_group_name = local.vmss_resource_group_name - location = local.location - vmss_priority = local.vmss_priority - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - unique_name = local.unique_name - vm_count = local.vm_count - vm_size = local.vmss_size - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.render_clients1_subnet_name - mount_target = local.mount_target - nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) - nfs_export_path = local.nfs_export_path - additional_env_vars = "${local.opencue_env_vars} CUEBOT_HOSTNAME=${azurerm_network_interface.cuebot_nic.private_ip_address}" - bootstrap_script_path = module.opencue_configure.bootstrap_script_path - module_depends_on = [module.opencue_configure.module_depends_on_id, azurerm_virtual_machine.cuebot] + source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" + + resource_group_name = local.vmss_resource_group_name + location = local.location + vmss_priority = local.vmss_priority + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + unique_name = local.unique_name + vm_count = local.vm_count + vm_size = local.vmss_size + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.render_clients1_subnet_name + mount_target = local.mount_target + nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + nfs_export_path = local.nfs_export_path + additional_env_vars = "${local.opencue_env_vars} CUEBOT_HOSTNAME=${azurerm_network_interface.cuebot_nic.private_ip_address}" + bootstrap_script_path = module.opencue_configure.bootstrap_script_path + module_depends_on = [module.opencue_configure.module_depends_on_id, azurerm_virtual_machine.cuebot] } output "cuebot_vm_ssh" { - 
value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.cuebot_public_ip.ip_address}" + value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.cuebot_public_ip.ip_address}" } output "controller_username" { @@ -278,15 +278,15 @@ output "controller_ssh_port" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "vmss_id" { @@ -302,9 +302,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${local.vmss_resource_group_name} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${local.vmss_resource_group_name} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" } diff --git a/src/terraform/examples/vfxt/opencue/pre-existing-azure-blob/main.tf b/src/terraform/examples/vfxt/opencue/pre-existing-azure-blob/main.tf index 77fa05ab6..2adc7e24a 100644 --- a/src/terraform/examples/vfxt/opencue/pre-existing-azure-blob/main.tf +++ b/src/terraform/examples/vfxt/opencue/pre-existing-azure-blob/main.tf @@ -1,55 +1,55 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - network_resource_group_name = "opencue_network_rg" - storage_resource_group_name = "opencue_storage_rg" - vfxt_resource_group_name = "opencue_vfxt_rg" - cuebot_resource_group_name = "opencue_cuebot_rg" - vmss_resource_group_name = "opencue_vmss_rg" - - // CueBot VM details - cuebot_name = "cuebot" - cuebot_vm_size = "Standard_D2s_v3" // Min 6GB RAM required by cuebot - vm_admin_password = "Password1234!" - vm_admin_username = "azureuser" - vm_ssh_key_data = "ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // storage details - storage_account_name = "" - avere_storage_container_name = "opencue" - nfs_export_path = "/opencue-demo" - - // vfxt details - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - - # download the latest moana island scene from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html - # and upload to an azure storage blob and put the URL below - - // vmss details - unique_name = "vmss" - vmss_priority = "Low" - vm_count = 2 - vmss_size = "Standard_D2s_v3" - mount_target = "/nfs" - opencue_env_vars = "CUE_FS_ROOT=${local.mount_target}/opencue-demo" - - # Pre-populated storage account with data to speed up demo testing - pre_pop_storage_resource_group = "pre_pop_storage_resource_group" - pre_pop_storage_account_name = "prepopopencue" - pre_pop_container_name = "vfxt" - - alternative_resource_groups = [local.pre_pop_storage_resource_group] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + network_resource_group_name = "opencue_network_rg" + storage_resource_group_name = "opencue_storage_rg" + vfxt_resource_group_name = "opencue_vfxt_rg" + cuebot_resource_group_name = "opencue_cuebot_rg" + vmss_resource_group_name = "opencue_vmss_rg" + + // CueBot VM details + cuebot_name = "cuebot" + cuebot_vm_size = "Standard_D2s_v3" // Min 6GB RAM required by cuebot + vm_admin_password = "Password1234!" + vm_admin_username = "azureuser" + vm_ssh_key_data = "ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // storage details + storage_account_name = "" + avere_storage_container_name = "opencue" + nfs_export_path = "/opencue-demo" + + // vfxt details + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + + # download the latest moana island scene from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html + # and upload to an azure storage blob and put the URL below + + // vmss details + unique_name = "vmss" + vmss_priority = "Low" + vm_count = 2 + vmss_size = "Standard_D2s_v3" + mount_target = "/nfs" + opencue_env_vars = "CUE_FS_ROOT=${local.mount_target}/opencue-demo" + + # Pre-populated storage account with data to speed up demo testing + pre_pop_storage_resource_group = "pre_pop_storage_resource_group" + pre_pop_storage_account_name = "prepopopencue" + pre_pop_container_name = "vfxt" + + alternative_resource_groups = [local.pre_pop_storage_resource_group] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -68,182 +68,182 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } # // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = 
module.vfxtcontroller.controller_username - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = module.network.vnet_resource_group - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - - # Test vFXT sku size - # node_size = "unsupported_test_SKU" - # node_cache_size = 1024 - - azure_storage_filer { - account_name = local.pre_pop_storage_account_name - container_name = local.pre_pop_container_name - custom_settings = [] - junction_namespace_path = local.nfs_export_path - } -} + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = module.network.vnet_resource_group + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + + # Test vFXT sku size + # node_size = "unsupported_test_SKU" + # node_cache_size = 1024 + + azure_storage_filer { + account_name = local.pre_pop_storage_account_name + container_name = local.pre_pop_container_name + custom_settings = [] + junction_namespace_path = local.nfs_export_path + } +} resource "azurerm_resource_group" "cuebot_rg" { - name = local.cuebot_resource_group_name - location = local.location + name = local.cuebot_resource_group_name + location = local.location } resource "azurerm_public_ip" "cuebot_public_ip" { - name = "${local.cuebot_name}-public-ip" - resource_group_name = local.cuebot_resource_group_name - location = local.location - allocation_method = "Static" + name = "${local.cuebot_name}-public-ip" + resource_group_name = local.cuebot_resource_group_name + location = local.location + allocation_method = "Static" - depends_on = [azurerm_resource_group.cuebot_rg] + depends_on = [azurerm_resource_group.cuebot_rg] } resource "azurerm_network_interface" "cuebot_nic" { - name = "${local.cuebot_name}-nic" - location = local.location - resource_group_name = local.cuebot_resource_group_name - - ip_configuration { - name = "cuebotconfiguration" - subnet_id = module.network.jumpbox_subnet_id - private_ip_address_allocation = "Dynamic" - public_ip_address_id = azurerm_public_ip.cuebot_public_ip.id - } + name = 
"${local.cuebot_name}-nic" + location = local.location + resource_group_name = local.cuebot_resource_group_name + + ip_configuration { + name = "cuebotconfiguration" + subnet_id = module.network.jumpbox_subnet_id + private_ip_address_allocation = "Dynamic" + public_ip_address_id = azurerm_public_ip.cuebot_public_ip.id + } - depends_on = [module.network, azurerm_public_ip.cuebot_public_ip] + depends_on = [module.network, azurerm_public_ip.cuebot_public_ip] } resource "azurerm_virtual_machine" "cuebot" { - name = local.cuebot_name - location = local.location - resource_group_name = local.cuebot_resource_group_name - vm_size = local.cuebot_vm_size - network_interface_ids = [azurerm_network_interface.cuebot_nic.id] - - - storage_image_reference { - publisher = "Canonical" - offer = "UbuntuServer" - sku = "18.04-LTS" - version = "latest" - } - storage_os_disk { - name = "${local.cuebot_name}-osdisk" - caching = "ReadWrite" - create_option = "FromImage" - managed_disk_type = "Standard_LRS" - } - dynamic "os_profile" { - for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [null] - content { - computer_name = local.cuebot_name - admin_username = local.vm_admin_username - admin_password = os_profile.value - custom_data = templatefile("${path.module}/cloud-init.yml", {namespace_path = local.nfs_export_path, ssh_port = local.ssh_port, cache_ip = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0]})//local.cuebot_vm_cloud_init - } + name = local.cuebot_name + location = local.location + resource_group_name = local.cuebot_resource_group_name + vm_size = local.cuebot_vm_size + network_interface_ids = [azurerm_network_interface.cuebot_nic.id] + + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "18.04-LTS" + version = "latest" + } + storage_os_disk { + name = "${local.cuebot_name}-osdisk" + caching = "ReadWrite" + create_option = "FromImage" + managed_disk_type = "Standard_LRS" + } + dynamic "os_profile" { + for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [null] + content { + computer_name = local.cuebot_name + admin_username = local.vm_admin_username + admin_password = os_profile.value + custom_data = templatefile("${path.module}/cloud-init.yml", { namespace_path = local.nfs_export_path, ssh_port = local.ssh_port, cache_ip = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] }) //local.cuebot_vm_cloud_init } - // dynamic block when password is specified - dynamic "os_profile_linux_config" { - for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [] - content { - disable_password_authentication = false - } + } + // dynamic block when password is specified + dynamic "os_profile_linux_config" { + for_each = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? [local.vm_admin_password] : [] + content { + disable_password_authentication = false } - // dynamic block when SSH key is specified - dynamic "os_profile_linux_config" { - for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] - content { - disable_password_authentication = true - ssh_keys { - key_data = local.vm_ssh_key_data - path = "/home/${local.vm_admin_username}/.ssh/authorized_keys" - } - } + } + // dynamic block when SSH key is specified + dynamic "os_profile_linux_config" { + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + disable_password_authentication = true + ssh_keys { + key_data = local.vm_ssh_key_data + path = "/home/${local.vm_admin_username}/.ssh/authorized_keys" + } } - - depends_on = [avere_vfxt.vfxt] + } + + depends_on = [avere_vfxt.vfxt] } // the opencue module module "opencue_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/opencue_config" - - node_address = module.vfxtcontroller.controller_address - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] - nfs_export_path = local.nfs_export_path - ssh_port = local.ssh_port + source = "github.com/Azure/Avere/src/terraform/modules/opencue_config" + + node_address = module.vfxtcontroller.controller_address + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] + nfs_export_path = local.nfs_export_path + ssh_port = local.ssh_port } // the VMSS module module "vmss" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" - - resource_group_name = local.vmss_resource_group_name - location = local.location - vmss_priority = local.vmss_priority - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - unique_name = local.unique_name - vm_count = local.vm_count - vm_size = local.vmss_size - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.render_clients1_subnet_name - mount_target = local.mount_target - nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) - nfs_export_path = local.nfs_export_path - additional_env_vars = "${local.opencue_env_vars} CUEBOT_HOSTNAME=${azurerm_network_interface.cuebot_nic.private_ip_address}" - bootstrap_script_path = module.opencue_configure.bootstrap_script_path - module_depends_on = [module.opencue_configure.module_depends_on_id, azurerm_virtual_machine.cuebot] + source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" + + resource_group_name = local.vmss_resource_group_name + location = local.location + vmss_priority = local.vmss_priority + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + unique_name = local.unique_name + vm_count = local.vm_count + vm_size = local.vmss_size + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.render_clients1_subnet_name + mount_target = local.mount_target + nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + nfs_export_path = local.nfs_export_path + additional_env_vars = "${local.opencue_env_vars} CUEBOT_HOSTNAME=${azurerm_network_interface.cuebot_nic.private_ip_address}" + bootstrap_script_path = module.opencue_configure.bootstrap_script_path + module_depends_on = [module.opencue_configure.module_depends_on_id, azurerm_virtual_machine.cuebot] } output "cuebot_vm_ssh" { 
- value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.cuebot_public_ip.ip_address}" + value = "ssh ${local.vm_admin_username}@${azurerm_public_ip.cuebot_public_ip.ip_address}" } output "controller_username" { @@ -259,15 +259,15 @@ output "controller_ssh_port" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "vmss_id" { @@ -283,9 +283,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${local.vmss_resource_group_name} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${local.vmss_resource_group_name} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" +} diff --git a/src/terraform/examples/vfxt/pipeline/centos/main.tf b/src/terraform/examples/vfxt/pipeline/centos/main.tf index ea9a656d6..bf4160a30 100644 --- a/src/terraform/examples/vfxt/pipeline/centos/main.tf +++ b/src/terraform/examples/vfxt/pipeline/centos/main.tf @@ -1,32 +1,32 @@ // customize the Secured VM by adjusting the following local variables locals { - // the region of the deployment - location = "eastus" - - // authentication details - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // leave ssh key data blank if you want to use a password - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - // VM details - resource_group_name = "centosresource_group" - unique_name = "vm" - vm_size = "Standard_D2s_v3" - - // virtual network information - virtual_network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "render_clients1" - - source_image_reference = { - publisher = "OpenLogic" - offer = "CentOS" - sku = "7.7" - version = "latest" - } + // the region of the deployment + location = "eastus" + + // authentication details + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // leave ssh key data blank if you want to use a password + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ + // VM details + resource_group_name = "centosresource_group" + unique_name = "vm" + vm_size = "Standard_D2s_v3" + + // virtual network information + virtual_network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "render_clients1" + + source_image_reference = { + publisher = "OpenLogic" + offer = "CentOS" + sku = "7.7" + version = "latest" + } } terraform { @@ -67,13 +67,13 @@ resource "azurerm_network_interface" "main" { } resource "azurerm_linux_virtual_machine" "main" { - name = local.unique_name - resource_group_name = azurerm_resource_group.main.name - location = azurerm_resource_group.main.location - network_interface_ids = [azurerm_network_interface.main.id] - computer_name = local.unique_name - size = local.vm_size - + name = local.unique_name + resource_group_name = azurerm_resource_group.main.name + location = azurerm_resource_group.main.location + network_interface_ids = [azurerm_network_interface.main.id] + computer_name = local.unique_name + size = local.vm_size + source_image_reference { publisher = local.source_image_reference.publisher offer = local.source_image_reference.offer @@ -83,21 +83,21 @@ resource "azurerm_linux_virtual_machine" "main" { // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } @@ -110,5 +110,5 @@ output "ip_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" +} diff --git a/src/terraform/examples/vfxt/pipeline/ubuntu/main.tf b/src/terraform/examples/vfxt/pipeline/ubuntu/main.tf index 87d683d5c..bb7a23e41 100644 --- a/src/terraform/examples/vfxt/pipeline/ubuntu/main.tf +++ b/src/terraform/examples/vfxt/pipeline/ubuntu/main.tf @@ -1,32 +1,32 @@ // customize the Secured VM by adjusting the following local variables locals { - // the region of the deployment - location = "eastus" - - // authentication details - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // leave ssh key data blank if you want to use a password - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - // VM details - resource_group_name = "centosresource_group" - unique_name = "vm" - vm_size = "Standard_D2s_v3" - - // virtual network information - virtual_network_resource_group_name = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "render_clients1" - - source_image_reference = { - publisher = "Canonical" - offer = "0001-com-ubuntu-server-focal" - sku = "20_04-lts" - version = "latest" - } + // the region of the deployment + location = "eastus" + + // authentication details + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // leave ssh key data blank if you want to use a password + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + // VM details + resource_group_name = "centosresource_group" + unique_name = "vm" + vm_size = "Standard_D2s_v3" + + // virtual network information + virtual_network_resource_group_name = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "render_clients1" + + source_image_reference = { + publisher = "Canonical" + offer = "0001-com-ubuntu-server-focal" + sku = "20_04-lts" + version = "latest" + } } terraform { @@ -67,13 +67,13 @@ resource "azurerm_network_interface" "main" { } resource "azurerm_linux_virtual_machine" "main" { - name = local.unique_name - resource_group_name = azurerm_resource_group.main.name - location = azurerm_resource_group.main.location - network_interface_ids = [azurerm_network_interface.main.id] - computer_name = local.unique_name - size = local.vm_size - + name = local.unique_name + resource_group_name = azurerm_resource_group.main.name + location = azurerm_resource_group.main.location + network_interface_ids = [azurerm_network_interface.main.id] + computer_name = local.unique_name + size = local.vm_size + source_image_reference { publisher = local.source_image_reference.publisher offer = local.source_image_reference.offer @@ -83,21 +83,21 @@ resource "azurerm_linux_virtual_machine" "main" { // by default the OS has encryption at rest os_disk { - name = "osdisk" + name = "osdisk" storage_account_type = "Standard_LRS" caching = "ReadWrite" } // configuration for authentication. 
If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } @@ -110,5 +110,5 @@ output "ip_address" { } output "ssh_command" { - value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" -} \ No newline at end of file + value = "ssh ${local.vm_admin_username}@${azurerm_network_interface.main.ip_configuration[0].private_ip_address}" +} diff --git a/src/terraform/examples/vfxt/proxy/main.tf b/src/terraform/examples/vfxt/proxy/main.tf index 954fec999..dc92b2ffb 100644 --- a/src/terraform/examples/vfxt/proxy/main.tf +++ b/src/terraform/examples/vfxt/proxy/main.tf @@ -1,60 +1,60 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - ssh_source_ip = "*" - - // proxy details - proxy_resource_group_name = "proxy_resource_group" - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - - // storage details - storage_resource_group_name = "storage_resource_group" - storage_account_name = "storageaccount" - avere_storage_container_name = "vfxt" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - namespace_path = "/nfs1data" - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: in addition to storage account put the custom image resource group here - alternative_resource_groups = [local.storage_resource_group_name] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
+ ssh_port = 22 + + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + ssh_source_ip = "*" + + // proxy details + proxy_resource_group_name = "proxy_resource_group" + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + + // storage details + storage_resource_group_name = "storage_resource_group" + storage_account_name = "storageaccount" + avere_storage_container_name = "vfxt" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + namespace_path = "/nfs1data" + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: in addition to storage account put the custom image resource group here + alternative_resource_groups = [local.storage_resource_group_name] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -73,32 +73,32 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network_secure" - resource_group_name = local.network_resource_group_name - location = local.location - ssh_source_address_prefix = local.ssh_source_ip + source = "github.com/Azure/Avere/src/terraform/modules/render_network_secure" + resource_group_name = local.network_resource_group_name + location = local.location + ssh_source_address_prefix = local.ssh_source_ip - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "proxy" { - name = local.proxy_resource_group_name + name = local.proxy_resource_group_name location = local.location } module "proxy" { - source = "github.com/Azure/Avere/src/terraform/modules/proxy" - resource_group_name = azurerm_resource_group.proxy.name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.proxy_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/proxy" + resource_group_name = azurerm_resource_group.proxy.name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + + // network details + virtual_network_resource_group = 
local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.proxy_subnet_name } resource "azurerm_resource_group" "storage" { @@ -113,11 +113,11 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -131,84 +131,84 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - proxy = "http://${module.proxy.address}:3128" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + proxy = "http://${module.proxy.address}:3128" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.jumpbox_subnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = 
module.network.jumpbox_subnet_name + + module_depends_on = [module.network.jumpbox_subnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller and proxy at the - // same time as vfxt cluster to work around, add the explicit dependencies - depends_on = [module.vfxtcontroller, module.proxy] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - ntp_servers = "169.254.169.254" - proxy_uri = "http://${module.proxy.address}:3128" - cluster_proxy_uri = "http://${module.proxy.address}:3128" - image_id = local.vfxt_image_id - - azure_storage_filer { - account_name = azurerm_storage_account.storage.name - container_name = local.avere_storage_container_name - junction_namespace_path = "/storagevfxt" - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller and proxy at the + // same time as vfxt cluster to work around, add the explicit dependencies + depends_on = [module.vfxtcontroller, module.proxy] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + ntp_servers = "169.254.169.254" + proxy_uri = "http://${module.proxy.address}:3128" + cluster_proxy_uri = "http://${module.proxy.address}:3128" + image_id = local.vfxt_image_id + + azure_storage_filer { + account_name = azurerm_storage_account.storage.name + container_name = local.avere_storage_container_name + junction_namespace_path = "/storagevfxt" + } - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = local.namespace_path - core_filer_export = module.nasfiler1.core_filer_export - } + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = local.namespace_path + core_filer_export = module.nasfiler1.core_filer_export } + } } output "controller_username" { @@ -220,17 +220,17 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "mount_namespace_path" { - value = local.namespace_path -} \ No newline at end of file + value = local.namespace_path +} diff --git a/src/terraform/examples/vfxt/run-local/main.tf b/src/terraform/examples/vfxt/run-local/main.tf index f19db3633..40050fd9b 100644 --- a/src/terraform/examples/vfxt/run-local/main.tf +++ b/src/terraform/examples/vfxt/run-local/main.tf @@ -1,70 +1,70 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - - // network details - virtual_network_resource_group = "network_resource_group" - virtual_network_name = "rendervnet" - vfxt_network_subnet_name = "cloud_cache" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- - // filer details - filer_address = "" - filer_export = "/data" - - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" + // the region of the deployment + location = "eastus" - tags = null // local.example_tags + // network details + virtual_network_resource_group = "network_resource_group" + virtual_network_name = "rendervnet" + vfxt_network_subnet_name = "cloud_cache" - example_tags = { - Movie = "some movie", - Artist = "some artist", - "Project Name" = "some name", - } + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = null //"ssh-rsa AAAAB3...." + + // filer details + filer_address = "" + filer_export = "/data" + + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + tags = null // local.example_tags + + example_tags = { + Movie = "some movie", + Artist = "some artist", + "Project Name" = "some name", + } } resource "avere_vfxt" "vfxt" { - run_local = true + run_local = true - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.virtual_network_resource_group - azure_network_name = local.virtual_network_name - azure_subnet_name = local.vfxt_network_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.virtual_network_resource_group + azure_network_name = local.virtual_network_name + azure_subnet_name = local.vfxt_network_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 - tags = local.tags + tags = local.tags - core_filer { - name = "nfs1" - fqdn_or_primary_ip = local.filer_address - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = local.filer_export - } + core_filer { + name = "nfs1" + fqdn_or_primary_ip = local.filer_address + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = local.filer_export } + } } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } diff --git a/src/terraform/examples/vfxt/user-assigned-managed-identity/main.tf b/src/terraform/examples/vfxt/user-assigned-managed-identity/main.tf index b7347b2fb..14d0baa9d 100644 --- a/src/terraform/examples/vfxt/user-assigned-managed-identity/main.tf +++ b/src/terraform/examples/vfxt/user-assigned-managed-identity/main.tf @@ -1,61 +1,61 @@ // customize the simple VM by editing the following local variables locals { - // service principal information, that have been scoped to the - // resource groups used in this example - subscription_id = "00000000-0000-0000-0000-000000000000" - client_id = 
"00000000-0000-0000-0000-000000000000" - client_secret = "00000000-0000-0000-0000-000000000000" - tenant_id = "00000000-0000-0000-0000-000000000000" - - controller_managed_identity_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/aaa_managed_identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/controllermi" - vfxt_managed_identity_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/aaa_managed_identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/vfxtmi" - - // the region of the deployment - location = "eastus" - // resource groups - rg_prefix = "aaa_" // this can be blank, it is used to group the resource groups together - network_resource_group_name = "${local.rg_prefix}network_resource_group" - storage_resource_group_name = "${local.rg_prefix}storage_resource_group" - vfxt_resource_group_name = "${local.rg_prefix}vfxt_resource_group" - - // user information - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - - // storage details - storage_account_name = "storageaccount" - avere_storage_container_name = "vfxt" - - // vfxt details - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - controller_add_public_ip = true - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - ssh_port = 22 - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // service principal information, that have been scoped to the + // resource groups used in this example + subscription_id = "00000000-0000-0000-0000-000000000000" + client_id = "00000000-0000-0000-0000-000000000000" + client_secret = "00000000-0000-0000-0000-000000000000" + tenant_id = "00000000-0000-0000-0000-000000000000" + + controller_managed_identity_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/aaa_managed_identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/controllermi" + vfxt_managed_identity_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/aaa_managed_identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/vfxtmi" + + // the region of the deployment + location = "eastus" + // resource groups + rg_prefix = "aaa_" // this can be blank, it is used to group the resource groups together + network_resource_group_name = "${local.rg_prefix}network_resource_group" + storage_resource_group_name = "${local.rg_prefix}storage_resource_group" + vfxt_resource_group_name = "${local.rg_prefix}vfxt_resource_group" + + // user information + 
vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + // storage details + storage_account_name = "storageaccount" + avere_storage_container_name = "vfxt" + + // vfxt details + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + controller_add_public_ip = true + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + ssh_port = 22 + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -69,44 +69,44 @@ terraform { } provider "azurerm" { - subscription_id = local.subscription_id - client_id = local.client_id - client_secret = local.client_secret - tenant_id = local.tenant_id + subscription_id = local.subscription_id + client_id = local.client_id + client_secret = local.client_secret + tenant_id = local.tenant_id - # If you are on a new subscription, and encounter resource provider registration - # issues, please uncomment the following line. - # Please following the directions for a new subscription: - # skip_provider_registration = "true" + # If you are on a new subscription, and encounter resource provider registration + # issues, please uncomment the following line. 
+ # Please following the directions for a new subscription: + # skip_provider_registration = "true" - features {} + features {} } // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - create_resource_group = false - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + create_resource_group = false + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = local.storage_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = local.storage_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } resource "azurerm_storage_account" "storage" { @@ -116,12 +116,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - // need for the controller to create the container - module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + // need for the controller to create the container + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -130,67 +130,67 @@ resource "azurerm_storage_account" "storage" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - create_resource_group = false - user_assigned_managed_identity_id = local.controller_managed_identity_id - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = 
"github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + create_resource_group = false + user_assigned_managed_identity_id = local.controller_managed_identity_id + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the - // same time as vfxt cluster to work around, add the explicit dependencies - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - user_assigned_managed_identity = local.vfxt_managed_identity_id - - azure_storage_filer { - account_name = azurerm_storage_account.storage.name - container_name = local.avere_storage_container_name - junction_namespace_path = "/storagevfxt" - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the + // same time as vfxt cluster to work around, add the explicit dependencies + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + user_assigned_managed_identity = local.vfxt_managed_identity_id + + azure_storage_filer { + account_name = azurerm_storage_account.storage.name + container_name = local.avere_storage_container_name + junction_namespace_path = "/storagevfxt" + } - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = "/nfs1data" - core_filer_export = module.nasfiler1.core_filer_export - } + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = "/nfs1data" + core_filer_export = module.nasfiler1.core_filer_export } + } } output "controller_username" { @@ -202,13 +202,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/vdbench/azureblobfiler/main.tf b/src/terraform/examples/vfxt/vdbench/azureblobfiler/main.tf index 40bb04264..1ddfc7ca3 100644 --- a/src/terraform/examples/vfxt/vdbench/azureblobfiler/main.tf +++ b/src/terraform/examples/vfxt/vdbench/azureblobfiler/main.tf @@ -1,45 +1,45 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - network_resource_group_name = "vdbench_network_rg" - storage_resource_group_name = "vdbench_storage_rg" - vfxt_resource_group_name = "vdbench_vfxt_rg" - vmss_resource_group_name = "vdbench_vmss_rg" - - vm_admin_username = "azureuser" - // the vdbench example requires an ssh key - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // storage details - storage_account_name = "" - avere_storage_container_name = "vdbench" - nfs_export_path = "/vdbench" - - // vfxt details - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - - # download the latest vdbench from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html - # and upload to an azure storage blob and put the URL below - vdbench_url = "" - - // vmss details - unique_name = "vmss" - vm_count = 12 - vmss_size = "Standard_D2s_v3" - mount_target = "/data" - - alternative_resource_groups = [local.storage_resource_group_name] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + network_resource_group_name = "vdbench_network_rg" + storage_resource_group_name = "vdbench_storage_rg" + vfxt_resource_group_name = "vdbench_vfxt_rg" + vmss_resource_group_name = "vdbench_vmss_rg" + + vm_admin_username = "azureuser" + // the vdbench example requires an ssh key + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + ssh_port = 22 + + // storage details + storage_account_name = "" + avere_storage_container_name = "vdbench" + nfs_export_path = "/vdbench" + + // vfxt details + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + + # download the latest vdbench from https://www.oracle.com/technetwork/server-storage/vdbench-downloads-1901681.html + # and upload to an azure storage blob and put the URL below + vdbench_url = "" + + // vmss details + unique_name = "vmss" + vm_count = 12 + vmss_size = "Standard_D2s_v3" + mount_target = "/data" + + alternative_resource_groups = [local.storage_resource_group_name] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -58,12 +58,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "storage" { @@ -78,12 +78,12 @@ resource "azurerm_storage_account" "storage" { account_tier = "Standard" account_replication_type = "LRS" network_rules { - virtual_network_subnet_ids = [ - module.network.cloud_cache_subnet_id, - // need for the controller to create the container - 
module.network.jumpbox_subnet_id, - ] - default_action = "Deny" + virtual_network_subnet_ids = [ + module.network.cloud_cache_subnet_id, + // need for the controller to create the container + module.network.jumpbox_subnet_id, + ] + default_action = "Deny" } // if the nsg associations do not complete before the storage account // create is started, it will fail with "subnet updating" @@ -92,85 +92,85 @@ resource "azurerm_storage_account" "storage" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = module.network.vnet_resource_group - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - - azure_storage_filer { - account_name = azurerm_storage_account.storage.name - container_name = local.avere_storage_container_name - custom_settings = [] - junction_namespace_path = local.nfs_export_path - } -} + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = module.network.vnet_resource_group + azure_network_name = module.network.vnet_name + 
azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + + azure_storage_filer { + account_name = azurerm_storage_account.storage.name + container_name = local.avere_storage_container_name + custom_settings = [] + junction_namespace_path = local.nfs_export_path + } +} // the vdbench module module "vdbench_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/vdbench_config" + source = "github.com/Azure/Avere/src/terraform/modules/vdbench_config" - node_address = module.vfxtcontroller.controller_address - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] - nfs_export_path = local.nfs_export_path - vdbench_url = local.vdbench_url + node_address = module.vfxtcontroller.controller_address + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] + nfs_export_path = local.nfs_export_path + vdbench_url = local.vdbench_url - module_depends_on = [avere_vfxt.vfxt] + module_depends_on = [avere_vfxt.vfxt] } // the VMSS module module "vmss" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" - - resource_group_name = local.vmss_resource_group_name - location = local.location - admin_username = module.vfxtcontroller.controller_username - ssh_key_data = local.vm_ssh_key_data - unique_name = local.unique_name - vm_count = local.vm_count - vm_size = local.vmss_size - virtual_network_resource_group = module.network.vnet_resource_group - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.render_clients1_subnet_name - mount_target = local.mount_target - nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) - nfs_export_path = local.nfs_export_path - bootstrap_script_path = module.vdbench_configure.bootstrap_script_path - module_depends_on = [module.vdbench_configure.module_depends_on_id] - + source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" + + resource_group_name = local.vmss_resource_group_name + location = local.location + admin_username = module.vfxtcontroller.controller_username + ssh_key_data = local.vm_ssh_key_data + unique_name = local.unique_name + vm_count = local.vm_count + vm_size = local.vmss_size + virtual_network_resource_group = module.network.vnet_resource_group + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.render_clients1_subnet_name + mount_target = local.mount_target + nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + nfs_export_path = local.nfs_export_path + bootstrap_script_path = module.vdbench_configure.bootstrap_script_path + module_depends_on = [module.vdbench_configure.module_depends_on_id] + } output "controller_username" { @@ -182,15 +182,15 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = 
avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } output "vmss_id" { @@ -206,9 +206,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${module.vmss.vmss_resource_group} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${module.vmss.vmss_resource_group} --vmss-name ${module.vmss.vmss_name} --query \"[].ipConfigurations[].privateIpAddress\"" +} diff --git a/src/terraform/examples/vfxt/vdbench/main.tf b/src/terraform/examples/vfxt/vdbench/main.tf index 29562b2a6..295bcc3c9 100644 --- a/src/terraform/examples/vfxt/vdbench/main.tf +++ b/src/terraform/examples/vfxt/vdbench/main.tf @@ -19,7 +19,7 @@ variable "ssh_key_data" { } variable "vserver_ip_addresses" { - type=list(string) + type = list(string) description = "sets the vserver ip addresses" } @@ -37,20 +37,20 @@ variable "vnet_resource_group" { variable "vnet_name" { description = "sets the vnet name" - default = "rendervnet" + default = "rendervnet" } variable "subnet_name" { description = "sets the subnet name" - default = "render_clients1" + default = "render_clients1" } // customize the simple VM by editing the following local variables locals { - unique_name = "vmss" - vm_count = 12 - vmss_size = "Standard_D2s_v3" - mount_target = "/data" + unique_name = "vmss" + vm_count = 12 + vmss_size = "Standard_D2s_v3" + mount_target = "/data" } terraform { @@ -69,35 +69,35 @@ provider "azurerm" { // the vdbench module module "vdbench_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/vdbench_config" + source = "github.com/Azure/Avere/src/terraform/modules/vdbench_config" - node_address = var.controller_address - admin_username = var.controller_username - ssh_key_data = var.ssh_key_data - nfs_address = tolist(var.vserver_ip_addresses)[0] - nfs_export_path = var.nfs_export_path - vdbench_url = var.vdbench_url + node_address = var.controller_address + admin_username = var.controller_username + ssh_key_data = var.ssh_key_data + nfs_address = tolist(var.vserver_ip_addresses)[0] + nfs_export_path = var.nfs_export_path + vdbench_url = var.vdbench_url } // the VMSS module module "vmss" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" - - resource_group_name = var.vmss_resource_group_name - location = var.location - admin_username = var.controller_username - ssh_key_data = var.ssh_key_data - unique_name = local.unique_name - vm_count = local.vm_count - vm_size = local.vmss_size - virtual_network_resource_group = var.vnet_resource_group - virtual_network_name = var.vnet_name - virtual_network_subnet_name = var.subnet_name - mount_target = local.mount_target - nfs_export_addresses = 
tolist(var.vserver_ip_addresses) - nfs_export_path = var.nfs_export_path - bootstrap_script_path = module.vdbench_configure.bootstrap_script_path - module_depends_on = [module.vdbench_configure.module_depends_on_id] + source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" + + resource_group_name = var.vmss_resource_group_name + location = var.location + admin_username = var.controller_username + ssh_key_data = var.ssh_key_data + unique_name = local.unique_name + vm_count = local.vm_count + vm_size = local.vmss_size + virtual_network_resource_group = var.vnet_resource_group + virtual_network_name = var.vnet_name + virtual_network_subnet_name = var.subnet_name + mount_target = local.mount_target + nfs_export_addresses = tolist(var.vserver_ip_addresses) + nfs_export_path = var.nfs_export_path + bootstrap_script_path = module.vdbench_configure.bootstrap_script_path + module_depends_on = [module.vdbench_configure.module_depends_on_id] } output "vmss_id" { @@ -113,9 +113,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${module.vmss.vmss_resource_group} --vmss-name ${module.vmss.vmss_name} --query [].ipConfigurations[].privateIpAddress" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${module.vmss.vmss_resource_group} --vmss-name ${module.vmss.vmss_name} --query [].ipConfigurations[].privateIpAddress" +} diff --git a/src/terraform/examples/vfxt/vfxt-only/main.tf b/src/terraform/examples/vfxt/vfxt-only/main.tf index 733598d50..aa8b4b385 100644 --- a/src/terraform/examples/vfxt/vfxt-only/main.tf +++ b/src/terraform/examples/vfxt/vfxt-only/main.tf @@ -1,56 +1,56 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - virtual_network_resource_group = "network_resource_group" - virtual_network_name = "rendervnet" - controller_network_subnet_name = "jumpbox" - vfxt_network_subnet_name = "cloud_cache" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false, but ensure - // you have access to the subnet - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - tags = null // local.example_tags - - example_tags = { - Movie = "some movie", - Artist = "some artist", - "Project Name" = "some name", - } + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + ssh_port = 22 + + // network details + virtual_network_resource_group = "network_resource_group" + virtual_network_name = "rendervnet" + controller_network_subnet_name = "jumpbox" + vfxt_network_subnet_name = "cloud_cache" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false, but ensure + // you have access to the subnet + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + tags = null // local.example_tags + + example_tags = { + Movie = "some movie", + Artist = "some artist", + "Project Name" = "some name", + } - // the proxy used by vfxt.py for cluster stand-up and scale-up / scale-down - proxy_uri = null - // the proxy used by the running vfxt cluster - cluster_proxy_uri = null + // the proxy used by vfxt.py for cluster stand-up and scale-up / scale-down + proxy_uri = null + // the proxy used by the running vfxt cluster + cluster_proxy_uri = null - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] } terraform { @@ -69,52 +69,52 @@ provider "azurerm" { // the vfxt controller module "vfxtcontroller" { - source = "github.com/Azure/Avere/src/terraform/modules/controller3" - create_resource_group = false - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = 
local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.virtual_network_resource_group - virtual_network_name = local.virtual_network_name - virtual_network_subnet_name = local.controller_network_subnet_name - - tags = local.tags + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + create_resource_group = false + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.virtual_network_resource_group + virtual_network_name = local.virtual_network_name + virtual_network_subnet_name = local.controller_network_subnet_name + + tags = local.tags } resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - proxy_uri = local.proxy_uri - cluster_proxy_uri = local.cluster_proxy_uri - image_id = local.vfxt_image_id - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.virtual_network_resource_group - azure_network_name = local.virtual_network_name - azure_subnet_name = local.vfxt_network_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - - tags = local.tags + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + proxy_uri = local.proxy_uri + cluster_proxy_uri = local.cluster_proxy_uri + image_id = local.vfxt_image_id + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.virtual_network_resource_group + azure_network_name = local.virtual_network_name + azure_subnet_name = local.vfxt_network_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + + tags = local.tags } output "controller_username" { @@ -126,13 +126,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) -} \ No newline at end of file + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) +} diff --git a/src/terraform/examples/vfxt/vmss/main.tf b/src/terraform/examples/vfxt/vmss/main.tf index dae9f7653..e603d7e54 100644 --- a/src/terraform/examples/vfxt/vmss/main.tf +++ b/src/terraform/examples/vfxt/vmss/main.tf @@ -1,55 +1,55 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- ssh_port = 22 - - // network details - network_resource_group_name = "network_resource_group" - - // nfs filer details - filer_resource_group_name = "filer_resource_group" - nfs_export_path = "/nfs1data" - - // vfxt details - vfxt_resource_group_name = "vfxt_resource_group" - // if you are running a locked down network, set controller_add_public_ip to false - controller_add_public_ip = true - vfxt_cluster_name = "vfxt" - vfxt_cluster_password = "VFXT_PASSWORD" - vfxt_ssh_key_data = local.vm_ssh_key_data - - // vmss details - vmss_resource_group_name = "vmss_rg" - unique_name = "uniquename" - vm_count = 2 - vmss_size = "Standard_DS2_v2" - mount_target = "/data" - // vfxt cache polies - // "Clients Bypassing the Cluster" - // "Read Caching" - // "Read and Write Caching" - // "Full Caching" - // "Transitioning Clients Before or After a Migration" - cache_policy = "Clients Bypassing the Cluster" - - // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace - controller_image_id = null - vfxt_image_id = null - // advanced scenario: put the custom image resource group here - alternative_resource_groups = [] - // advanced scenario: add external ports to work with cloud policies example [10022, 13389] - open_external_ports = [local.ssh_port,3389] - // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ - // or if accessing from cloud shell, put "AzureCloud" - open_external_sources = ["*"] + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
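The comment block above lists the valid vFXT cache policy strings, and the examples keep the chosen value in a plain local. A minimal sketch of the same list enforced through a variable validation block; this is illustrative only, requires Terraform 0.13 or later, and the examples themselves do not declare such a variable:

variable "cache_policy" {
  description = "Avere vFXT cache policy applied to the core filer junction"
  default     = "Clients Bypassing the Cluster"

  validation {
    condition = contains([
      "Clients Bypassing the Cluster",
      "Read Caching",
      "Read and Write Caching",
      "Full Caching",
      "Transitioning Clients Before or After a Migration",
    ], var.cache_policy)
    error_message = "The cache_policy value must be one of the policies listed in the description."
  }
}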
+ ssh_port = 22 + + // network details + network_resource_group_name = "network_resource_group" + + // nfs filer details + filer_resource_group_name = "filer_resource_group" + nfs_export_path = "/nfs1data" + + // vfxt details + vfxt_resource_group_name = "vfxt_resource_group" + // if you are running a locked down network, set controller_add_public_ip to false + controller_add_public_ip = true + vfxt_cluster_name = "vfxt" + vfxt_cluster_password = "VFXT_PASSWORD" + vfxt_ssh_key_data = local.vm_ssh_key_data + + // vmss details + vmss_resource_group_name = "vmss_rg" + unique_name = "uniquename" + vm_count = 2 + vmss_size = "Standard_DS2_v2" + mount_target = "/data" + // vfxt cache polies + // "Clients Bypassing the Cluster" + // "Read Caching" + // "Read and Write Caching" + // "Full Caching" + // "Transitioning Clients Before or After a Migration" + cache_policy = "Clients Bypassing the Cluster" + + // advanced scenario: vfxt and controller image ids, leave this null, unless not using default marketplace + controller_image_id = null + vfxt_image_id = null + // advanced scenario: put the custom image resource group here + alternative_resource_groups = [] + // advanced scenario: add external ports to work with cloud policies example [10022, 13389] + open_external_ports = [local.ssh_port, 3389] + // for a fully locked down internet get your external IP address from http://www.myipaddress.com/ + // or if accessing from cloud shell, put "AzureCloud" + open_external_sources = ["*"] } terraform { @@ -68,12 +68,12 @@ provider "azurerm" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.network_resource_group_name - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.network_resource_group_name + location = local.location - open_external_ports = local.open_external_ports - open_external_sources = local.open_external_sources + open_external_ports = local.open_external_ports + open_external_sources = local.open_external_sources } resource "azurerm_resource_group" "nfsfiler" { @@ -83,110 +83,110 @@ resource "azurerm_resource_group" "nfsfiler" { // the ephemeral filer module "nasfiler1" { - source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" - resource_group_name = azurerm_resource_group.nfsfiler.name - location = azurerm_resource_group.nfsfiler.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - vm_size = "Standard_D2s_v3" - unique_name = "nasfiler1" - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.cloud_filers_subnet_name + source = "github.com/Azure/Avere/src/terraform/modules/nfs_filer" + resource_group_name = azurerm_resource_group.nfsfiler.name + location = azurerm_resource_group.nfsfiler.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + vm_size = "Standard_D2s_v3" + unique_name = "nasfiler1" + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.cloud_filers_subnet_name } // the vfxt controller module "vfxtcontroller" { - source = 
"github.com/Azure/Avere/src/terraform/modules/controller3" - resource_group_name = local.vfxt_resource_group_name - location = local.location - admin_username = local.vm_admin_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - add_public_ip = local.controller_add_public_ip - image_id = local.controller_image_id - alternative_resource_groups = local.alternative_resource_groups - ssh_port = local.ssh_port - - // network details - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.jumpbox_subnet_name - - module_depends_on = [module.network.vnet_id] + source = "github.com/Azure/Avere/src/terraform/modules/controller3" + resource_group_name = local.vfxt_resource_group_name + location = local.location + admin_username = local.vm_admin_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + add_public_ip = local.controller_add_public_ip + image_id = local.controller_image_id + alternative_resource_groups = local.alternative_resource_groups + ssh_port = local.ssh_port + + // network details + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.jumpbox_subnet_name + + module_depends_on = [module.network.vnet_id] } // the vfxt resource "avere_vfxt" "vfxt" { - controller_address = module.vfxtcontroller.controller_address - controller_admin_username = module.vfxtcontroller.controller_username - // ssh key takes precedence over controller password - controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - controller_ssh_port = local.ssh_port - // terraform is not creating the implicit dependency on the controller module - // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster - // to work around, add the explicit dependency - depends_on = [module.vfxtcontroller] - - location = local.location - azure_resource_group = local.vfxt_resource_group_name - azure_network_resource_group = local.network_resource_group_name - azure_network_name = module.network.vnet_name - azure_subnet_name = module.network.cloud_cache_subnet_name - vfxt_cluster_name = local.vfxt_cluster_name - vfxt_admin_password = local.vfxt_cluster_password - vfxt_ssh_key_data = local.vfxt_ssh_key_data - vfxt_node_count = 3 - image_id = local.vfxt_image_id - - core_filer { - name = "nfs1" - fqdn_or_primary_ip = module.nasfiler1.primary_ip - cache_policy = local.cache_policy - junction { - namespace_path = local.nfs_export_path - core_filer_export = module.nasfiler1.core_filer_export - } + controller_address = module.vfxtcontroller.controller_address + controller_admin_username = module.vfxtcontroller.controller_username + // ssh key takes precedence over controller password + controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + controller_ssh_port = local.ssh_port + // terraform is not creating the implicit dependency on the controller module + // otherwise during destroy, it tries to destroy the controller at the same time as vfxt cluster + // to work around, add the explicit dependency + depends_on = [module.vfxtcontroller] + + location = local.location + azure_resource_group = local.vfxt_resource_group_name + azure_network_resource_group = local.network_resource_group_name + azure_network_name = module.network.vnet_name + azure_subnet_name = module.network.cloud_cache_subnet_name + vfxt_cluster_name = local.vfxt_cluster_name + vfxt_admin_password = local.vfxt_cluster_password + vfxt_ssh_key_data = local.vfxt_ssh_key_data + vfxt_node_count = 3 + image_id = local.vfxt_image_id + + core_filer { + name = "nfs1" + fqdn_or_primary_ip = module.nasfiler1.primary_ip + cache_policy = local.cache_policy + junction { + namespace_path = local.nfs_export_path + core_filer_export = module.nasfiler1.core_filer_export } -} + } +} // the vmss config module to install the round robin mount module "vmss_configure" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_config" + source = "github.com/Azure/Avere/src/terraform/modules/vmss_config" - node_address = module.vfxtcontroller.controller_address - admin_username = module.vfxtcontroller.controller_username - admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] - nfs_export_path = local.nfs_export_path + node_address = module.vfxtcontroller.controller_address + admin_username = module.vfxtcontroller.controller_username + admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? 
"" : local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0] + nfs_export_path = local.nfs_export_path - module_depends_on = [avere_vfxt.vfxt] + module_depends_on = [avere_vfxt.vfxt] } // the VMSS module module "vmss" { - source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" - - resource_group_name = local.vmss_resource_group_name - location = local.location - admin_username = module.vfxtcontroller.controller_username - admin_password = local.vm_admin_password - ssh_key_data = local.vm_ssh_key_data - unique_name = local.unique_name - vm_count = local.vm_count - vm_size = local.vmss_size - virtual_network_resource_group = local.network_resource_group_name - virtual_network_name = module.network.vnet_name - virtual_network_subnet_name = module.network.render_clients1_subnet_name - mount_target = local.mount_target - nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) - nfs_export_path = local.nfs_export_path - bootstrap_script_path = module.vmss_configure.bootstrap_script_path - module_depends_on = [module.vmss_configure.module_depends_on_id] + source = "github.com/Azure/Avere/src/terraform/modules/vmss_mountable" + + resource_group_name = local.vmss_resource_group_name + location = local.location + admin_username = module.vfxtcontroller.controller_username + admin_password = local.vm_admin_password + ssh_key_data = local.vm_ssh_key_data + unique_name = local.unique_name + vm_count = local.vm_count + vm_size = local.vmss_size + virtual_network_resource_group = local.network_resource_group_name + virtual_network_name = module.network.vnet_name + virtual_network_subnet_name = module.network.render_clients1_subnet_name + mount_target = local.mount_target + nfs_export_addresses = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + nfs_export_path = local.nfs_export_path + bootstrap_script_path = module.vmss_configure.bootstrap_script_path + module_depends_on = [module.vmss_configure.module_depends_on_id] } output "controller_username" { @@ -198,13 +198,13 @@ output "controller_address" { } output "ssh_command_with_avere_tunnel" { - value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" + value = "ssh -p ${local.ssh_port} -L8443:${avere_vfxt.vfxt.vfxt_management_ip}:443 ${module.vfxtcontroller.controller_username}@${module.vfxtcontroller.controller_address}" } output "management_ip" { - value = avere_vfxt.vfxt.vfxt_management_ip + value = avere_vfxt.vfxt.vfxt_management_ip } output "mount_addresses" { - value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) + value = tolist(avere_vfxt.vfxt.vserver_ip_addresses) } diff --git a/src/terraform/examples/vmss-rendering/main.tf b/src/terraform/examples/vmss-rendering/main.tf index 2193455f5..4f154c7a9 100644 --- a/src/terraform/examples/vmss-rendering/main.tf +++ b/src/terraform/examples/vmss-rendering/main.tf @@ -1,24 +1,24 @@ // customize the VMSS by editing the following local variables locals { - // the region of the deployment - location = "eastus" - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 - // populated where you are running terraform - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." 
- - // network details - virtual_network_resource_group = "network_resource_group" - - // vmss details - vmss_resource_group_name = "vmss_rg" - unique_name = "unique" - vm_count = 3 - vmss_size = "Standard_D2s_v3" - use_ephemeral_os_disk = true + // the region of the deployment + location = "eastus" + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600 + // populated where you are running terraform + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + + // network details + virtual_network_resource_group = "network_resource_group" + + // vmss details + vmss_resource_group_name = "vmss_rg" + unique_name = "unique" + vm_count = 3 + vmss_size = "Standard_D2s_v3" + use_ephemeral_os_disk = true } terraform { @@ -42,40 +42,40 @@ resource "azurerm_resource_group" "vmss" { // the render network module "network" { - source = "github.com/Azure/Avere/src/terraform/modules/render_network" - resource_group_name = local.virtual_network_resource_group - location = local.location + source = "github.com/Azure/Avere/src/terraform/modules/render_network" + resource_group_name = local.virtual_network_resource_group + location = local.location } resource "azurerm_linux_virtual_machine_scale_set" "vmss" { - name = local.unique_name - resource_group_name = azurerm_resource_group.vmss.name - location = azurerm_resource_group.vmss.location - sku = local.vmss_size - instances = local.vm_count - admin_username = local.vm_admin_username - admin_password = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? local.vm_admin_password : null + name = local.unique_name + resource_group_name = azurerm_resource_group.vmss.name + location = azurerm_resource_group.vmss.location + sku = local.vmss_size + instances = local.vm_count + admin_username = local.vm_admin_username + admin_password = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? local.vm_admin_password : null disable_password_authentication = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? false : true # use low-priority with Delete. Stop Deallocate will be incompatible with OS Ephemeral disks - priority = "Spot" - eviction_policy = "Delete" + priority = "Spot" + eviction_policy = "Delete" // avoid overprovision as it can create race conditions with render managers - overprovision = false + overprovision = false // avoid use of zones so you get maximum spread of machines, and have > 100 nodes single_placement_group = false // avoid use of zones so you get maximum spread of machines zone_balance = false - zones = [] + zones = [] // avoid use proximity groups so you get maximum spread of machines // proximity_placement_group_id - + dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } source_image_reference { @@ -92,14 +92,14 @@ resource "azurerm_linux_virtual_machine_scale_set" "vmss" { dynamic "diff_disk_settings" { for_each = local.use_ephemeral_os_disk == true ? 
[local.use_ephemeral_os_disk] : [] content { - option = "Local" + option = "Local" } } } network_interface { - name = "vminic-${local.unique_name}" - primary = true + name = "vminic-${local.unique_name}" + primary = true enable_accelerated_networking = false ip_configuration { @@ -125,9 +125,9 @@ output "vmss_name" { } output "vmss_addresses_command" { - // local-exec doesn't return output, and the only way to - // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 - // in the meantime just provide the az cli command to - // the customer - value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_linux_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" -} \ No newline at end of file + // local-exec doesn't return output, and the only way to + // try to get the output is follow advice from https://stackoverflow.com/questions/49136537/obtain-ip-of-internal-load-balancer-in-app-service-environment/49436100#49436100 + // in the meantime just provide the az cli command to + // the customer + value = "az vmss nic list -g ${azurerm_resource_group.vmss.name} --vmss-name ${azurerm_linux_virtual_machine_scale_set.vmss.name} --query \"[].ipConfigurations[].privateIpAddress\"" +} diff --git a/src/terraform/examples/windows/client-ps-cse/main.tf b/src/terraform/examples/windows/client-ps-cse/main.tf index 77c3d1bec..03b748a20 100644 --- a/src/terraform/examples/windows/client-ps-cse/main.tf +++ b/src/terraform/examples/windows/client-ps-cse/main.tf @@ -1,7 +1,7 @@ // customize the simple VM by adjusting the following local variables locals { // the region of the deployment - location = "eastus" + location = "eastus" vm_admin_username = "azureuser" // use either SSH Key data or admin password, if ssh_key_data is specified // then admin_password is ignored @@ -10,7 +10,7 @@ locals { unique_name = "unique" vm_size = "Standard_D2s_v3" - + resource_group_name = "windows_resource_group" // the following are the arguments to be passed to the custom script @@ -55,10 +55,10 @@ resource "azurerm_subnet" "subnet" { } resource "azurerm_public_ip" "vm" { - name = "${local.unique_name}-publicip" - location = local.location - resource_group_name = azurerm_resource_group.win.name - allocation_method = "Static" + name = "${local.unique_name}-publicip" + location = local.location + resource_group_name = azurerm_resource_group.win.name + allocation_method = "Static" } resource "azurerm_network_interface" "vm" { @@ -141,4 +141,4 @@ output "username" { output "jumpbox_address" { value = azurerm_public_ip.vm.ip_address -} \ No newline at end of file +} diff --git a/src/terraform/examples/windowsgridgpu/main.tf b/src/terraform/examples/windowsgridgpu/main.tf index 30bb756db..5ed00d43a 100644 --- a/src/terraform/examples/windowsgridgpu/main.tf +++ b/src/terraform/examples/windowsgridgpu/main.tf @@ -1,34 +1,34 @@ // customize the simple VM by editing the following local variables locals { - // the region of the deployment - location = "westus2" - resource_group = "windowsgridgpu" + // the region of the deployment + location = "westus2" + resource_group = "windowsgridgpu" - admin_username = "azureuser" - admin_password = "ReplacePassword$" + admin_username = "azureuser" + admin_password = "ReplacePassword$" - vm_size = "Standard_NV6" - unique_name = "wingrid" + vm_size = "Standard_NV6" + unique_name = "wingrid" - // update the below with 
information about the domain - ad_domain = "" // example "rendering.com" - // leave blank to add machine to default location - ou_path = "" - ad_username = "" - ad_password = "" + // update the below with information about the domain + ad_domain = "" // example "rendering.com" + // leave blank to add machine to default location + ou_path = "" + ad_username = "" + ad_password = "" - // specify 'Windows_Client' to specifies the type of on-premise - // license (also known as Azure Hybrid Use Benefit - // https://azure.microsoft.com/en-us/pricing/hybrid-benefit/faq/) - // which should be used for this Virtual Machine. - license_type = "None" - - teradici_license_key = "" - - // network details - virtual_network_resource_group = "network_resource_group" - virtual_network_name = "rendervnet" - virtual_network_subnet_name = "jumpbox" + // specify 'Windows_Client' to specifies the type of on-premise + // license (also known as Azure Hybrid Use Benefit + // https://azure.microsoft.com/en-us/pricing/hybrid-benefit/faq/) + // which should be used for this Virtual Machine. + license_type = "None" + + teradici_license_key = "" + + // network details + virtual_network_resource_group = "network_resource_group" + virtual_network_name = "rendervnet" + virtual_network_subnet_name = "jumpbox" } terraform { @@ -51,28 +51,28 @@ resource "azurerm_resource_group" "windowsgridgpu" { } module "windowsgridgpu" { - source = "github.com/Azure/Avere/src/terraform/modules/windowsgridgpu" - resource_group_name = local.resource_group - location = local.location - admin_username = local.admin_username - admin_password = local.admin_password - vm_size = local.vm_size - license_type = local.license_type - teradici_license_key = local.teradici_license_key + source = "github.com/Azure/Avere/src/terraform/modules/windowsgridgpu" + resource_group_name = local.resource_group + location = local.location + admin_username = local.admin_username + admin_password = local.admin_password + vm_size = local.vm_size + license_type = local.license_type + teradici_license_key = local.teradici_license_key - ad_domain = local.ad_domain - ou_path = local.ou_path - ad_username = local.ad_username - ad_password = local.ad_password + ad_domain = local.ad_domain + ou_path = local.ou_path + ad_username = local.ad_username + ad_password = local.ad_password - // network details - virtual_network_resource_group = local.virtual_network_resource_group - virtual_network_name = local.virtual_network_name - virtual_network_subnet_name = local.virtual_network_subnet_name + // network details + virtual_network_resource_group = local.virtual_network_resource_group + virtual_network_name = local.virtual_network_name + virtual_network_subnet_name = local.virtual_network_subnet_name - module_depends_on = [azurerm_resource_group.windowsgridgpu.id] + module_depends_on = [azurerm_resource_group.windowsgridgpu.id] } output "address" { value = module.windowsgridgpu.address -} \ No newline at end of file +} diff --git a/src/terraform/examples/wireguard/main.tf b/src/terraform/examples/wireguard/main.tf index a9281a4c0..1316c5279 100644 --- a/src/terraform/examples/wireguard/main.tf +++ b/src/terraform/examples/wireguard/main.tf @@ -1,37 +1,37 @@ // customize the simple VM by editing the following local variables locals { - // auth details - vm_admin_username = "azureuser" - // use either SSH Key data or admin password, if ssh_key_data is specified - // then admin_password is ignored - vm_admin_password = "ReplacePassword$" - // leave ssh key data blank if you want to 
use a password - vm_ssh_key_data = null //"ssh-rsa AAAAB3...." - wg_vm_size = "Standard_F32s_v2" - jb_vm_size = "Standard_D4s_v3" - - // region #1 - location1 = "eastus" - resource_group1 = "region1-rg" - unique_name1 = "region1" - address_space1 = "10.0.0.0/16" - gw_subnet1 = "10.0.0.0/24" - render_subnet1 = "10.0.1.0/24" - - // region #2 - location2 = "westus2" - resource_group2 = "region2-rg" - unique_name2 = "region2" - address_space2 = "10.1.0.0/16" - gw_subnet2 = "10.1.0.0/24" - render_subnet2 = "10.1.1.0/24" - - // wg cloud init - wg_script_file_b64 = base64gzip(replace(file("${path.module}/wginstall.sh"),"\r","")) - wg_cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.wg_script_file_b64 }) - // jb cloud init - jb_script_file_b64 = base64gzip(replace(file("${path.module}/jbinstall.sh"),"\r","")) - jb_cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.jb_script_file_b64 }) + // auth details + vm_admin_username = "azureuser" + // use either SSH Key data or admin password, if ssh_key_data is specified + // then admin_password is ignored + vm_admin_password = "ReplacePassword$" + // leave ssh key data blank if you want to use a password + vm_ssh_key_data = null //"ssh-rsa AAAAB3...." + wg_vm_size = "Standard_F32s_v2" + jb_vm_size = "Standard_D4s_v3" + + // region #1 + location1 = "eastus" + resource_group1 = "region1-rg" + unique_name1 = "region1" + address_space1 = "10.0.0.0/16" + gw_subnet1 = "10.0.0.0/24" + render_subnet1 = "10.0.1.0/24" + + // region #2 + location2 = "westus2" + resource_group2 = "region2-rg" + unique_name2 = "region2" + address_space2 = "10.1.0.0/16" + gw_subnet2 = "10.1.0.0/24" + render_subnet2 = "10.1.1.0/24" + + // wg cloud init + wg_script_file_b64 = base64gzip(replace(file("${path.module}/wginstall.sh"), "\r", "")) + wg_cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.wg_script_file_b64 }) + // jb cloud init + jb_script_file_b64 = base64gzip(replace(file("${path.module}/jbinstall.sh"), "\r", "")) + jb_cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.jb_script_file_b64 }) } terraform { @@ -73,77 +73,77 @@ resource "azurerm_subnet" "rendergwsubnet1" { } resource "azurerm_subnet" "rendernodes1" { - name = "RenderNodes" - resource_group_name = azurerm_resource_group.region1rg.name - virtual_network_name = azurerm_virtual_network.vnet1.name - address_prefixes = [local.render_subnet1] + name = "RenderNodes" + resource_group_name = azurerm_resource_group.region1rg.name + virtual_network_name = azurerm_virtual_network.vnet1.name + address_prefixes = [local.render_subnet1] } // the following is only needed if you need to ssh to the controller resource "azurerm_network_security_group" "ssh_nsg1" { - name = "ssh_nsg" - location = azurerm_resource_group.region1rg.location - resource_group_name = azurerm_resource_group.region1rg.name - - security_rule { - name = "ssh" - priority = 120 - direction = "Inbound" - access = "Allow" - protocol = "TCP" - source_port_range = "*" - destination_port_range = "22" - source_address_prefix = "*" - destination_address_prefix = "*" - } + name = "ssh_nsg" + location = azurerm_resource_group.region1rg.location + resource_group_name = azurerm_resource_group.region1rg.name - security_rule { - name = "wireguard" - priority = 121 - direction = "Inbound" - access = "Allow" - protocol = "TCP" - source_port_range = "*" - destination_port_range = "51820" - source_address_prefix = "*" - 
destination_address_prefix = "*" - } + security_rule { + name = "ssh" + priority = 120 + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } - security_rule { - name = "allowvnetin" - priority = 500 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "wireguard" + priority = 121 + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_range = "51820" + source_address_prefix = "*" + destination_address_prefix = "*" + } - security_rule { - name = "allowremotein" - priority = 510 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = local.address_space2 - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "allowvnetin" + priority = 500 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + } - security_rule { - name = "denyallin" - priority = 3000 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "allowremotein" + priority = 510 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = local.address_space2 + destination_address_prefix = "VirtualNetwork" + } + + security_rule { + name = "denyallin" + priority = 3000 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } } resource "azurerm_subnet_network_security_group_association" "gw1" { @@ -158,10 +158,10 @@ resource "azurerm_subnet_network_security_group_association" "render1" { # wireguard VM resource "azurerm_public_ip" "wg1vm" { - name = "${local.unique_name1}-wg1publicip" - resource_group_name = azurerm_resource_group.region1rg.name - location = azurerm_resource_group.region1rg.location - allocation_method = "Static" + name = "${local.unique_name1}-wg1publicip" + resource_group_name = azurerm_resource_group.region1rg.name + location = azurerm_resource_group.region1rg.location + allocation_method = "Static" } resource "azurerm_network_interface" "wg1vm" { @@ -178,14 +178,14 @@ resource "azurerm_network_interface" "wg1vm" { } resource "azurerm_linux_virtual_machine" "wg1vm" { - name = "${local.unique_name1}-wg1vm" - resource_group_name = azurerm_resource_group.region1rg.name - location = azurerm_resource_group.region1rg.location - network_interface_ids = [azurerm_network_interface.wg1vm.id] - computer_name = "${local.unique_name1}-wg1vm" - custom_data = base64encode(local.wg_cloud_init_file) - size = local.wg_vm_size - + name = "${local.unique_name1}-wg1vm" + resource_group_name = azurerm_resource_group.region1rg.name + location = azurerm_resource_group.region1rg.location + network_interface_ids = [azurerm_network_interface.wg1vm.id] + computer_name = "${local.unique_name1}-wg1vm" + custom_data = base64encode(local.wg_cloud_init_file) + 
size = local.wg_vm_size + source_image_reference { publisher = "Canonical" offer = "0001-com-ubuntu-server-focal" @@ -201,20 +201,20 @@ resource "azurerm_linux_virtual_machine" "wg1vm" { } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "wg1cse" { - name = "${local.unique_name1}-wg1cse" + name = "${local.unique_name1}-wg1cse" virtual_machine_id = azurerm_linux_virtual_machine.wg1vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" @@ -229,10 +229,10 @@ SETTINGS # linux jumpbox resource "azurerm_public_ip" "jb1vm" { - name = "${local.unique_name1}-j1publicip" - resource_group_name = azurerm_resource_group.region1rg.name - location = azurerm_resource_group.region1rg.location - allocation_method = "Static" + name = "${local.unique_name1}-j1publicip" + resource_group_name = azurerm_resource_group.region1rg.name + location = azurerm_resource_group.region1rg.location + allocation_method = "Static" } resource "azurerm_network_interface" "jb1vm" { @@ -249,14 +249,14 @@ resource "azurerm_network_interface" "jb1vm" { } resource "azurerm_linux_virtual_machine" "jb1vm" { - name = "${local.unique_name1}-jb1vm" - resource_group_name = azurerm_resource_group.region1rg.name - location = azurerm_resource_group.region1rg.location - network_interface_ids = [azurerm_network_interface.jb1vm.id] - computer_name = "${local.unique_name1}-jb1vm" - custom_data = base64encode(local.jb_cloud_init_file) - size = local.jb_vm_size - + name = "${local.unique_name1}-jb1vm" + resource_group_name = azurerm_resource_group.region1rg.name + location = azurerm_resource_group.region1rg.location + network_interface_ids = [azurerm_network_interface.jb1vm.id] + computer_name = "${local.unique_name1}-jb1vm" + custom_data = base64encode(local.jb_cloud_init_file) + size = local.jb_vm_size + source_image_reference { publisher = "Canonical" offer = "0001-com-ubuntu-server-focal" @@ -272,20 +272,20 @@ resource "azurerm_linux_virtual_machine" "jb1vm" { } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? 
local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "jb1cse" { - name = "${local.unique_name1}-jb1cse" + name = "${local.unique_name1}-jb1cse" virtual_machine_id = azurerm_linux_virtual_machine.jb1vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" @@ -323,65 +323,65 @@ resource "azurerm_subnet" "rendergwsubnet2" { } resource "azurerm_subnet" "rendernodes2" { - name = "RenderNodes" - virtual_network_name = azurerm_virtual_network.vnet2.name - resource_group_name = azurerm_resource_group.region2rg.name - address_prefixes = [local.render_subnet2] + name = "RenderNodes" + virtual_network_name = azurerm_virtual_network.vnet2.name + resource_group_name = azurerm_resource_group.region2rg.name + address_prefixes = [local.render_subnet2] } // the following is only needed if you need to ssh to the controller resource "azurerm_network_security_group" "ssh_nsg2" { - name = "ssh_nsg" - location = azurerm_resource_group.region2rg.location - resource_group_name = azurerm_resource_group.region2rg.name - - security_rule { - name = "ssh" - priority = 120 - direction = "Inbound" - access = "Allow" - protocol = "TCP" - source_port_range = "*" - destination_port_range = "22" - source_address_prefix = "*" - destination_address_prefix = "*" - } + name = "ssh_nsg" + location = azurerm_resource_group.region2rg.location + resource_group_name = azurerm_resource_group.region2rg.name - security_rule { - name = "wireguard" - priority = 121 - direction = "Inbound" - access = "Allow" - protocol = "TCP" - source_port_range = "*" - destination_port_range = "51820" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "ssh" + priority = 120 + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = "*" + destination_address_prefix = "*" + } - security_rule { - name = "allowremotein" - priority = 510 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = local.address_space1 - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "wireguard" + priority = 121 + direction = "Inbound" + access = "Allow" + protocol = "TCP" + source_port_range = "*" + destination_port_range = "51820" + source_address_prefix = "*" + destination_address_prefix = "*" + } - security_rule { - name = "denyallin" - priority = 3000 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "allowremotein" + priority = 
510 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = local.address_space1 + destination_address_prefix = "VirtualNetwork" + } + + security_rule { + name = "denyallin" + priority = 3000 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } } resource "azurerm_subnet_network_security_group_association" "gw2" { @@ -396,10 +396,10 @@ resource "azurerm_subnet_network_security_group_association" "render2" { # wireguard VM resource "azurerm_public_ip" "wg2vm" { - name = "${local.unique_name2}-wg1publicip" - resource_group_name = azurerm_resource_group.region2rg.name - location = azurerm_resource_group.region2rg.location - allocation_method = "Static" + name = "${local.unique_name2}-wg1publicip" + resource_group_name = azurerm_resource_group.region2rg.name + location = azurerm_resource_group.region2rg.location + allocation_method = "Static" } resource "azurerm_network_interface" "wg2vm" { @@ -416,14 +416,14 @@ resource "azurerm_network_interface" "wg2vm" { } resource "azurerm_linux_virtual_machine" "wg2vm" { - name = "${local.unique_name2}-wg2vm" - resource_group_name = azurerm_resource_group.region2rg.name - location = azurerm_resource_group.region2rg.location - network_interface_ids = [azurerm_network_interface.wg2vm.id] - computer_name = "${local.unique_name2}-wg2vm" - custom_data = base64encode(local.wg_cloud_init_file) - size = local.wg_vm_size - + name = "${local.unique_name2}-wg2vm" + resource_group_name = azurerm_resource_group.region2rg.name + location = azurerm_resource_group.region2rg.location + network_interface_ids = [azurerm_network_interface.wg2vm.id] + computer_name = "${local.unique_name2}-wg2vm" + custom_data = base64encode(local.wg_cloud_init_file) + size = local.wg_vm_size + source_image_reference { publisher = "Canonical" offer = "0001-com-ubuntu-server-focal" @@ -439,20 +439,20 @@ resource "azurerm_linux_virtual_machine" "wg2vm" { } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "wg2cse" { - name = "${local.unique_name2}-wg1cse" + name = "${local.unique_name2}-wg1cse" virtual_machine_id = azurerm_linux_virtual_machine.wg2vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" @@ -467,10 +467,10 @@ SETTINGS # linux jumpbox resource "azurerm_public_ip" "jb2vm" { - name = "${local.unique_name2}-j2publicip" - resource_group_name = azurerm_resource_group.region2rg.name - location = azurerm_resource_group.region2rg.location - allocation_method = "Static" + name = "${local.unique_name2}-j2publicip" + resource_group_name = azurerm_resource_group.region2rg.name + location = azurerm_resource_group.region2rg.location + allocation_method = "Static" } resource "azurerm_network_interface" "jb2vm" { @@ -487,14 +487,14 @@ resource "azurerm_network_interface" "jb2vm" { } resource "azurerm_linux_virtual_machine" "jb2vm" { - name = "${local.unique_name2}-jb2vm" - resource_group_name = azurerm_resource_group.region2rg.name - location = azurerm_resource_group.region2rg.location - network_interface_ids = [azurerm_network_interface.jb2vm.id] - computer_name = "${local.unique_name2}-jb2vm" - custom_data = base64encode(local.jb_cloud_init_file) - size = local.jb_vm_size - + name = "${local.unique_name2}-jb2vm" + resource_group_name = azurerm_resource_group.region2rg.name + location = azurerm_resource_group.region2rg.location + network_interface_ids = [azurerm_network_interface.jb2vm.id] + computer_name = "${local.unique_name2}-jb2vm" + custom_data = base64encode(local.jb_cloud_init_file) + size = local.jb_vm_size + source_image_reference { publisher = "Canonical" offer = "0001-com-ubuntu-server-focal" @@ -510,20 +510,20 @@ resource "azurerm_linux_virtual_machine" "jb2vm" { } // configuration for authentication. If ssh key specified, ignore password - admin_username = local.vm_admin_username - admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null + admin_username = local.vm_admin_username + admin_password = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? local.vm_admin_password : null disable_password_authentication = (local.vm_ssh_key_data == null || local.vm_ssh_key_data == "") && local.vm_admin_password != null && local.vm_admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = local.vm_ssh_key_data== null || local.vm_ssh_key_data == "" ? [] : [local.vm_ssh_key_data] - content { - username = local.vm_admin_username - public_key = local.vm_ssh_key_data - } + for_each = local.vm_ssh_key_data == null || local.vm_ssh_key_data == "" ? 
[] : [local.vm_ssh_key_data] + content { + username = local.vm_admin_username + public_key = local.vm_ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "jb2cse" { - name = "${local.unique_name2}-jb2cse" + name = "${local.unique_name2}-jb2cse" virtual_machine_id = azurerm_linux_virtual_machine.jb2vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" @@ -566,4 +566,4 @@ output "jb2_public_address" { output "jb2_private_address" { value = azurerm_network_interface.jb2vm.ip_configuration[0].private_ip_address -} \ No newline at end of file +} diff --git a/src/terraform/modules/cachewarmer_build/main.tf b/src/terraform/modules/cachewarmer_build/main.tf index 80910e5d0..8813e0653 100644 --- a/src/terraform/modules/cachewarmer_build/main.tf +++ b/src/terraform/modules/cachewarmer_build/main.tf @@ -1,20 +1,20 @@ locals { - mount_dir = "/b" - bootstrap_dir = "bootstrap" + mount_dir = "/b" + bootstrap_dir = "bootstrap" manager_bootstrap_path = "/${local.bootstrap_dir}/bootstrap.cachewarmer-manager.sh" - worker_bootstrap_path = "/${local.bootstrap_dir}/bootstrap.cachewarmer-worker.sh" + worker_bootstrap_path = "/${local.bootstrap_dir}/bootstrap.cachewarmer-worker.sh" } resource "null_resource" "build_cachewarmer_bootstrap" { count = var.deploy_cachewarmer ? 1 : 0 connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } provisioner "remote-exec" { @@ -57,5 +57,3 @@ resource "null_resource" "build_cachewarmer_bootstrap" { depends_on = [var.module_depends_on] } - - diff --git a/src/terraform/modules/cachewarmer_build/outputs.tf b/src/terraform/modules/cachewarmer_build/outputs.tf index 7cdb0e3dc..b52114a8c 100644 --- a/src/terraform/modules/cachewarmer_build/outputs.tf +++ b/src/terraform/modules/cachewarmer_build/outputs.tf @@ -20,5 +20,5 @@ output "cachewarmer_manager_bootstrap_script_path" { output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = null_resource.build_cachewarmer_bootstrap[0].id -} \ No newline at end of file + value = null_resource.build_cachewarmer_bootstrap[0].id +} diff --git a/src/terraform/modules/cachewarmer_build/variables.tf b/src/terraform/modules/cachewarmer_build/variables.tf index edc71bc30..efb58af26 100644 --- a/src/terraform/modules/cachewarmer_build/variables.tf +++ b/src/terraform/modules/cachewarmer_build/variables.tf @@ -1,20 +1,20 @@ variable "deploy_cachewarmer" { description = "specifies to create the cachewarmer or not" - default = true + default = true } variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." 
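The cachewarmer_build module above drives its installation through a null_resource that opens an SSH connection and runs a remote-exec provisioner. A minimal sketch of that shape in isolation; the resource name, host address, and inline commands are placeholders, not the module's actual bootstrap steps:

resource "null_resource" "remote_bootstrap" {
  connection {
    type        = "ssh"
    port        = 22
    host        = "203.0.113.10"        // illustrative controller or jumpbox address
    user        = "azureuser"
    private_key = file("~/.ssh/id_rsa") // key-based auth, with password as the fallback shown above
  }

  provisioner "remote-exec" {
    inline = [
      "set -x",
      "echo the bootstrap commands would run here",
    ]
  }
}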
- default = null + default = null } variable "ssh_key_data" { @@ -23,18 +23,18 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "bootstrap_mount_address" { - description = "the mount address that hosts the worker bootstrap script" + description = "the mount address that hosts the worker bootstrap script" } variable "bootstrap_export_path" { - description = "the export path that hosts the worker bootstrap script" + description = "the export path that hosts the worker bootstrap script" } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/cachewarmer_manager_install/main.tf b/src/terraform/modules/cachewarmer_manager_install/main.tf index 9d38aacac..9d38519fa 100644 --- a/src/terraform/modules/cachewarmer_manager_install/main.tf +++ b/src/terraform/modules/cachewarmer_manager_install/main.tf @@ -1,25 +1,25 @@ locals { - mount_dir = "/b" - bootstrap_dir = "bootstrap" - vmss_password_str = var.vmss_password == null ? "" : var.vmss_password - vmss_ssh_public_key_str = var.vmss_ssh_public_key == null ? "" : var.vmss_ssh_public_key - vmss_subnet_name_str = var.vmss_subnet_name == null ? "" : var.vmss_subnet_name - manager_bootstrap_path= "/${local.bootstrap_dir}/bootstrap.cachewarmer-manager.sh" - env_vars = "export BOOTSTRAP_PATH=${local.mount_dir} && export STORAGE_ACCOUNT=${var.storage_account} && export STORAGE_KEY='${var.storage_key}' && export QUEUE_PREFIX=${var.queue_name_prefix} && export BOOTSTRAP_EXPORT_PATH=${var.bootstrap_export_path} && export BOOTSTRAP_MOUNT_ADDRESS=${var.bootstrap_mount_address} && export BOOTSTRAP_SCRIPT=${var.bootstrap_worker_script_path} && export VMSS_USERNAME=${var.vmss_user_name} && export VMSS_SSHPUBLICKEY='${local.vmss_ssh_public_key_str}' && export VMSS_PASSWORD='${local.vmss_password_str}' && export VMSS_SUBNET=${local.vmss_subnet_name_str}" + mount_dir = "/b" + bootstrap_dir = "bootstrap" + vmss_password_str = var.vmss_password == null ? "" : var.vmss_password + vmss_ssh_public_key_str = var.vmss_ssh_public_key == null ? "" : var.vmss_ssh_public_key + vmss_subnet_name_str = var.vmss_subnet_name == null ? "" : var.vmss_subnet_name + manager_bootstrap_path = "/${local.bootstrap_dir}/bootstrap.cachewarmer-manager.sh" + env_vars = "export BOOTSTRAP_PATH=${local.mount_dir} && export STORAGE_ACCOUNT=${var.storage_account} && export STORAGE_KEY='${var.storage_key}' && export QUEUE_PREFIX=${var.queue_name_prefix} && export BOOTSTRAP_EXPORT_PATH=${var.bootstrap_export_path} && export BOOTSTRAP_MOUNT_ADDRESS=${var.bootstrap_mount_address} && export BOOTSTRAP_SCRIPT=${var.bootstrap_worker_script_path} && export VMSS_USERNAME=${var.vmss_user_name} && export VMSS_SSHPUBLICKEY='${local.vmss_ssh_public_key_str}' && export VMSS_PASSWORD='${local.vmss_password_str}' && export VMSS_SUBNET=${local.vmss_subnet_name_str}" } resource "null_resource" "install_cachewarmer_manager" { count = var.deploy_cachewarmer ? 1 : 0 - + connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? 
file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } - + provisioner "remote-exec" { inline = [ "set -x", diff --git a/src/terraform/modules/cachewarmer_manager_install/outputs.tf b/src/terraform/modules/cachewarmer_manager_install/outputs.tf index f361d4e1c..695697399 100644 --- a/src/terraform/modules/cachewarmer_manager_install/outputs.tf +++ b/src/terraform/modules/cachewarmer_manager_install/outputs.tf @@ -1,4 +1,4 @@ output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = null_resource.install_cachewarmer_manager[0].id -} \ No newline at end of file + value = null_resource.install_cachewarmer_manager[0].id +} diff --git a/src/terraform/modules/cachewarmer_manager_install/variables.tf b/src/terraform/modules/cachewarmer_manager_install/variables.tf index e5c4ca3e6..1c8b68f4a 100644 --- a/src/terraform/modules/cachewarmer_manager_install/variables.tf +++ b/src/terraform/modules/cachewarmer_manager_install/variables.tf @@ -1,20 +1,20 @@ variable "deploy_cachewarmer" { description = "specifies to create the cachewarmer or not" - default = true + default = true } variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." 
- default = null + default = null } variable "ssh_key_data" { @@ -23,59 +23,59 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "bootstrap_mount_address" { - description = "the mount address that hosts the manager and worker bootstrap script" + description = "the mount address that hosts the manager and worker bootstrap script" } variable "bootstrap_export_path" { - description = "the export path that hosts the manager and worker bootstrap script" + description = "the export path that hosts the manager and worker bootstrap script" } variable "bootstrap_manager_script_path" { - description = "the script path that hosts the manager bootstrap script" + description = "the script path that hosts the manager bootstrap script" } variable "bootstrap_worker_script_path" { - description = "the script path that hosts the manager bootstrap script" + description = "the script path that hosts the manager bootstrap script" } variable "storage_account" { - description = "the storage account holding the queue" + description = "the storage account holding the queue" } variable "storage_key" { - description = "the storage key" + description = "the storage key" } variable "queue_name_prefix" { - description = "the queue name prefix for the job management" + description = "the queue name prefix for the job management" } variable "vmss_user_name" { - description = "(optional) the username for the vmss vms" - default = "azureuser" + description = "(optional) the username for the vmss vms" + default = "azureuser" } variable "vmss_password" { - description = "(optional) the password for the vmss vms, this is unused if the public key is specified" - default = null + description = "(optional) the password for the vmss vms, this is unused if the public key is specified" + default = null } variable "vmss_ssh_public_key" { - description = "(optional) the ssh public key for the vmss vms, this will be used by default, however if this is blank, the password will be used" - default = null + description = "(optional) the ssh public key for the vmss vms, this will be used by default, however if this is blank, the password will be used" + default = null } variable "vmss_subnet_name" { - description = "(optional) the subnet to use for the VMSS, if not specified use the same subnet as the controller" - default = null + description = "(optional) the subnet to use for the VMSS, if not specified use the same subnet as the controller" + default = null } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" } diff --git a/src/terraform/modules/cachewarmer_submitjob/main.tf b/src/terraform/modules/cachewarmer_submitjob/main.tf index 187f3d696..d85c00f23 100644 --- a/src/terraform/modules/cachewarmer_submitjob/main.tf +++ b/src/terraform/modules/cachewarmer_submitjob/main.tf @@ -1,20 +1,20 @@ locals { - mount_dir = "/b" + mount_dir = "/b" block_flag = var.block_until_warm ? " -blockUntilWarm " : "" } resource "null_resource" "cachewarmer_submitjob" { count = var.deploy_cachewarmer ? 1 : 0 - + connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? 
file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } - + provisioner "remote-exec" { inline = [ "set -x", diff --git a/src/terraform/modules/cachewarmer_submitjob/outputs.tf b/src/terraform/modules/cachewarmer_submitjob/outputs.tf index 9da1a830c..a8c26e0a4 100644 --- a/src/terraform/modules/cachewarmer_submitjob/outputs.tf +++ b/src/terraform/modules/cachewarmer_submitjob/outputs.tf @@ -1,4 +1,4 @@ output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = null_resource.cachewarmer_submitjob[0].id -} \ No newline at end of file + value = null_resource.cachewarmer_submitjob[0].id +} diff --git a/src/terraform/modules/cachewarmer_submitjob/variables.tf b/src/terraform/modules/cachewarmer_submitjob/variables.tf index e788c2597..1098be1ab 100644 --- a/src/terraform/modules/cachewarmer_submitjob/variables.tf +++ b/src/terraform/modules/cachewarmer_submitjob/variables.tf @@ -1,20 +1,20 @@ variable "deploy_cachewarmer" { description = "specifies to create the cachewarmer or not" - default = true + default = true } variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." - default = null + default = null } variable "ssh_key_data" { @@ -23,39 +23,39 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "storage_account" { - description = "the storage account holding the queue" + description = "the storage account holding the queue" } variable "storage_key" { - description = "the storage key" + description = "the storage key" } variable "queue_name_prefix" { - description = "the queue name prefix for the job management" + description = "the queue name prefix for the job management" } variable "warm_mount_addresses" { - description = "the warm target cache filer mount addresses separated by comma" + description = "the warm target cache filer mount addresses separated by comma" } variable "warm_target_export_path" { - description = "the warm target export path" + description = "the warm target export path" } variable "warm_target_path" { - description = "the target path to warm" + description = "the target path to warm" } variable "block_until_warm" { description = "block the operation until the cache warming has finished" - default = true + default = true } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/cachewarmer_submitmultiplejobs/main.tf b/src/terraform/modules/cachewarmer_submitmultiplejobs/main.tf index 7977e85bc..f89e9eaf4 100644 --- a/src/terraform/modules/cachewarmer_submitmultiplejobs/main.tf +++ b/src/terraform/modules/cachewarmer_submitmultiplejobs/main.tf @@ -1,22 +1,22 @@ locals { - mount_dir = "/b" - block_flag = " 
-blockUntilWarm " - no_block_flag = "" - warm_paths_array = [for i, z in var.warm_paths: setproduct([i], z)][0] + mount_dir = "/b" + block_flag = " -blockUntilWarm " + no_block_flag = "" + warm_paths_array = [for i, z in var.warm_paths : setproduct([i], z)][0] } resource "null_resource" "cachewarmer_submitmultiplejobs" { count = length(local.warm_paths_array) connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } - + provisioner "remote-exec" { inline = [ "set -x", diff --git a/src/terraform/modules/cachewarmer_submitmultiplejobs/outputs.tf b/src/terraform/modules/cachewarmer_submitmultiplejobs/outputs.tf index c133c5424..941b2a0a0 100644 --- a/src/terraform/modules/cachewarmer_submitmultiplejobs/outputs.tf +++ b/src/terraform/modules/cachewarmer_submitmultiplejobs/outputs.tf @@ -1,4 +1,4 @@ output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = null_resource.cachewarmer_submitmultiplejobs[0].id -} \ No newline at end of file + value = null_resource.cachewarmer_submitmultiplejobs[0].id +} diff --git a/src/terraform/modules/cachewarmer_submitmultiplejobs/variables.tf b/src/terraform/modules/cachewarmer_submitmultiplejobs/variables.tf index df6e7bab9..5f538fa40 100644 --- a/src/terraform/modules/cachewarmer_submitmultiplejobs/variables.tf +++ b/src/terraform/modules/cachewarmer_submitmultiplejobs/variables.tf @@ -1,15 +1,15 @@ variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." 
- default = null + default = null } variable "ssh_key_data" { @@ -18,36 +18,36 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "storage_account" { - description = "the storage account holding the queue" + description = "the storage account holding the queue" } variable "storage_key" { - description = "the storage key" + description = "the storage key" } variable "queue_name_prefix" { - description = "the queue name prefix for the job management" + description = "the queue name prefix for the job management" } variable "warm_mount_addresses" { - description = "the warm target cache filer mount addresses separated by comma" + description = "the warm target cache filer mount addresses separated by comma" } variable "warm_paths" { - description = "the export and target paths to warm, separated by ':'" - default = {} + description = "the export and target paths to warm, separated by ':'" + default = {} } variable "block_until_warm" { description = "block the operation until the cache warming has finished" - default = true + default = true } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/centosgridgpu/main.tf b/src/terraform/modules/centosgridgpu/main.tf index 69ad5673b..f110b3644 100644 --- a/src/terraform/modules/centosgridgpu/main.tf +++ b/src/terraform/modules/centosgridgpu/main.tf @@ -1,6 +1,6 @@ locals { # send the script file to custom data, adding env vars - script_file_b64 = base64gzip(replace(file("${path.module}/install.sh"),"\r","")) + script_file_b64 = base64gzip(replace(file("${path.module}/install.sh"), "\r", "")) cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.script_file_b64, ssh_port = var.ssh_port }) } @@ -15,7 +15,7 @@ data "azurerm_subnet" "vnet" { data "azurerm_subscription" "primary" {} data "azurerm_resource_group" "vm" { - name = var.resource_group_name + name = var.resource_group_name depends_on = [var.module_depends_on] } @@ -34,17 +34,17 @@ resource "azurerm_network_interface" "vm" { } resource "azurerm_linux_virtual_machine" "vm" { - name = "${var.unique_name}-vm" - location = var.location - resource_group_name = data.azurerm_resource_group.vm.name + name = "${var.unique_name}-vm" + location = var.location + resource_group_name = data.azurerm_resource_group.vm.name network_interface_ids = [azurerm_network_interface.vm.id] - computer_name = var.unique_name - custom_data = base64encode(local.cloud_init_file) - size = var.vm_size + computer_name = var.unique_name + custom_data = base64encode(local.cloud_init_file) + size = var.vm_size os_disk { - name = "${var.unique_name}-osdisk" - caching = "ReadWrite" + name = "${var.unique_name}-osdisk" + caching = "ReadWrite" storage_account_type = "Standard_LRS" } @@ -60,20 +60,20 @@ resource "azurerm_linux_virtual_machine" "vm" { } } - admin_username = var.admin_username - admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? var.admin_password : null + admin_username = var.admin_username + admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? 
var.admin_password : null disable_password_authentication = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? [] : [var.ssh_key_data] - content { - username = var.admin_username - public_key = var.ssh_key_data - } + for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? [] : [var.ssh_key_data] + content { + username = var.admin_username + public_key = var.ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "cse" { - name = "${var.unique_name}-cse" + name = "${var.unique_name}-cse" virtual_machine_id = azurerm_linux_virtual_machine.vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" diff --git a/src/terraform/modules/centosgridgpu/outputs.tf b/src/terraform/modules/centosgridgpu/outputs.tf index 84b08d51a..7f22e24a4 100644 --- a/src/terraform/modules/centosgridgpu/outputs.tf +++ b/src/terraform/modules/centosgridgpu/outputs.tf @@ -1,12 +1,12 @@ output "address" { - value = "${azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" + value = azurerm_network_interface.vm.ip_configuration[0].private_ip_address } output "username" { - value = "${var.admin_username}" + value = var.admin_username } output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = azurerm_virtual_machine_extension.cse.id -} \ No newline at end of file + value = azurerm_virtual_machine_extension.cse.id +} diff --git a/src/terraform/modules/centosgridgpu/variables.tf b/src/terraform/modules/centosgridgpu/variables.tf index 74d60cbf1..41c9a6a9b 100644 --- a/src/terraform/modules/centosgridgpu/variables.tf +++ b/src/terraform/modules/centosgridgpu/variables.tf @@ -3,17 +3,17 @@ variable "resource_group_name" { } variable "location" { - description = "The Azure Region into which the dnsserver will be created." + description = "The Azure Region into which the dnsserver will be created." } variable "admin_username" { description = "Admin username on the dnsserver." - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the dnsserver. If not specified, ssh_key_data needs to be set." - default = null + default = null } variable "ssh_key_data" { @@ -22,17 +22,17 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "unique_name" { description = "The unique name used for the dnsserver and for resource names associated with the VM." - default = "centosgridgpu" + default = "centosgridgpu" } variable "vm_size" { description = "Size of the VM." 
- default = "Standard_NV6" + default = "Standard_NV6" } variable "virtual_network_resource_group" { @@ -49,30 +49,30 @@ variable "virtual_network_subnet_name" { variable "private_ip_address" { description = "specifies a static private ip address to use" - default = null + default = null } variable "image_id" { description = "specifies a custom image id, if not use marketplace" - default = null + default = null } variable "install_pcoip" { description = "specifies true or false to install pcoip" - default = true + default = true } variable "search_domain" { description = "specifies a default search domain to use, if more than one separate by spaces" - default = "" + default = "" } variable "teradici_license_key" { description = "specifies the teradici pcoipagent license key" - default = "" + default = "" } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/controller3/main.tf b/src/terraform/modules/controller3/main.tf index d6f19c36d..1091778ca 100644 --- a/src/terraform/modules/controller3/main.tf +++ b/src/terraform/modules/controller3/main.tf @@ -9,8 +9,8 @@ data "azurerm_subnet" "vnet" { data "azurerm_subscription" "primary" {} locals { - msazure_patchidentity_file_b64 = base64gzip(replace(file("${path.module}/msazure.py.patchidentity"),"\r","")) - vfxtpy_patchzone_file_b64 = base64gzip(replace(file("${path.module}/vfxt.py.patchzone"),"\r","")) + msazure_patchidentity_file_b64 = base64gzip(replace(file("${path.module}/msazure.py.patchidentity"), "\r", "")) + vfxtpy_patchzone_file_b64 = base64gzip(replace(file("${path.module}/vfxt.py.patchzone"), "\r", "")) # send the script file to custom data, adding env vars cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { vfxtpy_patchzone = local.vfxtpy_patchzone_file_b64, msazure_patchidentity = local.msazure_patchidentity_file_b64, ssh_port = var.ssh_port }) # the roles assigned to the controller managed identity principal @@ -23,17 +23,17 @@ locals { create_compute_role = "Virtual Machine Contributor" # publisher / offer / sku - image_parts = var.image_id == null ? [] : split(":", var.image_id) + image_parts = var.image_id == null ? [] : split(":", var.image_id) is_custom_image = var.image_id == null ? false : (length(local.image_parts) < 4 && length(var.image_id) > 0) - publisher = length(local.image_parts) >= 4 ? local.image_parts[0] : "microsoft-avere" - offer = length(local.image_parts) >= 4 ? local.image_parts[1] : "vfxt" - sku = length(local.image_parts) >= 4 ? local.image_parts[2] : "avere-vfxt-controller" - version = length(local.image_parts) >= 4 ? local.image_parts[3] : "latest" + publisher = length(local.image_parts) >= 4 ? local.image_parts[0] : "microsoft-avere" + offer = length(local.image_parts) >= 4 ? local.image_parts[1] : "vfxt" + sku = length(local.image_parts) >= 4 ? local.image_parts[2] : "avere-vfxt-controller" + version = length(local.image_parts) >= 4 ? 
local.image_parts[3] : "latest" # the plan details are the same for all marketplace controller images - plan_name = "avere-vfxt-controller" + plan_name = "avere-vfxt-controller" plan_publisher = "microsoft-avere" - plan_product = "vfxt" + plan_product = "vfxt" } resource "azurerm_resource_group" "vm" { @@ -56,16 +56,16 @@ data "azurerm_resource_group" "vm" { } resource "azurerm_public_ip" "vm" { - name = "${var.unique_name}-publicip" - location = var.location - resource_group_name = var.create_resource_group ? azurerm_resource_group.vm[0].name : data.azurerm_resource_group.vm[0].name - allocation_method = "Static" + name = "${var.unique_name}-publicip" + location = var.location + resource_group_name = var.create_resource_group ? azurerm_resource_group.vm[0].name : data.azurerm_resource_group.vm[0].name + allocation_method = "Static" - tags = var.tags + tags = var.tags - count = var.deploy_controller && var.add_public_ip ? 1 : 0 + count = var.deploy_controller && var.add_public_ip ? 1 : 0 - depends_on = [var.module_depends_on] + depends_on = [var.module_depends_on] } resource "azurerm_network_interface" "vm" { @@ -88,23 +88,23 @@ resource "azurerm_network_interface" "vm" { } resource "azurerm_linux_virtual_machine" "vm" { - name = "${var.unique_name}-vm" - location = var.location - resource_group_name = var.create_resource_group ? azurerm_resource_group.vm[0].name : data.azurerm_resource_group.vm[0].name + name = "${var.unique_name}-vm" + location = var.location + resource_group_name = var.create_resource_group ? azurerm_resource_group.vm[0].name : data.azurerm_resource_group.vm[0].name network_interface_ids = [azurerm_network_interface.vm[0].id] - computer_name = var.unique_name - custom_data = var.apply_patch ? base64encode(local.cloud_init_file) : null - size = var.vm_size - source_image_id = local.is_custom_image ? var.image_id : null - + computer_name = var.unique_name + custom_data = var.apply_patch ? base64encode(local.cloud_init_file) : null + size = var.vm_size + source_image_id = local.is_custom_image ? var.image_id : null + identity { - type = var.user_assigned_managed_identity_id == null ? "SystemAssigned" : "UserAssigned" + type = var.user_assigned_managed_identity_id == null ? "SystemAssigned" : "UserAssigned" identity_ids = var.user_assigned_managed_identity_id == null ? [] : [var.user_assigned_managed_identity_id] } os_disk { - name = "${var.unique_name}-osdisk" - caching = "ReadWrite" + name = "${var.unique_name}-osdisk" + caching = "ReadWrite" storage_account_type = "Standard_LRS" } @@ -121,21 +121,21 @@ resource "azurerm_linux_virtual_machine" "vm" { dynamic "plan" { for_each = local.is_custom_image ? [] : ["microsoft-avere"] content { - name = local.plan_name + name = local.plan_name publisher = local.plan_publisher - product = local.plan_product + product = local.plan_product } } - admin_username = var.admin_username - admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? var.admin_password : null + admin_username = var.admin_username + admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? var.admin_password : null disable_password_authentication = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? 
[] : [var.ssh_key_data] - content { - username = var.admin_username - public_key = var.ssh_key_data - } + for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? [] : [var.ssh_key_data] + content { + username = var.admin_username + public_key = var.ssh_key_data + } } count = var.deploy_controller ? 1 : 0 @@ -150,8 +150,8 @@ locals { var.resource_group_name, var.virtual_network_resource_group, ], - var.alternative_resource_groups)) - + var.alternative_resource_groups)) + user_access_rgs = var.user_assigned_managed_identity_id != null ? [] : distinct( [ var.resource_group_name, diff --git a/src/terraform/modules/controller3/outputs.tf b/src/terraform/modules/controller3/outputs.tf index d054e2ae4..55335ae54 100644 --- a/src/terraform/modules/controller3/outputs.tf +++ b/src/terraform/modules/controller3/outputs.tf @@ -1,12 +1,12 @@ output "controller_address" { - value = "${var.deploy_controller ? (var.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm[0].ip_configuration[0].private_ip_address) : ""}" + value = var.deploy_controller ? (var.add_public_ip ? azurerm_public_ip.vm[0].ip_address : azurerm_network_interface.vm[0].ip_configuration[0].private_ip_address) : "" } output "controller_username" { - value = "${var.admin_username}" + value = var.admin_username } output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = var.deploy_controller ? (var.user_assigned_managed_identity_id != null ? azurerm_linux_virtual_machine.vm[0].id : azurerm_role_assignment.create_compute[0].id) : data.azurerm_subnet.vnet.id -} \ No newline at end of file + value = var.deploy_controller ? (var.user_assigned_managed_identity_id != null ? azurerm_linux_virtual_machine.vm[0].id : azurerm_role_assignment.create_compute[0].id) : data.azurerm_subnet.vnet.id +} diff --git a/src/terraform/modules/controller3/variables.tf b/src/terraform/modules/controller3/variables.tf index 6c6728213..e9b14e09a 100644 --- a/src/terraform/modules/controller3/variables.tf +++ b/src/terraform/modules/controller3/variables.tf @@ -1,11 +1,11 @@ variable "deploy_controller" { description = "specifies to create the controller or not" - default = true + default = true } variable "create_resource_group" { description = "specifies to create the resource group" - default = true + default = true } variable "resource_group_name" { @@ -13,17 +13,17 @@ variable "resource_group_name" { } variable "location" { - description = "The Azure Region into which the controller will be created." + description = "The Azure Region into which the controller will be created." } variable "admin_username" { description = "Admin username on the controller." - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller. If not specified, ssh_key_data needs to be set." - default = null + default = null } variable "ssh_key_data" { @@ -32,17 +32,17 @@ variable "ssh_key_data" { variable "unique_name" { description = "The unique name used for the controller and for resource names associated with the VM." - default = "controller" + default = "controller" } variable "vm_size" { description = "Size of the VM." - default = "Standard_A1_v2" + default = "Standard_A1_v2" } variable "user_assigned_managed_identity_id" { description = "Use this managed identity over system assigned identity." 
- default = null + default = null } variable "virtual_network_resource_group" { @@ -59,35 +59,35 @@ variable "virtual_network_subnet_name" { variable "add_public_ip" { description = "specifies if the controller should have a publice ip" - default = false + default = false } variable "image_id" { description = "specifies a custom image id if not use marketplace" - default = null + default = null } variable "alternative_resource_groups" { description = "specifies alternative resource groups including those containing custom images or storage accounts" - default = [] + default = [] } variable "apply_patch" { description = "specifies if the controller should have a publice ip" - default = true + default = true } variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "tags" { description = "specifies key value pairs of tags" - default = null + default = null } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/dnsserver/main.tf b/src/terraform/modules/dnsserver/main.tf index b7018038e..f3174cea1 100644 --- a/src/terraform/modules/dnsserver/main.tf +++ b/src/terraform/modules/dnsserver/main.tf @@ -1,128 +1,128 @@ locals { - local_list_a_records = length(var.avere_address_list) == 0 ? [] : [for i in range(length(var.avere_address_list)): "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${var.avere_address_list[i]}\""] - local_list_a_records_reverse = length(var.avere_address_list) == 0 ? [] : [for i in range(length(var.avere_address_list)): "local-data-ptr: \"${var.avere_address_list[i]} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] + local_list_a_records = length(var.avere_address_list) == 0 ? [] : [for i in range(length(var.avere_address_list)) : "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${var.avere_address_list[i]}\""] + local_list_a_records_reverse = length(var.avere_address_list) == 0 ? [] : [for i in range(length(var.avere_address_list)) : "local-data-ptr: \"${var.avere_address_list[i]} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] # alternate fqdn - local_alternate_list_a_records = length(var.avere_address_list) == 0 ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(length(var.avere_address_list)): - "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${var.avere_address_list[j]}\"" - ] + local_alternate_list_a_records = length(var.avere_address_list) == 0 ? [] : flatten([ + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(length(var.avere_address_list)) : + "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${var.avere_address_list[j]}\"" + ] ]) # reverse records local_alternate_list_a_records_reverse = length(var.avere_address_list) == 0 ? 
[] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(length(var.avere_address_list)): - "local-data-ptr: \"${var.avere_address_list[j]} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(length(var.avere_address_list)) : + "local-data-ptr: \"${var.avere_address_list[j]} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" + ] ]) # create the A record lines for the first Avere - last_octet = var.avere_first_ip_addr == "" ? "" : split(".", var.avere_first_ip_addr)[3] + last_octet = var.avere_first_ip_addr == "" ? "" : split(".", var.avere_first_ip_addr)[3] addr_prefix = var.avere_first_ip_addr == "" ? "" : trimsuffix(var.avere_first_ip_addr, ".${local.last_octet}") # technique from article: https://forum.netgate.com/topic/120486/round-robin-for-dns-forwarder-network-address/3 - local_a_records = var.avere_first_ip_addr == "" ? [] : [for i in range(var.avere_ip_addr_count): "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix}.${local.last_octet + i}\""] - local_a_records_reverse = var.avere_first_ip_addr == "" ? [] : [for i in range(var.avere_ip_addr_count): "local-data-ptr: \"${local.addr_prefix}.${local.last_octet + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] - + local_a_records = var.avere_first_ip_addr == "" ? [] : [for i in range(var.avere_ip_addr_count) : "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix}.${local.last_octet + i}\""] + local_a_records_reverse = var.avere_first_ip_addr == "" ? [] : [for i in range(var.avere_ip_addr_count) : "local-data-ptr: \"${local.addr_prefix}.${local.last_octet + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] + # alternate fqdn - local_alternate_a_records = var.avere_first_ip_addr == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count): - "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix}.${local.last_octet + j}\"" - ] + local_alternate_a_records = var.avere_first_ip_addr == "" ? [] : flatten([ + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count) : + "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix}.${local.last_octet + j}\"" + ] ]) # reverse records local_alternate_a_records_reverse = var.avere_first_ip_addr == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count): - "local-data-ptr: \"${local.addr_prefix}.${local.last_octet + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count) : + "local-data-ptr: \"${local.addr_prefix}.${local.last_octet + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" + ] ]) # create the A record lines for the second Avere - last_octet2 = var.avere_first_ip_addr2 == "" ? "" : split(".", var.avere_first_ip_addr2)[3] + last_octet2 = var.avere_first_ip_addr2 == "" ? "" : split(".", var.avere_first_ip_addr2)[3] addr_prefix2 = var.avere_first_ip_addr2 == "" ? "" : trimsuffix(var.avere_first_ip_addr2, ".${local.last_octet2}") - local_a_records2 = var.avere_first_ip_addr2 == "" ? 
[] : [for i in range(var.avere_ip_addr_count2): "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix2}.${local.last_octet2 + i}\""] - local_a_records_reverse2 = var.avere_first_ip_addr2 == "" ? [] : [for i in range(var.avere_ip_addr_count2): "local-data-ptr: \"${local.addr_prefix2}.${local.last_octet2 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] + local_a_records2 = var.avere_first_ip_addr2 == "" ? [] : [for i in range(var.avere_ip_addr_count2) : "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix2}.${local.last_octet2 + i}\""] + local_a_records_reverse2 = var.avere_first_ip_addr2 == "" ? [] : [for i in range(var.avere_ip_addr_count2) : "local-data-ptr: \"${local.addr_prefix2}.${local.last_octet2 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] # alternate fqdn local_alternate_a_records2 = var.avere_first_ip_addr2 == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count2): - "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix2}.${local.last_octet2 + j}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count2) : + "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix2}.${local.last_octet2 + j}\"" + ] ]) # reverse records local_alternate_a_records_reverse2 = var.avere_first_ip_addr2 == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count2): - "local-data-ptr: \"${local.addr_prefix2}.${local.last_octet2 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count2) : + "local-data-ptr: \"${local.addr_prefix2}.${local.last_octet2 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" + ] ]) # create the A record lines for the third Avere - last_octet3 = var.avere_first_ip_addr3 == "" ? "" : split(".", var.avere_first_ip_addr3)[3] + last_octet3 = var.avere_first_ip_addr3 == "" ? "" : split(".", var.avere_first_ip_addr3)[3] addr_prefix3 = var.avere_first_ip_addr3 == "" ? "" : trimsuffix(var.avere_first_ip_addr3, ".${local.last_octet3}") - local_a_records3 = var.avere_first_ip_addr3 == "" ? [] : [for i in range(var.avere_ip_addr_count3): "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix3}.${local.last_octet3 + i}\""] - local_a_records_reverse3 = var.avere_first_ip_addr3 == "" ? [] : [for i in range(var.avere_ip_addr_count3): "local-data-ptr: \"${local.addr_prefix3}.${local.last_octet3 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] + local_a_records3 = var.avere_first_ip_addr3 == "" ? [] : [for i in range(var.avere_ip_addr_count3) : "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix3}.${local.last_octet3 + i}\""] + local_a_records_reverse3 = var.avere_first_ip_addr3 == "" ? [] : [for i in range(var.avere_ip_addr_count3) : "local-data-ptr: \"${local.addr_prefix3}.${local.last_octet3 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] # alternate fqdn local_alternate_a_records3 = var.avere_first_ip_addr3 == "" ? 
[] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count3): - "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix3}.${local.last_octet3 + j}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count3) : + "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix3}.${local.last_octet3 + j}\"" + ] ]) # reverse records local_alternate_a_records_reverse3 = var.avere_first_ip_addr3 == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count3): - "local-data-ptr: \"${local.addr_prefix3}.${local.last_octet3 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count3) : + "local-data-ptr: \"${local.addr_prefix3}.${local.last_octet3 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" + ] ]) - # create the A record lines for the fourth Avere - last_octet4 = var.avere_first_ip_addr4 == "" ? "" : split(".", var.avere_first_ip_addr4)[3] + # create the A record lines for the fourth Avere + last_octet4 = var.avere_first_ip_addr4 == "" ? "" : split(".", var.avere_first_ip_addr4)[3] addr_prefix4 = var.avere_first_ip_addr4 == "" ? "" : trimsuffix(var.avere_first_ip_addr4, ".${local.last_octet4}") - local_a_records4 = var.avere_first_ip_addr4 == "" ? [] : [for i in range(var.avere_ip_addr_count4): "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix4}.${local.last_octet4 + i}\""] - local_a_records_reverse4 = var.avere_first_ip_addr4 == "" ? [] : [for i in range(var.avere_ip_addr_count4): "local-data-ptr: \"${local.addr_prefix4}.${local.last_octet4 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] + local_a_records4 = var.avere_first_ip_addr4 == "" ? [] : [for i in range(var.avere_ip_addr_count4) : "local-data: \"${var.avere_filer_fqdn} ${var.dns_max_ttl_seconds} A ${local.addr_prefix4}.${local.last_octet4 + i}\""] + local_a_records_reverse4 = var.avere_first_ip_addr4 == "" ? [] : [for i in range(var.avere_ip_addr_count4) : "local-data-ptr: \"${local.addr_prefix4}.${local.last_octet4 + i} ${var.dns_max_ttl_seconds} ${var.avere_filer_fqdn}\""] # alternate fqdn local_alternate_a_records4 = var.avere_first_ip_addr4 == "" ? [] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count4): - "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix4}.${local.last_octet4 + j}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count4) : + "local-data: \"${var.avere_filer_alternate_fqdn[i]} ${var.dns_max_ttl_seconds} A ${local.addr_prefix4}.${local.last_octet4 + j}\"" + ] ]) # reverse records local_alternate_a_records_reverse4 = var.avere_first_ip_addr4 == "" ? 
[] : flatten([ - for i in range(length(var.avere_filer_alternate_fqdn)): [ - for j in range(var.avere_ip_addr_count4): - "local-data-ptr: \"${local.addr_prefix4}.${local.last_octet4 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" - ] + for i in range(length(var.avere_filer_alternate_fqdn)) : [ + for j in range(var.avere_ip_addr_count4) : + "local-data-ptr: \"${local.addr_prefix4}.${local.last_octet4 + j} ${var.dns_max_ttl_seconds} ${var.avere_filer_alternate_fqdn[i]}\"" + ] ]) - + # join everything into the same string - all_a_records = concat(local.local_list_a_records, local.local_list_a_records_reverse, local.local_alternate_list_a_records, local.local_alternate_list_a_records_reverse, local.local_a_records, local.local_a_records_reverse, local.local_alternate_a_records, local.local_alternate_a_records_reverse, local.local_a_records2, local.local_a_records_reverse2, local.local_alternate_a_records2, local.local_alternate_a_records_reverse2, local.local_a_records3, local.local_a_records_reverse3, local.local_alternate_a_records3, local.local_alternate_a_records_reverse3, local.local_a_records4, local.local_a_records_reverse4, local.local_alternate_a_records4, local.local_alternate_a_records_reverse4) + all_a_records = concat(local.local_list_a_records, local.local_list_a_records_reverse, local.local_alternate_list_a_records, local.local_alternate_list_a_records_reverse, local.local_a_records, local.local_a_records_reverse, local.local_alternate_a_records, local.local_alternate_a_records_reverse, local.local_a_records2, local.local_a_records_reverse2, local.local_alternate_a_records2, local.local_alternate_a_records_reverse2, local.local_a_records3, local.local_a_records_reverse3, local.local_alternate_a_records3, local.local_alternate_a_records_reverse3, local.local_a_records4, local.local_a_records_reverse4, local.local_alternate_a_records4, local.local_alternate_a_records_reverse4) local_zone_record_str = "local-zone: \"${var.avere_filer_fqdn}\" transparent" - local_a_records_str = join("\n ", local.all_a_records) + local_a_records_str = join("\n ", local.all_a_records) # create the dns forward lines - dns_servers = var.dns_server == null || var.dns_server == "" ? [] : split(" ", var.dns_server) - forward_lines = [for s in local.dns_servers : "forward-addr: ${s}" if trimspace("${s}") != ""] + dns_servers = var.dns_server == null || var.dns_server == "" ? 
[] : split(" ", var.dns_server) + forward_lines = [for s in local.dns_servers : "forward-addr: ${s}" if trimspace("${s}") != ""] foward_lines_str = join("\n ", local.forward_lines) - excluded_subnets = [for s in var.excluded_subnet_cidrs : "access-control-view: ${s} excludedsubnetview" if trimspace("${s}") != ""] + excluded_subnets = [for s in var.excluded_subnet_cidrs : "access-control-view: ${s} excludedsubnetview" if trimspace("${s}") != ""] excluded_subnets_str = join("\n ", local.excluded_subnets) # send the script file to custom data, adding env vars - script_file_b64 = base64gzip(replace(file("${path.module}/install.sh"),"\r","")) - unbound_conf_file_b64 = base64gzip(replace(templatefile("${path.module}/unbound.conf", { max_ttl = var.dns_max_ttl_seconds, excluded_subnets = local.excluded_subnets_str, local_zone_line = local.local_zone_record_str, arecord_lines = local.local_a_records_str, forward_addr_lines = local.foward_lines_str }),"\r","")) - cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.script_file_b64, unboundconf = local.unbound_conf_file_b64, ssh_port = var.ssh_port }) + script_file_b64 = base64gzip(replace(file("${path.module}/install.sh"), "\r", "")) + unbound_conf_file_b64 = base64gzip(replace(templatefile("${path.module}/unbound.conf", { max_ttl = var.dns_max_ttl_seconds, excluded_subnets = local.excluded_subnets_str, local_zone_line = local.local_zone_record_str, arecord_lines = local.local_a_records_str, forward_addr_lines = local.foward_lines_str }), "\r", "")) + cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { installcmd = local.script_file_b64, unboundconf = local.unbound_conf_file_b64, ssh_port = var.ssh_port }) } data "azurerm_subnet" "vnet" { @@ -136,7 +136,7 @@ data "azurerm_subnet" "vnet" { data "azurerm_subscription" "primary" {} data "azurerm_resource_group" "vm" { - name = var.resource_group_name + name = var.resource_group_name depends_on = [var.module_depends_on] } @@ -155,17 +155,17 @@ resource "azurerm_network_interface" "vm" { } resource "azurerm_linux_virtual_machine" "vm" { - name = "${var.unique_name}-vm" - location = var.location - resource_group_name = data.azurerm_resource_group.vm.name + name = "${var.unique_name}-vm" + location = var.location + resource_group_name = data.azurerm_resource_group.vm.name network_interface_ids = [azurerm_network_interface.vm.id] - computer_name = var.unique_name - custom_data = base64encode(local.cloud_init_file) - size = var.vm_size + computer_name = var.unique_name + custom_data = base64encode(local.cloud_init_file) + size = var.vm_size os_disk { - name = "${var.unique_name}-osdisk" - caching = "ReadWrite" + name = "${var.unique_name}-osdisk" + caching = "ReadWrite" storage_account_type = "Standard_LRS" } @@ -176,20 +176,20 @@ resource "azurerm_linux_virtual_machine" "vm" { version = "latest" } - admin_username = var.admin_username - admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? var.admin_password : null + admin_username = var.admin_username + admin_password = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? var.admin_password : null disable_password_authentication = (var.ssh_key_data == null || var.ssh_key_data == "") && var.admin_password != null && var.admin_password != "" ? false : true dynamic "admin_ssh_key" { - for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? 
[] : [var.ssh_key_data] - content { - username = var.admin_username - public_key = var.ssh_key_data - } + for_each = var.ssh_key_data == null || var.ssh_key_data == "" ? [] : [var.ssh_key_data] + content { + username = var.admin_username + public_key = var.ssh_key_data + } } } resource "azurerm_virtual_machine_extension" "cse" { - name = "${var.unique_name}-cse" + name = "${var.unique_name}-cse" virtual_machine_id = azurerm_linux_virtual_machine.vm.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" diff --git a/src/terraform/modules/dnsserver/outputs.tf b/src/terraform/modules/dnsserver/outputs.tf index bece1bcf4..d27918efe 100644 --- a/src/terraform/modules/dnsserver/outputs.tf +++ b/src/terraform/modules/dnsserver/outputs.tf @@ -1,12 +1,12 @@ output "dnsserver_address" { - value = "${azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" + value = azurerm_network_interface.vm.ip_configuration[0].private_ip_address } output "dnsserver_username" { - value = "${var.admin_username}" + value = var.admin_username } output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = azurerm_virtual_machine_extension.cse.id -} \ No newline at end of file + value = azurerm_virtual_machine_extension.cse.id +} diff --git a/src/terraform/modules/hammerspace/anvil-run-once-configure/main.tf b/src/terraform/modules/hammerspace/anvil-run-once-configure/main.tf index e272af851..75c4cf650 100644 --- a/src/terraform/modules/hammerspace/anvil-run-once-configure/main.tf +++ b/src/terraform/modules/hammerspace/anvil-run-once-configure/main.tf @@ -1,6 +1,6 @@ locals { - script_file_b64 = base64gzip(replace(file("${path.module}/configure-anvil.py"),"\r","")) + script_file_b64 = base64gzip(replace(file("${path.module}/configure-anvil.py"), "\r", "")) command = "mkdir -p /opt && touch /opt/configure-anvil.py && echo ${local.script_file_b64} | base64 -d | gunzip > /opt/configure-anvil.py && python2 /opt/configure-anvil.py ${var.anvil_data_cluster_ip} ${var.web_ui_password} ${var.dsx_count} -a '${var.ad_domain}' -u '${var.ad_user}' -p '${var.ad_user_password}' -n '${var.local_site_name}' -s '${var.nfs_export_path}' --azure-account '${var.azure_storage_account}' --azure-account-key '${var.azure_storage_account_key}' --azure-account-container '${var.azure_storage_account_container}' " } @@ -21,4 +21,4 @@ resource "azurerm_virtual_machine_extension" "cse" { SETTINGS depends_on = [var.module_depends_on] -} \ No newline at end of file +} diff --git a/src/terraform/modules/hammerspace/anvil-run-once-configure/variables.tf b/src/terraform/modules/hammerspace/anvil-run-once-configure/variables.tf index 0083409a1..59cd32d6a 100644 --- a/src/terraform/modules/hammerspace/anvil-run-once-configure/variables.tf +++ b/src/terraform/modules/hammerspace/anvil-run-once-configure/variables.tf @@ -16,42 +16,42 @@ variable "dsx_count" { variable "nfs_export_path" { description = "the nfs export path to export from the Hammerspace filer, leave blank to not set" - default = "" + default = "" } variable "local_site_name" { description = "the local site name, leave blank to not set" - default = "" + default = "" } variable "ad_domain" { description = "the ad domainname, leave blank to not set" - default = "" + default = "" } variable "ad_user" { description = "the ad user, leave blank to not set" - default = "" + default = "" } variable "ad_user_password" { description = "the ad user password, leave blank to not set" - default = "" + default = "" } variable "azure_storage_account" { 
description = "the azure storage account name" - default = "" + default = "" } variable "azure_storage_account_key" { description = "the azure storage account key" - default = "" + default = "" } variable "azure_storage_account_container" { description = "the azure storage account container" - default = "" + default = "" } variable "anvil_hostname" { @@ -59,6 +59,6 @@ variable "anvil_hostname" { } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" } diff --git a/src/terraform/modules/hammerspace/anvil/main.tf b/src/terraform/modules/hammerspace/anvil/main.tf index 26565f1a4..8e5d23b33 100644 --- a/src/terraform/modules/hammerspace/anvil/main.tf +++ b/src/terraform/modules/hammerspace/anvil/main.tf @@ -7,11 +7,11 @@ locals { // best practice is to ensure the ha_subnet needs to be isolated - anvil_dynamic_cluster_ip = var.anvil_data_cluster_ip == "" - load_balancer_fe_name = "${var.unique_name}LoadBalancerFrontEnd" + anvil_dynamic_cluster_ip = var.anvil_data_cluster_ip == "" + load_balancer_fe_name = "${var.unique_name}LoadBalancerFrontEnd" // advanced - domain = "${var.unique_name}.azure" + domain = "${var.unique_name}.azure" is_high_availability = var.anvil_configuration == "High Availability" } @@ -81,7 +81,7 @@ resource "azurerm_lb_rule" "anvilloadbalancerlbrule" { locals { anvil_node_count = local.is_high_availability ? 2 : 1 - anvil_host_names = [for i in range(local.anvil_node_count): + anvil_host_names = [for i in range(local.anvil_node_count) : "${var.unique_name}anvil${i}" ] @@ -92,7 +92,7 @@ locals { anvil_lb_ip = local.is_high_availability ? azurerm_lb.anvilloadbalancer[0].frontend_ip_configuration[0].private_ip_address : "" - // configure the custom data + // configure the custom data standalone_custom_data = [ < 0 ? var.open_external_sources : [] - content { - name = "SSH-${security_rule.key + 120}" - priority = security_rule.key + 120 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_ranges = var.open_external_ports - source_address_prefix = security_rule.value - destination_address_prefix = "*" - } + name = "ssh_nsg" + location = var.location + resource_group_name = var.resource_group_name + + dynamic "security_rule" { + for_each = length(var.open_external_ports) > 0 ? var.open_external_sources : [] + content { + name = "SSH-${security_rule.key + 120}" + priority = security_rule.key + 120 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = var.open_external_ports + source_address_prefix = security_rule.value + destination_address_prefix = "*" } - - dynamic "security_rule" { - for_each = length(var.open_external_udp_ports) > 0 ? var.open_external_sources : [] - content { - name = "udp-${security_rule.key + 121}" - priority = security_rule.key + 121 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_ranges = var.open_external_udp_ports - source_address_prefix = security_rule.value - destination_address_prefix = "*" - } + } + + dynamic "security_rule" { + for_each = length(var.open_external_udp_ports) > 0 ? 
var.open_external_sources : [] + content { + name = "udp-${security_rule.key + 121}" + priority = security_rule.key + 121 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = var.open_external_udp_ports + source_address_prefix = security_rule.value + destination_address_prefix = "*" } + } - depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] + depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] } resource "azurerm_network_security_group" "no_internet_nsg" { - name = "no_internet_nsg" - location = var.location - resource_group_name = var.resource_group_name - - // block all inbound from lb, etc - security_rule { - name = "nointernetinbound" - priority = 130 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "Internet" - destination_address_prefix = "*" - } - - depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] + name = "no_internet_nsg" + location = var.location + resource_group_name = var.resource_group_name + + // block all inbound from lb, etc + security_rule { + name = "nointernetinbound" + priority = 130 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "Internet" + destination_address_prefix = "*" + } + + depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] } resource "azurerm_virtual_network" "vnet" { - name = "rendervnet" - address_space = [var.vnet_address_space] - location = var.location - resource_group_name = var.resource_group_name - dns_servers = var.dns_servers + name = "rendervnet" + address_space = [var.vnet_address_space] + location = var.location + resource_group_name = var.resource_group_name + dns_servers = var.dns_servers - depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] + depends_on = [var.module_depends_on, azurerm_resource_group.render_rg] } resource "azurerm_subnet" "cloud_cache" { - name = var.subnet_cloud_cache_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_cloud_cache_address_prefix] - service_endpoints = ["Microsoft.Storage"] + name = var.subnet_cloud_cache_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_cloud_cache_address_prefix] + service_endpoints = ["Microsoft.Storage"] } resource "azurerm_subnet_network_security_group_association" "cloud_cache" { - subnet_id = azurerm_subnet.cloud_cache.id - network_security_group_id = azurerm_network_security_group.no_internet_nsg.id + subnet_id = azurerm_subnet.cloud_cache.id + network_security_group_id = azurerm_network_security_group.no_internet_nsg.id } resource "azurerm_subnet" "cloud_filers" { - name = var.subnet_cloud_filers_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_cloud_filers_address_prefix] + name = var.subnet_cloud_filers_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_cloud_filers_address_prefix] } resource "azurerm_subnet_network_security_group_association" "cloud_filers" { @@ -102,10 +102,10 @@ resource 
"azurerm_subnet_network_security_group_association" "cloud_filers" { } resource "azurerm_subnet" "cloud_filers_ha" { - name = var.subnet_cloud_filers_ha_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_cloud_filers_ha_address_prefix] + name = var.subnet_cloud_filers_ha_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_cloud_filers_ha_address_prefix] } resource "azurerm_subnet_network_security_group_association" "cloud_filers_ha" { @@ -114,12 +114,12 @@ resource "azurerm_subnet_network_security_group_association" "cloud_filers_ha" { } resource "azurerm_subnet" "jumpbox" { - name = var.subnet_jumpbox_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_jumpbox_address_prefix] - # needed for the controller to add storage containers - service_endpoints = ["Microsoft.Storage"] + name = var.subnet_jumpbox_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_jumpbox_address_prefix] + # needed for the controller to add storage containers + service_endpoints = ["Microsoft.Storage"] } resource "azurerm_subnet_network_security_group_association" "jumpbox" { @@ -128,10 +128,10 @@ resource "azurerm_subnet_network_security_group_association" "jumpbox" { } resource "azurerm_subnet" "render_clients1" { - name = var.subnet_render_clients1_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_render_clients1_address_prefix] + name = var.subnet_render_clients1_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_render_clients1_address_prefix] } // partition the render clients in groups of roughly 500 nodes (max 507, and azure takes 5 reserved) @@ -141,13 +141,13 @@ resource "azurerm_subnet_network_security_group_association" "render_clients1" { } resource "azurerm_subnet" "render_clients2" { - name = var.subnet_render_clients2_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = var.resource_group_name - address_prefixes = [var.subnet_render_clients2_address_prefix] + name = var.subnet_render_clients2_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = var.resource_group_name + address_prefixes = [var.subnet_render_clients2_address_prefix] } resource "azurerm_subnet_network_security_group_association" "render_clients2" { subnet_id = azurerm_subnet.render_clients2.id network_security_group_id = azurerm_network_security_group.no_internet_nsg.id -} \ No newline at end of file +} diff --git a/src/terraform/modules/render_network/outputs.tf b/src/terraform/modules/render_network/outputs.tf index ba8203b37..0ac703ebb 100644 --- a/src/terraform/modules/render_network/outputs.tf +++ b/src/terraform/modules/render_network/outputs.tf @@ -10,7 +10,7 @@ output "vnet_name" { output "vnet_address_space" { description = "The full address space of the virtual network" - value = var.vnet_address_space + value = var.vnet_address_space } output "vnet_id" { @@ -80,5 +80,5 @@ output "render_clients2_subnet_id" { output "module_depends_on_ids" { description 
= "the id(s) to force others to wait" - value = [azurerm_subnet_network_security_group_association.cloud_cache.id,azurerm_subnet_network_security_group_association.cloud_filers.id,azurerm_subnet_network_security_group_association.jumpbox.id,azurerm_subnet_network_security_group_association.render_clients1.id,azurerm_subnet_network_security_group_association.render_clients2.id] -} \ No newline at end of file + value = [azurerm_subnet_network_security_group_association.cloud_cache.id, azurerm_subnet_network_security_group_association.cloud_filers.id, azurerm_subnet_network_security_group_association.jumpbox.id, azurerm_subnet_network_security_group_association.render_clients1.id, azurerm_subnet_network_security_group_association.render_clients2.id] +} diff --git a/src/terraform/modules/render_network/variables.tf b/src/terraform/modules/render_network/variables.tf index 60fa9c361..5d6072060 100644 --- a/src/terraform/modules/render_network/variables.tf +++ b/src/terraform/modules/render_network/variables.tf @@ -1,107 +1,107 @@ variable "create_resource_group" { description = "specifies to create the resource group" - default = true + default = true } variable "resource_group_name" { - description = "The resource group to contain the NFS filer." + description = "The resource group to contain the NFS filer." } variable "location" { - description = "The Azure Region into which all resources of NFS filer will be created." + description = "The Azure Region into which all resources of NFS filer will be created." } variable "vnet_address_space" { - description = "The full address space of the virtual network" - default = "10.0.0.0/16" + description = "The full address space of the virtual network" + default = "10.0.0.0/16" } variable "subnet_cloud_cache_subnet_name" { - description = "The name for the cloud cache subnet." - default = "cloud_cache" + description = "The name for the cloud cache subnet." + default = "cloud_cache" } variable "subnet_cloud_cache_address_prefix" { - description = "The address prefix used for the cloud cache subnet." - default = "10.0.1.0/24" + description = "The address prefix used for the cloud cache subnet." + default = "10.0.1.0/24" } variable "subnet_cloud_filers_subnet_name" { - description = "The name for the cloud filers subnet." - default = "cloud_filers" + description = "The name for the cloud filers subnet." + default = "cloud_filers" } variable "subnet_cloud_filers_address_prefix" { - description = "The address prefix used for the cloud filers subnet." - default = "10.0.2.0/25" + description = "The address prefix used for the cloud filers subnet." + default = "10.0.2.0/25" } variable "subnet_cloud_filers_ha_subnet_name" { - description = "The name for the cloud filers subnet." - default = "cloud_filers_ha" + description = "The name for the cloud filers subnet." + default = "cloud_filers_ha" } variable "subnet_cloud_filers_ha_address_prefix" { - description = "The address prefix used for the cloud filers ha subnet." - default = "10.0.2.128/25" + description = "The address prefix used for the cloud filers ha subnet." + default = "10.0.2.128/25" } variable "subnet_jumpbox_subnet_name" { - description = "The name for the jumpbox subnet." - default = "jumpbox" + description = "The name for the jumpbox subnet." + default = "jumpbox" } variable "subnet_jumpbox_address_prefix" { - description = "The address prefix used for the jumpbox subnet." - default = "10.0.3.0/24" + description = "The address prefix used for the jumpbox subnet." 
+ default = "10.0.3.0/24" } variable "subnet_render_clients1_subnet_name" { - description = "The name for the first render clients subnet." - default = "render_clients1" + description = "The name for the first render clients subnet." + default = "render_clients1" } variable "subnet_render_clients1_address_prefix" { - description = "The address prefix used for the first render clients subnet." - default = "10.0.4.0/23" + description = "The address prefix used for the first render clients subnet." + default = "10.0.4.0/23" } variable "subnet_render_clients2_subnet_name" { - description = "The name for the second render clients subnet." - default = "render_clients2" + description = "The name for the second render clients subnet." + default = "render_clients2" } variable "subnet_render_clients2_address_prefix" { - description = "The address prefix used for the second render clients subnet." - default = "10.0.6.0/23" + description = "The address prefix used for the second render clients subnet." + default = "10.0.6.0/23" } variable "dns_servers" { - description = "a list of dns servers" - default = null + description = "a list of dns servers" + default = null } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" } variable "open_external_ports" { - default = [22,3389] - # ports 443, 4172, 60443 required for terradici - # default = [22,3389,443,4172,60443] - description = "these are the tcp ports to open externally on the jumpbox subnet" + default = [22, 3389] + # ports 443, 4172, 60443 required for terradici + # default = [22,3389,443,4172,60443] + description = "these are the tcp ports to open externally on the jumpbox subnet" } variable "open_external_udp_ports" { - default = [] - # ports 4172 required for terradici - # default = [4172] - description = "these are the udp ports to open externally on the jumpbox subnet" + default = [] + # ports 4172 required for terradici + # default = [4172] + description = "these are the udp ports to open externally on the jumpbox subnet" } variable "open_external_sources" { - default = ["*"] - description = "this is the external source to open on the subnet" -} \ No newline at end of file + default = ["*"] + description = "this is the external source to open on the subnet" +} diff --git a/src/terraform/modules/render_network_secure/main.tf b/src/terraform/modules/render_network_secure/main.tf index 82e0c025d..b2e13995f 100644 --- a/src/terraform/modules/render_network_secure/main.tf +++ b/src/terraform/modules/render_network_secure/main.tf @@ -1,217 +1,217 @@ resource "azurerm_resource_group" "render_rg" { - name = var.resource_group_name - location = var.location + name = var.resource_group_name + location = var.location } // the following is only needed if you need to ssh to the controller resource "azurerm_network_security_group" "ssh_nsg" { - name = "ssh_nsg" - location = var.location - resource_group_name = azurerm_resource_group.render_rg.name + name = "ssh_nsg" + location = var.location + resource_group_name = azurerm_resource_group.render_rg.name - dynamic "security_rule" { - for_each = length(var.open_external_ports) > 0 ? 
var.open_external_sources : [] - content { - name = "SSH-${security_rule.key + 120}" - priority = security_rule.key + 120 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_ranges = var.open_external_ports - source_address_prefix = security_rule.value - destination_address_prefix = "*" - } + dynamic "security_rule" { + for_each = length(var.open_external_ports) > 0 ? var.open_external_sources : [] + content { + name = "SSH-${security_rule.key + 120}" + priority = security_rule.key + 120 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_ranges = var.open_external_ports + source_address_prefix = security_rule.value + destination_address_prefix = "*" } + } - security_rule { - name = "allowvnetin" - priority = 500 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "allowvnetin" + priority = 500 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + } - security_rule { - name = "allowvnetout" - priority = 500 - direction = "Outbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "allowvnetout" + priority = 500 + direction = "Outbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + } - security_rule { - name = "SSH" - priority = 1000 - direction = "Inbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "22" - source_address_prefix = var.ssh_source_address_prefix - destination_address_prefix = "*" - } + security_rule { + name = "SSH" + priority = 1000 + direction = "Inbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "22" + source_address_prefix = var.ssh_source_address_prefix + destination_address_prefix = "*" + } - security_rule { - name = "allowazurestorage" - priority = 2010 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "443" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "Storage.${var.location}" - } + security_rule { + name = "allowazurestorage" + priority = 2010 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "Storage.${var.location}" + } - security_rule { - name = "denyallin" - priority = 3000 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "denyallin" + priority = 3000 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } - security_rule { - name = "denyallout" - priority = 3000 - direction = "Outbound" - access = 
"Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "denyallout" + priority = 3000 + direction = "Outbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } } resource "azurerm_network_security_group" "no_internet_nsg" { - name = "no_internet_nsg" - location = var.location - resource_group_name = azurerm_resource_group.render_rg.name - - security_rule { - name = "allowvnetin" - priority = 500 - direction = "Inbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "VirtualNetwork" - } + name = "no_internet_nsg" + location = var.location + resource_group_name = azurerm_resource_group.render_rg.name - security_rule { - name = "allowvnetout" - priority = 500 - direction = "Outbound" - access = "Allow" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "VirtualNetwork" - } + security_rule { + name = "allowvnetin" + priority = 500 + direction = "Inbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + } - security_rule { - name = "allowproxy80" - priority = 2000 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "80" - source_address_prefix = var.subnet_proxy_address_prefix - destination_address_prefix = "*" - } + security_rule { + name = "allowvnetout" + priority = 500 + direction = "Outbound" + access = "Allow" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "VirtualNetwork" + } - security_rule { - name = "allowproxy443" - priority = 2001 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "443" - source_address_prefix = var.subnet_proxy_address_prefix - destination_address_prefix = "*" - } + security_rule { + name = "allowproxy80" + priority = 2000 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "80" + source_address_prefix = var.subnet_proxy_address_prefix + destination_address_prefix = "*" + } - security_rule { - name = "allowazurestorage" - priority = 2010 - direction = "Outbound" - access = "Allow" - protocol = "Tcp" - source_port_range = "*" - destination_port_range = "443" - source_address_prefix = "VirtualNetwork" - destination_address_prefix = "Storage.${var.location}" - } + security_rule { + name = "allowproxy443" + priority = 2001 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + source_port_range = "*" + destination_port_range = "443" + source_address_prefix = var.subnet_proxy_address_prefix + destination_address_prefix = "*" + } - security_rule { - name = "denyallin" - priority = 3000 - direction = "Inbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "allowazurestorage" + priority = 2010 + direction = "Outbound" + access = "Allow" + protocol = "Tcp" + 
source_port_range = "*" + destination_port_range = "443" + source_address_prefix = "VirtualNetwork" + destination_address_prefix = "Storage.${var.location}" + } - security_rule { - name = "denyallout" - priority = 3000 - direction = "Outbound" - access = "Deny" - protocol = "*" - source_port_range = "*" - destination_port_range = "*" - source_address_prefix = "*" - destination_address_prefix = "*" - } + security_rule { + name = "denyallin" + priority = 3000 + direction = "Inbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } + + security_rule { + name = "denyallout" + priority = 3000 + direction = "Outbound" + access = "Deny" + protocol = "*" + source_port_range = "*" + destination_port_range = "*" + source_address_prefix = "*" + destination_address_prefix = "*" + } } resource "azurerm_virtual_network" "vnet" { - name = "rendervnet" - address_space = [var.vnet_address_space] - location = var.location - resource_group_name = azurerm_resource_group.render_rg.name + name = "rendervnet" + address_space = [var.vnet_address_space] + location = var.location + resource_group_name = azurerm_resource_group.render_rg.name } resource "azurerm_subnet" "cloud_cache" { - name = var.subnet_cloud_cache_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_cloud_cache_address_prefix] - service_endpoints = ["Microsoft.Storage"] + name = var.subnet_cloud_cache_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_cloud_cache_address_prefix] + service_endpoints = ["Microsoft.Storage"] } resource "azurerm_subnet_network_security_group_association" "cloud_cache" { - subnet_id = azurerm_subnet.cloud_cache.id - network_security_group_id = azurerm_network_security_group.no_internet_nsg.id + subnet_id = azurerm_subnet.cloud_cache.id + network_security_group_id = azurerm_network_security_group.no_internet_nsg.id } resource "azurerm_subnet" "cloud_filers" { - name = var.subnet_cloud_filers_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_cloud_filers_address_prefix] + name = var.subnet_cloud_filers_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_cloud_filers_address_prefix] } resource "azurerm_subnet_network_security_group_association" "cloud_filers" { @@ -220,12 +220,12 @@ resource "azurerm_subnet_network_security_group_association" "cloud_filers" { } resource "azurerm_subnet" "jumpbox" { - name = var.subnet_jumpbox_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_jumpbox_address_prefix] - # needed for the controller to add storage containers - service_endpoints = ["Microsoft.Storage"] + name = var.subnet_jumpbox_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_jumpbox_address_prefix] + # needed for the controller to add storage containers + service_endpoints = ["Microsoft.Storage"] } resource 
"azurerm_subnet_network_security_group_association" "jumpbox" { @@ -234,10 +234,10 @@ resource "azurerm_subnet_network_security_group_association" "jumpbox" { } resource "azurerm_subnet" "render_clients1" { - name = var.subnet_render_clients1_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_render_clients1_address_prefix] + name = var.subnet_render_clients1_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_render_clients1_address_prefix] } // partition the render clients in groups of roughly 500 nodes (max 507, and azure takes 5 reserved) @@ -247,10 +247,10 @@ resource "azurerm_subnet_network_security_group_association" "render_clients1" { } resource "azurerm_subnet" "render_clients2" { - name = var.subnet_render_clients2_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_render_clients2_address_prefix] + name = var.subnet_render_clients2_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_render_clients2_address_prefix] } resource "azurerm_subnet_network_security_group_association" "render_clients2" { @@ -259,13 +259,13 @@ resource "azurerm_subnet_network_security_group_association" "render_clients2" { } resource "azurerm_subnet" "proxy" { - name = var.subnet_proxy_subnet_name - virtual_network_name = azurerm_virtual_network.vnet.name - resource_group_name = azurerm_resource_group.render_rg.name - address_prefixes = [var.subnet_proxy_address_prefix] + name = var.subnet_proxy_subnet_name + virtual_network_name = azurerm_virtual_network.vnet.name + resource_group_name = azurerm_resource_group.render_rg.name + address_prefixes = [var.subnet_proxy_address_prefix] } resource "azurerm_subnet_network_security_group_association" "proxy" { subnet_id = azurerm_subnet.proxy.id network_security_group_id = azurerm_network_security_group.no_internet_nsg.id -} \ No newline at end of file +} diff --git a/src/terraform/modules/render_network_secure/outputs.tf b/src/terraform/modules/render_network_secure/outputs.tf index d9f0ce87e..bee5f1f27 100644 --- a/src/terraform/modules/render_network_secure/outputs.tf +++ b/src/terraform/modules/render_network_secure/outputs.tf @@ -66,4 +66,4 @@ output "proxy_subnet_name" { output "proxy_subnet_id" { description = "The full id of the proxy subnet." value = azurerm_subnet.proxy.id -} \ No newline at end of file +} diff --git a/src/terraform/modules/render_network_secure/variables.tf b/src/terraform/modules/render_network_secure/variables.tf index 3972fd7dc..697daada0 100644 --- a/src/terraform/modules/render_network_secure/variables.tf +++ b/src/terraform/modules/render_network_secure/variables.tf @@ -1,87 +1,87 @@ variable "resource_group_name" { - description = "The resource group to contain the NFS filer." + description = "The resource group to contain the NFS filer." } variable "location" { - description = "The Azure Region into which all resources of NFS filer will be created." + description = "The Azure Region into which all resources of NFS filer will be created." 
} variable "vnet_address_space" { - description = "The full address space of the virtual network" - default = "10.0.0.0/16" + description = "The full address space of the virtual network" + default = "10.0.0.0/16" } variable "subnet_cloud_cache_subnet_name" { - description = "The name for the cloud cache subnet." - default = "cloud_cache" + description = "The name for the cloud cache subnet." + default = "cloud_cache" } variable "subnet_cloud_cache_address_prefix" { - description = "The address prefix used for the cloud cache subnet." - default = "10.0.1.0/24" + description = "The address prefix used for the cloud cache subnet." + default = "10.0.1.0/24" } variable "subnet_cloud_filers_subnet_name" { - description = "The name for the cloud filers subnet." - default = "cloud_filers" + description = "The name for the cloud filers subnet." + default = "cloud_filers" } variable "subnet_cloud_filers_address_prefix" { - description = "The address prefix used for the cloud filers subnet." - default = "10.0.2.0/24" + description = "The address prefix used for the cloud filers subnet." + default = "10.0.2.0/24" } variable "subnet_jumpbox_subnet_name" { - description = "The name for the jumpbox subnet." - default = "jumpbox" + description = "The name for the jumpbox subnet." + default = "jumpbox" } variable "subnet_jumpbox_address_prefix" { - description = "The address prefix used for the jumpbox subnet." - default = "10.0.3.0/24" + description = "The address prefix used for the jumpbox subnet." + default = "10.0.3.0/24" } variable "subnet_render_clients1_subnet_name" { - description = "The name for the first render clients subnet." - default = "render_clients1" + description = "The name for the first render clients subnet." + default = "render_clients1" } variable "subnet_render_clients1_address_prefix" { - description = "The address prefix used for the first render clients subnet." - default = "10.0.4.0/23" + description = "The address prefix used for the first render clients subnet." + default = "10.0.4.0/23" } variable "subnet_render_clients2_subnet_name" { - description = "The name for the second render clients subnet." - default = "render_clients2" + description = "The name for the second render clients subnet." + default = "render_clients2" } variable "subnet_render_clients2_address_prefix" { - description = "The address prefix used for the second render clients subnet." - default = "10.0.6.0/23" + description = "The address prefix used for the second render clients subnet." + default = "10.0.6.0/23" } variable "subnet_proxy_subnet_name" { - description = "The name for the proxy subnet." - default = "proxy" + description = "The name for the proxy subnet." + default = "proxy" } variable "subnet_proxy_address_prefix" { - description = "The address prefix used for the proxy subnet." - default = "10.0.255.248/29" + description = "The address prefix used for the proxy subnet." + default = "10.0.255.248/29" } variable "ssh_source_address_prefix" { - description = "The source address prefix granted for ssh access." - default = "*" + description = "The source address prefix granted for ssh access." 
+ default = "*" } variable "open_external_ports" { - default = [22] - description = "these are the ports to open externally on the jumpbox subnet, default is 22" + default = [22] + description = "these are the ports to open externally on the jumpbox subnet, default is 22" } variable "open_external_sources" { - default = ["*"] - description = "this is the external source to open on the subnet" -} \ No newline at end of file + default = ["*"] + description = "this is the external source to open on the subnet" +} diff --git a/src/terraform/modules/vdbench_config/main.tf b/src/terraform/modules/vdbench_config/main.tf index ff435bfb9..9eabb2eb5 100644 --- a/src/terraform/modules/vdbench_config/main.tf +++ b/src/terraform/modules/vdbench_config/main.tf @@ -1,18 +1,18 @@ locals { - bootstrap_script = "https://raw.githubusercontent.com/Azure/Avere/main/src/clientapps/vdbench/bootstrap.vdbench.sh" - mount_dir = "/b" - bootstrap_dir = "bootstrap" + bootstrap_script = "https://raw.githubusercontent.com/Azure/Avere/main/src/clientapps/vdbench/bootstrap.vdbench.sh" + mount_dir = "/b" + bootstrap_dir = "bootstrap" } resource "null_resource" "install_vdbench_bootstrap" { # Bootstrap script can run on any instance of the cluster # So we just choose the first in this case connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } provisioner "remote-exec" { diff --git a/src/terraform/modules/vdbench_config/variables.tf b/src/terraform/modules/vdbench_config/variables.tf index 2a6a1028d..861280790 100644 --- a/src/terraform/modules/vdbench_config/variables.tf +++ b/src/terraform/modules/vdbench_config/variables.tf @@ -1,15 +1,15 @@ variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." - default = null + default = null } variable "ssh_key_data" { description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored." @@ -17,22 +17,22 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "nfs_address" { - description = "the private name or ip address of the nfs server" + description = "the private name or ip address of the nfs server" } variable "nfs_export_path" { - description = "The writeable path exported on the nfs server that will host the boostrap scripts" + description = "The writeable path exported on the nfs server that will host the boostrap scripts" } variable "vdbench_url" { - description = "The VDBench URL points to the VDBench zip binary." 
+ description = "The VDBench URL points to the VDBench zip binary." } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/vmss_config/main.tf b/src/terraform/modules/vmss_config/main.tf index b10ef1ca5..2a4a69baa 100644 --- a/src/terraform/modules/vmss_config/main.tf +++ b/src/terraform/modules/vmss_config/main.tf @@ -1,18 +1,18 @@ locals { - bootstrap_script = "https://raw.githubusercontent.com/Azure/Avere/main/src/client/bootstrap.sh" - mount_dir = "/b" - bootstrap_dir = "bootstrap" + bootstrap_script = "https://raw.githubusercontent.com/Azure/Avere/main/src/client/bootstrap.sh" + mount_dir = "/b" + bootstrap_dir = "bootstrap" } resource "null_resource" "install_bootstrap" { # Bootstrap script can run on any instance of the cluster # So we just choose the first in this case connection { - type = "ssh" - port = var.ssh_port - host = var.node_address - user = var.admin_username - password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password - private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null + type = "ssh" + port = var.ssh_port + host = var.node_address + user = var.admin_username + password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password + private_key = var.ssh_key_data != null && var.ssh_key_data != "" ? file("~/.ssh/id_rsa") : null } provisioner "remote-exec" { diff --git a/src/terraform/modules/vmss_config/outputs.tf b/src/terraform/modules/vmss_config/outputs.tf index d7f8b32d6..a11101b36 100644 --- a/src/terraform/modules/vmss_config/outputs.tf +++ b/src/terraform/modules/vmss_config/outputs.tf @@ -5,5 +5,5 @@ output "bootstrap_script_path" { output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = null_resource.install_bootstrap.id -} \ No newline at end of file + value = null_resource.install_bootstrap.id +} diff --git a/src/terraform/modules/vmss_config/variables.tf b/src/terraform/modules/vmss_config/variables.tf index c45a495d7..b15d8f637 100644 --- a/src/terraform/modules/vmss_config/variables.tf +++ b/src/terraform/modules/vmss_config/variables.tf @@ -1,15 +1,15 @@ variable "node_address" { - description = "The address of controller or jumpbox" + description = "The address of controller or jumpbox" } variable "admin_username" { description = "Admin username on the controller or jumpbox" - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the controller or jumpbox. If not specified, ssh_key_data needs to be set." 
- default = null + default = null } variable "ssh_key_data" { @@ -18,18 +18,18 @@ variable "ssh_key_data" { variable "ssh_port" { description = "specifies the tcp port to use for ssh" - default = 22 + default = 22 } variable "nfs_address" { - description = "the private name or ip address of the nfs server" + description = "the private name or ip address of the nfs server" } variable "nfs_export_path" { - description = "The writeable path exported on the nfs server that will host the boostrap scripts" + description = "The writeable path exported on the nfs server that will host the boostrap scripts" } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +} diff --git a/src/terraform/modules/windowsgridgpu/outputs.tf b/src/terraform/modules/windowsgridgpu/outputs.tf index 84b08d51a..7f22e24a4 100644 --- a/src/terraform/modules/windowsgridgpu/outputs.tf +++ b/src/terraform/modules/windowsgridgpu/outputs.tf @@ -1,12 +1,12 @@ output "address" { - value = "${azurerm_network_interface.vm.ip_configuration[0].private_ip_address}" + value = azurerm_network_interface.vm.ip_configuration[0].private_ip_address } output "username" { - value = "${var.admin_username}" + value = var.admin_username } output "module_depends_on_id" { description = "the id(s) to force others to wait" - value = azurerm_virtual_machine_extension.cse.id -} \ No newline at end of file + value = azurerm_virtual_machine_extension.cse.id +} diff --git a/src/terraform/modules/windowsgridgpu/variables.tf b/src/terraform/modules/windowsgridgpu/variables.tf index d81e72c4c..416d5f7da 100644 --- a/src/terraform/modules/windowsgridgpu/variables.tf +++ b/src/terraform/modules/windowsgridgpu/variables.tf @@ -3,47 +3,47 @@ variable "resource_group_name" { } variable "location" { - description = "The Azure Region into which the dnsserver will be created." + description = "The Azure Region into which the dnsserver will be created." } variable "admin_username" { description = "Admin username on the dnsserver." - default = "azureuser" + default = "azureuser" } variable "admin_password" { description = "(optional) The password used for access to the dnsserver. If not specified, ssh_key_data needs to be set." - default = null + default = null } variable "unique_name" { description = "The unique name used for the dnsserver and for resource names associated with the VM." - default = "wingrid" + default = "wingrid" } variable "vm_size" { description = "Size of the VM." - default = "Standard_NV6" + default = "Standard_NV6" } variable "ad_domain" { description = "Size of the VM." - default = "" + default = "" } variable "ou_path" { description = "Size of the VM." - default = "" + default = "" } variable "ad_username" { description = "Size of the VM." - default = "" + default = "" } variable "ad_password" { description = "Size of the VM." 
- default = "" + default = "" } variable "virtual_network_resource_group" { @@ -60,45 +60,45 @@ variable "virtual_network_subnet_name" { variable "private_ip_address" { description = "specifies a static private ip address to use" - default = null + default = null } variable "image_id" { description = "specifies a custom image id, if not use marketplace" - default = null + default = null } variable "install_pcoip" { description = "specifies true or false to install pcoip" - default = true + default = true } variable "grid_url" { description = "specifies the grid url" - default = "https://bit1.blob.core.windows.net/bin/Graphics/Windows/461.09_grid_win10_server2016_server2019_64bit_azure_swl.exe" + default = "https://bit1.blob.core.windows.net/bin/Graphics/Windows/461.09_grid_win10_server2016_server2019_64bit_azure_swl.exe" } variable "teradici_pcoipagent_url" { description = "specifies the teradici pcoipagent" - default = "https://bit1.blob.core.windows.net/bin/Teradici/pcoip-agent-graphics_21.03.0.exe" + default = "https://bit1.blob.core.windows.net/bin/Teradici/pcoip-agent-graphics_21.03.0.exe" } variable "teradici_license_key" { description = "specifies the teradici pcoipagent license key" - default = "" + default = "" } variable "license_type" { description = "specify 'Windows_Client' to specifies the type of on-premise license (also known as Azure Hybrid Use Benefit https://azure.microsoft.com/en-us/pricing/hybrid-benefit/faq/) which should be used for this Virtual Machine." - default = "None" + default = "None" } variable "storage_account_type" { description = "specify the type of OS Disk. Possible values are Standard_LRS, StandardSSD_LRS and Premium_LRS" - default = "StandardSSD_LRS" + default = "StandardSSD_LRS" } variable "module_depends_on" { - default = [""] + default = [""] description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2" -} \ No newline at end of file +}