add ability to specify ssh port and ssh key for controller and vfxt
anhowe committed Jun 24, 2020
1 parent 02bf373 commit 0853689
Showing 27 changed files with 207 additions and 33 deletions.
@@ -48,3 +48,7 @@ output "storage_resource_group_name" {
output "storage_account_name" {
value = "\"${local.storage_account_name}\""
}

output "use_blob_storage" {
value = true
}
@@ -10,7 +10,7 @@ locals {
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."

// nfs filer details
filer_resource_group_name = "houdini_storage_rg"
storage_resource_group_name = "houdini_storage_rg"
// more filer sizes listed at https://github.com/Azure/Avere/tree/master/src/terraform/modules/nfs_filer
filer_size = "Standard_D2s_v3"

@@ -32,7 +32,7 @@ provider "azurerm" {
}

resource "azurerm_resource_group" "nfsfiler" {
name = local.filer_resource_group_name
name = local.storage_resource_group_name
location = local.location
}

@@ -65,3 +65,10 @@ output "filer_export" {
value = "\"${module.nasfiler1.core_filer_export}\""
}

output "storage_resource_group_name" {
value = "\"${local.storage_resource_group_name}\""
}

output "use_nfs_storage" {
value = true
}
82 changes: 66 additions & 16 deletions src/terraform/examples/houdinienvironment/3.cache/main.tf
@@ -7,15 +7,17 @@ locals {
// if you use SSH key, ensure you have ~/.ssh/id_rsa with permission 600
// populated where you are running terraform
vm_ssh_key_data = null //"ssh-rsa AAAAB3...."
ssh_port = 22

// vfxt details
vfxt_resource_group_name = "houdini_vfxt_rg"
// if you are running a locked down network, set controller_add_public_ip to false
controller_add_public_ip = true
vfxt_cluster_name = "vfxt"
vfxt_cluster_password = "VFXT_PASSWORD"
vfxt_ssh_key_data = local.vm_ssh_key_data

// replace below variables with the infrastructure variables from 1.base_infrastructure
// replace below variables with the infrastructure variables from 0.network
location = ""
storage_account_name = ""
storage_resource_group_name = ""
@@ -25,12 +27,33 @@
vnet_render_clients1_subnet_id = ""
vnet_resource_group = ""

// either replace below variables from
// 1.storage/blobstorage,
use_blob_storage = false
storage_account_name = ""
//storage_resource_group_name = ""
// or 1.storage/nfsfiler
use_nfs_storage = false
filer_address = ""
filer_export = ""
storage_resource_group_name = ""

// advanced scenarios: the variables below rarely need changing
// in addition to the storage account, put the custom image resource group here
alternative_resource_groups = [local.storage_resource_group_name]
// cloud filer details
junction_namespace_path = "/storagevfxt"
junction_namespace_path_clfs = "/houdiniclfs"
junction_namespace_path_filer = "/houdinifiler"
// only for the blob storage
storage_container_name = "cache"
// only for the nfs filer storage
// vfxt cache policies
// "Clients Bypassing the Cluster"
// "Read Caching"
// "Read and Write Caching"
// "Full Caching"
// "Transitioning Clients Before or After a Migration"
cache_policy = "Clients Bypassing the Cluster"
}

provider "azurerm" {
@@ -48,6 +71,7 @@ module "vfxtcontroller" {
ssh_key_data = local.vm_ssh_key_data
add_public_ip = local.controller_add_public_ip
alternative_resource_groups = local.alternative_resource_groups
ssh_port = local.ssh_port

// network details
virtual_network_resource_group = local.vnet_resource_group
@@ -61,10 +85,11 @@ resource "avere_vfxt" "vfxt" {
controller_admin_username = module.vfxtcontroller.controller_username
// ssh key takes precedence over controller password
controller_admin_password = local.vm_ssh_key_data != null && local.vm_ssh_key_data != "" ? "" : local.vm_admin_password
controller_ssh_port = local.ssh_port
// terraform does not create the implicit dependency on the controller module,
// so during destroy it tries to destroy the controller at the same time as the vfxt cluster
// to work around this, add the explicit dependency
depends_on = [module.vfxtcontroller]
depends_on = [module.vfxtcontroller.module_depends_on_id]

// network
azure_network_resource_group = local.vnet_resource_group
@@ -75,6 +100,7 @@ resource "avere_vfxt" "vfxt" {
azure_resource_group = local.vfxt_resource_group_name
vfxt_cluster_name = local.vfxt_cluster_name
vfxt_admin_password = local.vfxt_cluster_password
vfxt_ssh_key_data = local.vfxt_ssh_key_data
vfxt_node_count = 3

global_custom_settings = [
@@ -85,17 +111,40 @@ resource "avere_vfxt" "vfxt" {
"cluster.NfsFrontEndCwnd EK 1",
]

azure_storage_filer {
account_name = local.storage_account_name
container_name = local.storage_container_name
junction_namespace_path = local.junction_namespace_path
custom_settings = [
"client_rt_preferred FE 524288",
"client_wt_preferred NO 524288",
"nfsConnMult YW 20",
"autoWanOptimize YF 2",
"always_forward OZ 1",
]
dynamic "azure_storage_filer" {
for_each = local.use_blob_storage ? ["use_blob_storage"] : []
content {
account_name = local.storage_account_name
container_name = local.storage_container_name
junction_namespace_path = local.junction_namespace_path_clfs
custom_settings = [
"client_rt_preferred FE 524288",
"client_wt_preferred NO 524288",
"nfsConnMult YW 20",
"autoWanOptimize YF 2",
"always_forward OZ 1",
]
}
}

dynamic "core_filer" {
for_each = local.use_nfs_storage ? ["use_nfs_storage"] : []
content {
name = "nfs1"
fqdn_or_primary_ip = local.filer_address
cache_policy = local.cache_policy
custom_settings = [
"client_rt_preferred FE 524288",
"client_wt_preferred NO 524288",
"nfsConnMult YW 20",
"autoWanOptimize YF 2",
"always_forward OZ 1",
]
junction {
namespace_path = local.junction_namespace_path_filer
core_filer_export = local.filer_export
}
}
}
}

@@ -105,10 +154,11 @@ module "mount_nfs" {
node_address = module.vfxtcontroller.controller_address
admin_username = local.vm_admin_username
admin_password = local.vm_admin_password
ssh_port = local.ssh_port
ssh_key_data = local.vm_ssh_key_data
mount_dir = "/mnt/nfs"
nfs_address = tolist(avere_vfxt.vfxt.vserver_ip_addresses)[0]
nfs_export_path = local.junction_namespace_path
nfs_export_path = local.use_nfs_storage ? local.junction_namespace_path_filer : local.junction_namespace_path_clfs
}

output "controller_username" {
@@ -132,5 +182,5 @@ output "mount_addresses" {
}

output "mount_path" {
value = "\"${local.junction_namespace_path}\""
value = "\"${local.use_nfs_storage ? local.junction_namespace_path_filer : local.junction_namespace_path_clfs}\""
}
1 change: 1 addition & 0 deletions src/terraform/modules/cachewarmer_build/main.tf
@@ -8,6 +8,7 @@ locals {
resource "null_resource" "build_cachewarmer_bootstrap" {
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
5 changes: 5 additions & 0 deletions src/terraform/modules/cachewarmer_build/variables.tf
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "bootstrap_mount_address" {
description = "the mount address that hosts the worker bootstrap script"
}
1 change: 1 addition & 0 deletions src/terraform/modules/cachewarmer_manager_install/main.tf
@@ -11,6 +11,7 @@ locals {
resource "null_resource" "install_cachewarmer_manager" {
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "bootstrap_mount_address" {
description = "the mount address that hosts the manager and worker bootstrap script"
}
1 change: 1 addition & 0 deletions src/terraform/modules/cachewarmer_submitjob/main.tf
@@ -6,6 +6,7 @@ locals {
resource "null_resource" "cachewarmer_submitjob" {
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
5 changes: 5 additions & 0 deletions src/terraform/modules/cachewarmer_submitjob/variables.tf
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "jobMount_address" {
description = "the mount address for warm job processing"
}
@@ -10,6 +10,7 @@ resource "null_resource" "cachewarmer_submitmultiplejobs" {

connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "jobMount_address" {
description = "the mount address for warm job processing"
}
4 changes: 3 additions & 1 deletion src/terraform/modules/controller/cloud-init.tpl
@@ -15,4 +15,6 @@ write_files:
permissions: '0755'

runcmd:
- set -x
- patch --quiet --forward /usr/local/lib/python2.7/dist-packages/vFXT/msazure.py /usr/local/lib/python2.7/dist-packages/vFXT/msazure.py.patch1
- if [ "${ssh_port}" -ne "22" ]; then sed -i 's/^#\?Port .*/Port ${ssh_port}/' /etc/ssh/sshd_config && systemctl restart sshd ; fi
2 changes: 1 addition & 1 deletion src/terraform/modules/controller/main.tf
@@ -12,7 +12,7 @@ locals {
# send the script file to custom data, adding env vars
script_file_b64 = base64gzip(replace(file("${path.module}/averecmd.txt"),"\r",""))
msazure_patch1_file_b64 = base64gzip(replace(file("${path.module}/msazure.py.patch1"),"\r",""))
cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { averecmd = local.script_file_b64, msazure_patch1 = local.msazure_patch1_file_b64 })
cloud_init_file = templatefile("${path.module}/cloud-init.tpl", { averecmd = local.script_file_b64, msazure_patch1 = local.msazure_patch1_file_b64, ssh_port = var.ssh_port })
# the roles assigned to the controller managed identity principal
# the contributor role is required to create Avere clusters
avere_create_cluster_role = "Avere Contributor"
5 changes: 5 additions & 0 deletions src/terraform/modules/controller/variables.tf
@@ -67,6 +67,11 @@ variable "apply_patch" {
default = true
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "module_depends_on" {
default = [""]
description = "depends on workaround discussed in https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2"
1 change: 1 addition & 0 deletions src/terraform/modules/mount_nfs/main.tf
@@ -7,6 +7,7 @@ resource "null_resource" "install_bootstrap" {
# So we just choose the first in this case
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
5 changes: 5 additions & 0 deletions src/terraform/modules/mount_nfs/variables.tf
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "mount_dir" {
description = "the mount directory on the local machine"
default = "/mnt/nfs"
1 change: 1 addition & 0 deletions src/terraform/modules/vdbench_config/main.tf
@@ -8,6 +8,7 @@ resource "null_resource" "install_vdbench_bootstrap" {
# So we just choose the first in this case
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
5 changes: 5 additions & 0 deletions src/terraform/modules/vdbench_config/variables.tf
@@ -15,6 +15,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "nfs_address" {
description = "the private name or ip address of the nfs server"
}
1 change: 1 addition & 0 deletions src/terraform/modules/vmss_config/main.tf
@@ -8,6 +8,7 @@ resource "null_resource" "install_bootstrap" {
# So we just choose the first in this case
connection {
type = "ssh"
port = var.ssh_port
host = var.node_address
user = var.admin_username
password = var.ssh_key_data != null && var.ssh_key_data != "" ? "" : var.admin_password
5 changes: 5 additions & 0 deletions src/terraform/modules/vmss_config/variables.tf
@@ -16,6 +16,11 @@ variable "ssh_key_data" {
description = "(optional) The public SSH key used for access to the controller or jumpbox. If not specified, the password needs to be set. The ssh_key_data takes precedence over the password, and if set, the password will be ignored."
}

variable "ssh_port" {
description = "specifies the tcp port to use for ssh"
default = 22
}

variable "nfs_address" {
description = "the private name or ip address of the nfs server"
}
2 changes: 2 additions & 0 deletions src/terraform/providers/terraform-provider-avere/README.md
@@ -74,6 +74,7 @@ The following arguments are supported:
* <a name="controller_address"></a>[controller_address](#controller_address) - (Optional if [run_local](#run_local) is set to true) the ip address of the controller. This address may be public or private. If private it will need to be reachable from where terraform is executed.
* <a name="controller_admin_username"></a>[controller_admin_username](#controller_admin_username) - (Optional if [run_local](#run_local) is set to true) the admin username to the controller
* <a name="controller_admin_password"></a>[controller_admin_password](#controller_admin_password) - (Optional) only specify if [run_local](#run_local) is set to false and password is to be used to access the key, instead of the ssh key ~/.ssh/id_rsa
* <a name="controller_ssh_port"></a>[controller_ssh_port](#controller_ssh_port) - (Optional) only specify if [run_local](#run_local) is set to false and the ssh is a value other than the default port 22.
* <a name="run_local"></a>[run_local](#run_local) - (Optional) specifies if terraform is run directly on the controller (or similar machine with vfxt.py, az cli, and averecmd). This defaults to false, and if false, a minimum of [controller_address](#controller_address) and [controller_admin_username](#controller_admin_username) must be set.
* <a name="allow_non_ascii"></a>[run_local](#run_local) - (Optional) non-ascii characters can break deployment so this is set to `false` by default. In more advanced scenarios, the ascii check may be disabled by setting to `true`.
* <a name="location"></a>[location](#location) - (Required) specify the azure region. Note: cluster re-created if modified.
@@ -91,6 +92,7 @@ The following arguments are supported:
* <a name="image_id"></a>[image_id](#image_id) - (Optional) specify a custom image id for the vFXT. This is useful when needing to use a bug fix or there is a marketplace outage. For more information see the [docs on how to create a custom image for the conroller and vfxt](../../examples/vfxt#create-vfxt-controller-from-custom-images). Note: cluster re-created if modified.
* <a name="vfxt_cluster_name"></a>[vfxt_cluster_name](#vfxt_cluster_name) - (Required) this is the name of the vFXT cluster that is shown when you browse to the management ip. To help Avere support, choose a name that matches the Avere's purpose. Note: cluster re-created if modified.
* <a name="vfxt_admin_password"></a>[vfxt_admin_password](#vfxt_admin_password) - (Required) the password for the vFXT cluster. Note: cluster re-created if modified.
* <a name="vfxt_ssh_key_data"></a>[vfxt_ssh_key_data](#vfxt_ssh_key_data) - (Optional) deploy the cluster using the ssh public key for authentication instead of the password, this is useful to align with policies.
* <a name="vfxt_node_count"></a>[vfxt_node_count](#vfxt_node_count) - (Required) the number of nodes to deploy for the Avere cluster. The count may be a minimum of 3 and a maximum of 16. If the cluster is already deployed, this will result in scaling up or down to the node count. It requires about 15 minutes to delete and add each node in a scale-up or scale-down scenario.
* <a name="node_cache_size"></a>[node_cache_size](#node_cache_size) - (Optional) The cache size in GB to use for each Avere vFXT VM. There are two options: 1024 or 4096 where 4096 is the default value. Note: cluster re-created if modified.
* <a name="vserver_first_ip"></a>[vserver_first_ip](#vserver_first_ip) - (Optional) To ensure predictable vserver ranges for dns pre-population, specify the first IP of the vserver. This will create consecutive ip addresses based on the node count set in [vfxt_node_count](#vfxt_node_count). The following configuration is recommended:
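To show how the new settings introduced in this commit fit together, here is a minimal, hypothetical sketch that combines the controller module's `ssh_port` with the provider's `controller_ssh_port` and `vfxt_ssh_key_data` arguments. It is modeled on the `3.cache/main.tf` example above; the module source path, the port value, and the key string are placeholder assumptions, and the remaining required arguments are elided rather than taken from this commit.

```terraform
locals {
  // placeholder values -- substitute your own ssh public key and port
  ssh_port        = 2222
  vm_ssh_key_data = "ssh-rsa AAAAB3...."
}

module "vfxtcontroller" {
  // assumed module source path; adjust to your checkout or registry reference
  source       = "github.com/Azure/Avere/src/terraform/modules/controller"
  ssh_key_data = local.vm_ssh_key_data
  // the controller's cloud-init moves sshd to this port on first boot when it is not 22
  ssh_port     = local.ssh_port
  // ... resource group, location, admin credentials, and network arguments as in 3.cache/main.tf ...
}

resource "avere_vfxt" "vfxt" {
  controller_address        = module.vfxtcontroller.controller_address
  controller_admin_username = module.vfxtcontroller.controller_username
  // reach the controller on the non-default ssh port
  controller_ssh_port       = local.ssh_port
  // authenticate the vFXT nodes with an ssh public key instead of the admin password
  vfxt_ssh_key_data         = local.vm_ssh_key_data
  // ... cluster name, password, node count, and network arguments as in 3.cache/main.tf ...
}
```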