Commit 53c6b03: 2.168.0

root committed Aug 22, 2024
1 parent 3bc23d8 commit 53c6b03
Showing 13 changed files with 57 additions and 22 deletions.
4 changes: 2 additions & 2 deletions aws_deprecated/aws_combined_ha/main.tf
@@ -76,8 +76,8 @@ provider "aws" {
}

module "aws_roles" {
-source = "./../aws_roles"
-region = var.region
+source       = "./../aws_roles"
+region       = var.region
s3_bucket_id = module.aws_common.s3_bucket_id
}

16 changes: 8 additions & 8 deletions aws_v2/awsVars.tfvars
@@ -1,18 +1,18 @@
# Adjust configuration options for your AWS deployment.

# General settings
-region = "us-east-1" # Specify the region where you'd like to deploy.
-key_name = "aws_virginia" # Specify the name of the key pair you'd like to use.
-ami_id = "ami-12345" # VM image you'd like to use.
+region   = "us-east-1" # Specify the region where you'd like to deploy.
+key_name = "aws_virginia" # Specify the name of the key pair you'd like to use.
+ami_id   = "ami-12345" # VM image you'd like to use.

public_deployment = false # Set to true if you want to deploy a public instance. Set to false for a private instance.
certificate_arn = "arn:aws:iam::1234:server-certificate/" # ARN of the certificate you'd like to use for HTTPS. Only required for private deployments.

# Tags to identify deployed resources.
tags = {
-launch_date = "01/01/1970"
-# example_tag_key2 = "example_value2"
-# Add more tags as needed by following the format: tag_name = "tag_value"
+launch_date = "01/01/1970"
+# example_tag_key2 = "example_value2"
+# Add more tags as needed by following the format: tag_name = "tag_value"
}

# VM and storage settings
@@ -22,8 +22,8 @@ vm_size = "m5.4xlarge" # Choose your desired VM size. Default is "m5.4xlarge".
vol_size = 100 # Desired disk volume size in GB. Default is 100.

# Network settings
-ssh_location = [ "1.2.3.4/32" ] # List IPs you wish to whitelist for SSH access (Public Deployments Only)
-http_location = [ "2.3.4.5/32" ] # List IPs you wish to whitelist for HTTP access
+ssh_location = ["1.2.3.4/32"] # List IPs you wish to whitelist for SSH access (Public Deployments Only)
+http_location = ["2.3.4.5/32"] # List IPs you wish to whitelist for HTTP access


# If you'd like to use custom networking, uncomment the following block and provide the necessary information.
2 changes: 1 addition & 1 deletion aws_v2/modules/iam/main.tf
@@ -563,4 +563,4 @@ output "role_name" {

output "instance_role_name" {
value = aws_iam_role.instance_role.name
-}
+}
1 change: 1 addition & 0 deletions aws_v2/modules/networking/main.tf
@@ -87,6 +87,7 @@ resource "aws_security_group" "security_group" {
name = "CadoSecGroupAlt"
description = "Allow SSH and HTTPS Connections"
vpc_id = data.aws_vpc.selected_vpc_id.id
+tags = var.tags

ingress {
protocol = "tcp"
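The single added line above propagates the deployment tags onto the security group. A minimal sketch of the pattern, assuming a `tags` variable of type `map(string)` like the one set in the tfvars file earlier; the resource below is a cut-down illustration, not the module's real security group (which also carries a VPC reference and ingress rules):

```hcl
variable "tags" {
  type        = map(string)
  description = "Tags to identify deployed resources"
  default     = {}
}

# Hypothetical security group for illustration only.
resource "aws_security_group" "example" {
  name        = "ExampleSecGroup"
  description = "Allow SSH and HTTPS Connections"
  tags        = var.tags
}
```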
13 changes: 11 additions & 2 deletions azure/azure_transient/main.tf
@@ -1,6 +1,12 @@

// Variables

variable "deploy_nfs" {
type = bool
description = "Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files."
default = true
}

variable "image_id" {
type = string
description = "Cado Response VHD blobstore URL"
@@ -208,6 +214,7 @@ data "azurerm_storage_container" "container" {
}

data "azurerm_storage_share" "share" {
+count = var.deploy_nfs ? 1 : 0
name = "cadoshare"
storage_account_name = data.azurerm_storage_account.storage.name
}
@@ -265,13 +272,15 @@ resource "azurerm_linux_virtual_machine" "vm" {
"echo processing_mode = ${var.processing_mode} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo deployment_mode = terraform | sudo tee -a /home/admin/processor/first_run.cfg",
"echo worker_instance = ${var.worker_vm_type} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo azure_storage_account = ${data.azurerm_storage_account.storage.name} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo azure_storage_share = ${data.azurerm_storage_share.share.name} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo feature_flag_platform_upgrade = ${var.feature_flag_platform_upgrade} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo bucket = ${data.azurerm_storage_container.container.name} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo PROXY_url = ${var.proxy} | sudo tee -a /home/admin/processor/first_run.cfg",
"echo PROXY_cert_url = ${var.proxy_cert_url} | sudo tee -a /home/admin/processor/first_run.cfg",
],
+var.deploy_nfs ? [
+"echo azure_storage_share = ${data.azurerm_storage_share.share[0].name} | sudo tee -a /home/admin/processor/first_run.cfg",
+"echo azure_storage_account = ${data.azurerm_storage_account.storage.name} | sudo tee -a /home/admin/processor/first_run.cfg"
+] : [],
[
for k, v in var.tags :
"echo CUSTOM_TAG_${k} = ${v} | sudo tee -a /home/admin/processor/first_run.cfg"
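The Azure change makes the file share optional with the usual Terraform conditional-count pattern: the data source gets `count = var.deploy_nfs ? 1 : 0`, references to it are indexed with `[0]`, and the `first_run.cfg` lines that depend on it are appended only when the flag is true. A minimal sketch of that pattern, using hypothetical share and account names rather than the module's real values:

```hcl
variable "deploy_nfs" {
  type        = bool
  description = "Deploy NFS for storing files after processing."
  default     = true
}

# Looked up only when NFS is enabled; referenced elsewhere as share[0].
data "azurerm_storage_share" "share" {
  count                = var.deploy_nfs ? 1 : 0
  name                 = "exampleshare"        # hypothetical
  storage_account_name = "examplestorageacct"  # hypothetical
}

locals {
  # Config lines that are always written, plus the NFS-specific lines
  # spliced in only when deploy_nfs is true. The [0] index is safe here
  # because that branch is only evaluated when the data source exists.
  first_run_lines = concat(
    ["echo deployment_mode = terraform"],
    var.deploy_nfs ? [
      "echo azure_storage_share = ${data.azurerm_storage_share.share[0].name}",
    ] : [],
  )
}
```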
7 changes: 7 additions & 0 deletions azure/cado/main.tf
@@ -15,6 +15,12 @@ provider "azurerm" {

// default variables enclosed in < > should be updated

variable "deploy_nfs" {
type = bool
description = "Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files."
default = true
}

variable "image_id" {
type = string
description = "Cado Response VHD blobstore URL"
@@ -137,6 +143,7 @@ module "azure_transient" {
resource_group = var.resource_group
image_id = var.image_id
ip_pattern_https = var.ip_pattern_https
+deploy_nfs = var.deploy_nfs
ip_pattern_all = var.ip_pattern_all
instance_type = var.instance_type
main_size = var.main_size
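With the flag exposed at the root module and passed through to azure_transient, opting out of NFS at deploy time is a single setting. A hypothetical example; the variable file name is illustrative:

```hcl
# azureVars.tfvars (hypothetical file name): skipping the file share
# disables re-running analysis pipelines and downloading files.
deploy_nfs = false
```

The same could be done on the command line with `terraform apply -var="deploy_nfs=false"`.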
3 changes: 2 additions & 1 deletion gcp/README.md
@@ -59,11 +59,12 @@ No resources.
| <a name="input_create_cloud_build_role_service_account"></a> [create\_cloud\_build\_role\_service\_account](#input\_create\_cloud\_build\_role\_service\_account) | Create a custom Cloud Build role | `bool` | `true` | no |
| <a name="input_credentials_file"></a> [credentials\_file](#input\_credentials\_file) | Path to the credentials file | `string` | `""` | no |
| <a name="input_custom_networking"></a> [custom\_networking](#input\_custom\_networking) | Custom networking configuration. Set to null to create new resources. | <pre>object({<br> vpc_name = string<br> public_subnet_name = string<br> })</pre> | `null` | no |
| <a name="input_deploy_nfs"></a> [deploy\_nfs](#input\_deploy\_nfs) | Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files. | `bool` | `true` | no |
| <a name="input_finalize_cmd"></a> [finalize\_cmd](#input\_finalize\_cmd) | Command to run on the VM after deployment | `string` | `"sudo /home/admin/processor/release/finalize.sh --main"` | no |
| <a name="input_image"></a> [image](#input\_image) | Cado Response VM image path | `string` | `"projects/cado-public/global/images/cadoresponse"` | no |
| <a name="input_inbound_ports"></a> [inbound\_ports](#input\_inbound\_ports) | The list of ports to open | `list(string)` | <pre>[<br> "22",<br> "443"<br>]</pre> | no |
| <a name="input_instance_worker_type"></a> [instance\_worker\_type](#input\_instance\_worker\_type) | Set Worker instance type | `string` | `"n2-standard-8"` | no |
| <a name="input_local_ports"></a> [local\_ports](#input\_local\_ports) | The list of ports to open to speak on the local subnet | `list(string)` | <pre>[<br> "5432",<br> "9200",<br> "6379"<br>]</pre> | no |
| <a name="input_local_ports"></a> [local\_ports](#input\_local\_ports) | The list of ports to open to speak on the local subnet | `list(string)` | <pre>[<br> "5432",<br> "9200",<br> "6379",<br> "24224"<br>]</pre> | no |
| <a name="input_nfs_protocol"></a> [nfs\_protocol](#input\_nfs\_protocol) | The Filestore NFS Protocol to use | `string` | `"NFS_V3"` | no |
| <a name="input_project_id"></a> [project\_id](#input\_project\_id) | Project id to deploy to | `string` | n/a | yes |
| <a name="input_proxy"></a> [proxy](#input\_proxy) | Proxy URL to use for outbound connections in format / User Pass - https://user:[email protected]:1234 \| IP Auth - https://1.2.3.4:1234 | `string` | `""` | no |
1 change: 1 addition & 0 deletions gcp/gcpVars.tfvars
@@ -24,6 +24,7 @@ vol_size = 100 # Desired disk volume size in GB. Default is 100.
allowed_ips = ["1.2.3.4/32", "2.3.4.5/32"] # List IPs you wish to whitelist.

nfs_protocol = "NFS_V3" # Choose the NFS protocol version. Default is "NFS_V3". "NFS_V4_1" is in GCP beta.
+deploy_nfs = true # Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files.

# If you'd like to use custom networking, uncomment the following block and provide the necessary information.
# If you're unsure, leave this section commented out and the deployment will use default networking settings.
1 change: 1 addition & 0 deletions gcp/main.tf
@@ -61,4 +61,5 @@ module "deploy" {
proxy_cert_url = var.proxy_cert_url
instance_worker_type = var.instance_worker_type
use_beta = local.use_beta
+deploy_nfs = var.deploy_nfs
}
7 changes: 4 additions & 3 deletions gcp/modules/deploy/README.md
@@ -7,8 +7,8 @@ No requirements.

| Name | Version |
|------|---------|
| <a name="provider_google"></a> [google](#provider\_google) | n/a |
| <a name="provider_google-beta"></a> [google-beta](#provider\_google-beta) | n/a |
| <a name="provider_google"></a> [google](#provider\_google) | 5.41.0 |
| <a name="provider_google-beta"></a> [google-beta](#provider\_google-beta) | 5.41.0 |

## Modules

@@ -32,6 +32,7 @@ No modules.
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_boot_disk_image"></a> [boot\_disk\_image](#input\_boot\_disk\_image) | The image to use for the VM's boot disk | `string` | n/a | yes |
| <a name="input_deploy_nfs"></a> [deploy\_nfs](#input\_deploy\_nfs) | Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files. | `bool` | n/a | yes |
| <a name="input_finalize_cmd"></a> [finalize\_cmd](#input\_finalize\_cmd) | Command to run on the VM after deployment | `string` | n/a | yes |
| <a name="input_instance_worker_type"></a> [instance\_worker\_type](#input\_instance\_worker\_type) | Set Worker instance type | `string` | n/a | yes |
| <a name="input_network_config"></a> [network\_config](#input\_network\_config) | The network configuration for the VM | `string` | n/a | yes |
@@ -44,7 +45,7 @@ No modules.
| <a name="input_subnetwork_config"></a> [subnetwork\_config](#input\_subnetwork\_config) | The subnetwork configuration for the VM | `any` | n/a | yes |
| <a name="input_tags"></a> [tags](#input\_tags) | Tags to apply to main vm and any spawned workers | `map(string)` | n/a | yes |
| <a name="input_unique_name"></a> [unique\_name](#input\_unique\_name) | n/a | `string` | n/a | yes |
| <a name="input_use_beta"></a> [use\_beta](#input\_use\_beta) | n/a | `bool` | n/a | yes |
| <a name="input_use_beta"></a> [use\_beta](#input\_use\_beta) | Use beta filestore to utilize NFS\_V4\_1 | `bool` | n/a | yes |
| <a name="input_vm_size"></a> [vm\_size](#input\_vm\_size) | The size of the VM to deploy | `string` | n/a | yes |
| <a name="input_vol_size"></a> [vol\_size](#input\_vol\_size) | The size of the volume to attach to the VM | `number` | n/a | yes |

13 changes: 8 additions & 5 deletions gcp/modules/deploy/main.tf
@@ -37,8 +37,8 @@ resource "google_compute_instance" "vm_instance" {
"#!/bin/bash -x",
"storage_bucket=${google_storage_bucket.bucket.name}",
"echo [FIRST_RUN] > /home/admin/processor/first_run.cfg",
"echo filestore_ip = ${local.filestore_instance.networks[0].ip_addresses[0]} >> /home/admin/processor/first_run.cfg",
"echo filestore_name = ${local.filestore_instance.file_shares[0].name} >> /home/admin/processor/first_run.cfg",
var.deploy_nfs ? "echo filestore_ip = ${local.filestore_instance.networks[0].ip_addresses[0]} >> /home/admin/processor/first_run.cfg" : "",
var.deploy_nfs ? "echo filestore_name = ${local.filestore_instance.file_shares[0].name} >> /home/admin/processor/first_run.cfg" : "",
"echo bucket = $storage_bucket >> /home/admin/processor/first_run.cfg",
"echo service_account_email = ${var.service_account} >> /home/admin/processor/first_run.cfg",
"echo processing_mode = scalable-vm >> /home/admin/processor/first_run.cfg",
@@ -85,7 +85,7 @@ resource "google_compute_attached_disk" "attached_data_disk" {
}

resource "google_filestore_instance" "beta_filestore_instance" {
-count = var.use_beta ? 1 : 0
+count = (var.use_beta && var.deploy_nfs) ? 1 : 0
provider = google-beta
project = var.project_id
name = "cadoresponse-fileshare-${var.unique_name}"
@@ -106,7 +106,7 @@ resource "google_filestore_instance" "beta_filestore_instance" {
}

resource "google_filestore_instance" "filestore_instance" {
-count = var.use_beta ? 0 : 1
+count = (var.use_beta && var.deploy_nfs) ? 0 : 1
name = "cadoresponse-fileshare-${var.unique_name}"
location = data.google_compute_zones.available.names[0]
tier = "BASIC_HDD"
@@ -124,9 +124,12 @@ resource "google_filestore_instance" "filestore_instance" {
}

locals {
-filestore_instance = var.use_beta ? google_filestore_instance.beta_filestore_instance[0] : google_filestore_instance.filestore_instance[0]
+filestore_instance = var.use_beta ? google_filestore_instance.beta_filestore_instance[0] : (
+var.deploy_nfs ? google_filestore_instance.filestore_instance[0] : null
+)
}


resource "google_storage_bucket" "bucket" {
name = "cadoresponse-bucket-${var.unique_name}"
location = var.region
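On GCP the flag interacts with `use_beta`: the two Filestore resources carry conditional `count` expressions, and the `filestore_instance` local resolves to whichever instance exists, or `null` when NFS is disabled, so consumers have to guard before dereferencing it. A self-contained sketch of the selection pattern, with hypothetical names and trimmed arguments; note that, unlike the committed count expressions, the sketch also skips the standard instance when `deploy_nfs` is false and checks that flag before indexing, purely to keep the example's `[0]` references safe:

```hcl
variable "use_beta" {
  type    = bool
  default = false
}

variable "deploy_nfs" {
  type    = bool
  default = true
}

# Standard Filestore instance: created only when NFS is wanted and the
# beta variant is not requested.
resource "google_filestore_instance" "standard" {
  count    = (var.deploy_nfs && !var.use_beta) ? 1 : 0
  name     = "example-share"  # hypothetical
  location = "us-central1-b"  # hypothetical zone
  tier     = "BASIC_HDD"

  file_shares {
    capacity_gb = 1024
    name        = "share1"
  }

  networks {
    network = "default"
    modes   = ["MODE_IPV4"]
  }
}

# Beta Filestore instance: created only when NFS is wanted and use_beta is set.
resource "google_filestore_instance" "beta" {
  provider = google-beta
  count    = (var.deploy_nfs && var.use_beta) ? 1 : 0
  name     = "example-share-beta"  # hypothetical
  location = "us-central1-b"
  tier     = "BASIC_HDD"

  file_shares {
    capacity_gb = 1024
    name        = "share1"
  }

  networks {
    network = "default"
    modes   = ["MODE_IPV4"]
  }
}

locals {
  # Whichever instance was created, or null when NFS is disabled.
  filestore = var.deploy_nfs ? (
    var.use_beta ? google_filestore_instance.beta[0] : google_filestore_instance.standard[0]
  ) : null

  # Consumers guard on null before reading attributes, e.g. when building
  # the first_run.cfg lines.
  filestore_config_lines = local.filestore == null ? [] : [
    "echo filestore_ip = ${local.filestore.networks[0].ip_addresses[0]}",
    "echo filestore_name = ${local.filestore.file_shares[0].name}",
  ]
}
```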
5 changes: 5 additions & 0 deletions gcp/modules/deploy/variables.tf
@@ -73,3 +73,8 @@ variable "use_beta" {
type = bool
description = "Use beta filestore to utilize NFS_V4_1"
}

variable "deploy_nfs" {
description = "Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files."
type = bool
}
6 changes: 6 additions & 0 deletions gcp/variables.tf
@@ -127,3 +127,9 @@ variable "custom_networking" {
})
default = null
}

variable "deploy_nfs" {
description = "Deploy NFS for storing files after processing. Setting to false will disable the re-running of analysis pipelines and downloading files."
type = bool
default = true
}
