diff --git a/.github/workflows/deploy_module.yml b/.github/workflows/deploy_module.yml
index 5775a51bb..350f5d3d8 100644
--- a/.github/workflows/deploy_module.yml
+++ b/.github/workflows/deploy_module.yml
@@ -43,6 +43,7 @@ jobs:
- source_module: "aws/poc-db-onboarder"
destination_repo: "terraform-aws-dsf-poc-db-onboarder"
public_submodule: "aws/rds-mysql-db aws/rds-mssql-db aws/rds-postgres-db"
+ hidden_submodules: "null/poc-db-onboarder"
begin_tag: 1.0.0
- source_module: "aws/sonar-upgrader"
@@ -59,7 +60,13 @@ jobs:
hidden_submodules: "azurerm/sonar-base-instance azurerm/statistics null/statistics"
begin_tag: 1.0.0
- ## null provider
+ - source_module: "azurerm/poc-db-onboarder"
+ destination_repo: "terraform-azurerm-dsf-poc-db-onboarder"
+ public_submodule: "azurerm/mssql-db"
+ hidden_submodules: "null/poc-db-onboarder"
+ begin_tag: 1.0.0
+
+ ## null provider
- source_module: "null/hadr"
destination_repo: "terraform-null-dsf-hadr"
begin_tag: 1.0.0
@@ -110,6 +117,16 @@ jobs:
hidden_submodules: "aws/statistics null/statistics"
begin_tag: 1.0.0
+ ## azurerm provider
+ - source_module: "azurerm/dra-admin"
+ destination_repo: "terraform-azurerm-dsf-dra-admin"
+ hidden_submodules: "azurerm/statistics null/statistics"
+ begin_tag: 1.7.6
+ - source_module: "azurerm/dra-analytics"
+ destination_repo: "terraform-azurerm-dsf-dra-analytics"
+ hidden_submodules: "azurerm/statistics null/statistics"
+ begin_tag: 1.7.6
+
# Globals
## aws provider
- source_module: "aws/core/globals"
diff --git a/.github/workflows/dsf_poc_cli.yml b/.github/workflows/dsf_poc_cli.yml
index b593bea86..8afa52a9d 100644
--- a/.github/workflows/dsf_poc_cli.yml
+++ b/.github/workflows/dsf_poc_cli.yml
@@ -1,4 +1,4 @@
-name: 'DSF POC CLI'
+name: 'DSF POC CLI - AWS'
on:
workflow_call:
@@ -25,6 +25,31 @@ on:
type: boolean
required: false
+ push:
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/aws/**'
+ - '!modules/aws/sonar-upgrader/*'
+ - '!modules/aws/statistics/*'
+ - 'modules/null/**'
+ - '!modules/null/statistics/*'
+ - 'examples/aws/poc/dsf_deployment/*'
+
+ pull_request:
+ types:
+ - 'opened'
+ - 'reopened'
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/aws/**'
+ - '!modules/aws/sonar-upgrader/*'
+ - '!modules/aws/statistics/*'
+ - 'modules/null/**'
+ - '!modules/null/statistics/*'
+ - 'examples/aws/poc/dsf_deployment/*'
+
env:
TF_CLI_ARGS: "-no-color"
TF_INPUT: 0
@@ -130,7 +155,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/dsf_poc_cli_azure.yml b/.github/workflows/dsf_poc_cli_azure.yml
index 8e4879806..73cddc16f 100644
--- a/.github/workflows/dsf_poc_cli_azure.yml
+++ b/.github/workflows/dsf_poc_cli_azure.yml
@@ -27,6 +27,25 @@ on:
type: boolean
required: false
+ push:
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/azurerm/**'
+ - 'modules/null/**'
+ - 'examples/azure/poc/dsf_deployment/*'
+
+ pull_request:
+ types:
+ - 'opened'
+ - 'reopened'
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/azurerm/**'
+ - 'modules/null/**'
+ - 'examples/azure/poc/dsf_deployment/*'
+
env:
TF_CLI_ARGS: "-no-color"
TF_INPUT: 0
@@ -49,6 +68,22 @@ jobs:
workspace: azure_cli-all-
enable_sonar: true
enable_dam: true
+ enable_dra: true
+ - name: DSF POC - SONAR
+ workspace: azure_cli-sonar-
+ enable_sonar: true
+ enable_dam: false
+ enable_dra: false
+ - name: DSF POC - DAM
+ workspace: azure_cli-dam-
+ enable_sonar: false
+ enable_dam: true
+ enable_dra: false
+ - name: DSF POC - DRA
+ workspace: azure_cli-dra-
+ enable_sonar: false
+ enable_dam: false
+ enable_dra: true
name: '${{ matrix.name }}'
runs-on: ubuntu-latest
@@ -56,6 +91,7 @@ jobs:
EXAMPLE_DIR: ./examples/azure/poc/dsf_deployment
TF_VAR_enable_sonar: ${{ matrix.enable_sonar }}
TF_VAR_enable_dam: ${{ matrix.enable_dam }}
+ TF_VAR_enable_dra: ${{ matrix.enable_dra }}
environment: test
  # Use the Bash shell regardless of whether the GitHub Actions runner is ubuntu-latest, macos-latest, or windows-latest
@@ -127,6 +163,16 @@ jobs:
az_blob = "Imperva-ragent-UBN-px86_64-b14.6.0.60.0.636085.bsx"
}
simulation_db_types_for_agent=["PostgreSql", "MySql"]
+ dra_admin_vhd_details = {
+ storage_account_name = "dsfinstallation"
+ container_name = "dra"
+ path_to_vhd = "DRA-4.13.0.20.0.3_30207_x86_64-Admin.vhd"
+ }
+ dra_analytics_vhd_details = {
+ storage_account_name = "dsfinstallation"
+ container_name = "dra"
+ path_to_vhd = "DRA-4.13.0.20.0.3_30207_x86_64-Analytics.vhd"
+ }
EOF
# Install the latest version of Terraform CLI and configure the Terraform CLI configuration file with a Terraform Cloud user API token
@@ -134,7 +180,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
@@ -159,7 +205,9 @@ jobs:
mv $EXAMPLE_DIR/outputs.tf{,_}
mv $EXAMPLE_DIR/sonar.tf{,_}
mv $EXAMPLE_DIR/dam.tf{,_}
+ mv $EXAMPLE_DIR/dra.tf{,_}
mv $EXAMPLE_DIR/agent_sources.tf{,_}
+ mv $EXAMPLE_DIR/agentless_sources.tf{,_}
mv $EXAMPLE_DIR/networking.tf{,_}
ls -la $EXAMPLE_DIR
terraform -chdir=$EXAMPLE_DIR destroy -var dam_license=license.mprv -auto-approve
@@ -167,7 +215,9 @@ jobs:
mv $EXAMPLE_DIR/outputs.tf{_,}
mv $EXAMPLE_DIR/sonar.tf{_,}
mv $EXAMPLE_DIR/dam.tf{_,}
+ mv $EXAMPLE_DIR/dra.tf{_,}
mv $EXAMPLE_DIR/agent_sources.tf{_,}
+ mv $EXAMPLE_DIR/agentless_sources.tf{_,}
mv $EXAMPLE_DIR/networking.tf{_,}
fi
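
The tfvars block written by this job hands the DRA Admin and Analytics VHD locations to the Azure example. Below is a minimal sketch of object variables matching those keys; the shapes are an assumption inferred from the tfvars above, not the example's actual definitions.

```hcl
# Hypothetical sketch: variable declarations mirroring the tfvars written above.
# Key names (storage_account_name, container_name, path_to_vhd) come from the
# workflow; the real types/defaults in the Azure example may differ.
variable "dra_admin_vhd_details" {
  description = "Location of the DRA Admin VHD blob"
  type = object({
    storage_account_name = string
    container_name       = string
    path_to_vhd          = string
  })
  default = null
}

variable "dra_analytics_vhd_details" {
  description = "Location of the DRA Analytics VHD blob"
  type = object({
    storage_account_name = string
    container_name       = string
    path_to_vhd          = string
  })
  default = null
}
```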
diff --git a/.github/workflows/dsf_single_account_cli.yml b/.github/workflows/dsf_single_account_cli.yml
index 899adf504..c13ea1f24 100644
--- a/.github/workflows/dsf_single_account_cli.yml
+++ b/.github/workflows/dsf_single_account_cli.yml
@@ -1,4 +1,4 @@
-name: 'DSF Single Account CLI'
+name: 'DSF Single Account CLI - AWS'
concurrency:
group: dsf_single_account
@@ -31,6 +31,37 @@ on:
DAM_LICENSE:
required: true
+ push:
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/aws/**'
+ - '!modules/aws/db-with-agent/*'
+ - '!modules/aws/poc-db-onboarder/*'
+ - '!modules/aws/rds-mssql-db/*'
+ - '!modules/aws/rds-mysql-db/*'
+ - '!modules/aws/rds-postgres-db/*'
+ - '!modules/aws/sonar-upgrader/*'
+ - 'modules/null/**'
+ - 'examples/aws/poc/dsf_single_account_deployment/*'
+
+ pull_request:
+ types:
+ - 'opened'
+ - 'reopened'
+ branches:
+ - 'dev'
+ paths:
+ - 'modules/aws/**'
+ - '!modules/aws/db-with-agent/*'
+ - '!modules/aws/poc-db-onboarder/*'
+ - '!modules/aws/rds-mssql-db/*'
+ - '!modules/aws/rds-mysql-db/*'
+ - '!modules/aws/rds-postgres-db/*'
+ - '!modules/aws/sonar-upgrader/*'
+ - 'modules/null/**'
+ - 'examples/aws/poc/dsf_single_account_deployment/*'
+
env:
TF_CLI_ARGS: "-no-color"
TF_INPUT: 0
@@ -147,7 +178,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/nightly_sonar_poc_basic_cli.yml b/.github/workflows/nightly_sonar_poc_basic_cli.yml
index 49ceedba5..22cdde4af 100644
--- a/.github/workflows/nightly_sonar_poc_basic_cli.yml
+++ b/.github/workflows/nightly_sonar_poc_basic_cli.yml
@@ -89,7 +89,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/plan_cli.yml b/.github/workflows/plan_cli.yml
index 191de7bb7..582f06f0b 100644
--- a/.github/workflows/plan_cli.yml
+++ b/.github/workflows/plan_cli.yml
@@ -78,6 +78,14 @@ jobs:
az_blob = "dummy-blob"
}
dam_license="license.mprv"
+ dra_admin_image_details = {
+ resource_group_name = "dummy-resource-group"
+ image_id = "dummy-admin-image-id"
+ }
+ dra_analytics_image_details = {
+ resource_group_name = "dummy-resource-group"
+ image_id = "dummy-analytics-image-id"
+ }
- name: AWS - POC - DSF
example: ./examples/aws/poc/dsf_deployment
terraformvars: |
@@ -128,7 +136,7 @@ jobs:
- name: Setup Terraform
uses: hashicorp/setup-terraform@v2
with:
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Create License File
env:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index aa1bd1446..32afb9d12 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -72,7 +72,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Format version for zip file name
run: |
@@ -176,6 +176,20 @@ jobs:
secrets:
PUSH_TO_OTHER_REPOS_TOKEN_ADMIN: ${{ secrets.PUSH_TO_OTHER_REPOS_TOKEN_ADMIN }}
+ test_plan:
+ needs: deploy_modules
+ uses: ./.github/workflows/plan_cli.yml
+ with:
+ use_modules_from_terraform_registry: true
+ explicit_ref: master
+ secrets:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_ACCESS_KEY_ID_STAGE: ${{ secrets.AWS_ACCESS_KEY_ID_STAGE }}
+ AWS_SECRET_ACCESS_KEY_STAGE: ${{ secrets.AWS_SECRET_ACCESS_KEY_STAGE }}
+ ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }}
+ DAM_LICENSE: ${{ secrets.DAM_LICENSE }}
+
release:
needs: deploy_modules
runs-on: ubuntu-latest
@@ -191,22 +205,8 @@ jobs:
env:
GH_TOKEN: ${{ github.token }}
- test_plan:
- needs: release
- uses: ./.github/workflows/plan_cli.yml
- with:
- use_modules_from_terraform_registry: true
- explicit_ref: master
- secrets:
- AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
- AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
- AWS_ACCESS_KEY_ID_STAGE: ${{ secrets.AWS_ACCESS_KEY_ID_STAGE }}
- AWS_SECRET_ACCESS_KEY_STAGE: ${{ secrets.AWS_SECRET_ACCESS_KEY_STAGE }}
- ARM_CLIENT_SECRET: ${{ secrets.ARM_CLIENT_SECRET }}
- DAM_LICENSE: ${{ secrets.DAM_LICENSE }}
-
test_apply:
- needs: test_plan
+ needs: release
uses: ./.github/workflows/sonar_poc_cli.yml
with:
use_modules_from_terraform_registry: true
diff --git a/.github/workflows/sonar_multi_account_cli.yml b/.github/workflows/sonar_multi_account_cli.yml
index 5bddbe1c4..5eef224d6 100644
--- a/.github/workflows/sonar_multi_account_cli.yml
+++ b/.github/workflows/sonar_multi_account_cli.yml
@@ -121,7 +121,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/sonar_poc_cli.yml b/.github/workflows/sonar_poc_cli.yml
index 328dc22fb..41438276d 100644
--- a/.github/workflows/sonar_poc_cli.yml
+++ b/.github/workflows/sonar_poc_cli.yml
@@ -1,4 +1,4 @@
-name: 'Sonar POC CLI'
+name: 'Sonar POC CLI - AWS'
on:
workflow_call:
@@ -25,18 +25,9 @@ on:
push:
branches:
- - 'master'
- 'dev'
paths:
- - 'modules/aws/core/*'
- - 'modules/aws/sonar-base-instance/*'
- - 'modules/aws/hub/*'
- - 'modules/aws/agentless-gw/*'
- - 'modules/aws/poc-db-onboarder/*'
- - 'modules/aws/rds-mssql-db/*'
- - 'modules/aws/rds-mysql-db/*'
- - 'modules/null/federation/*'
- - 'modules/null/hadr/*'
+ - 'examples/aws/poc/dsf_deployment/*'
- 'examples/aws/poc/sonar_basic_deployment/*'
- 'examples/aws/poc/sonar_hadr_deployment/*'
@@ -45,18 +36,9 @@ on:
- 'opened'
- 'reopened'
branches:
- - 'master'
- 'dev'
paths:
- - 'modules/aws/core/*'
- - 'modules/aws/sonar-base-instance/*'
- - 'modules/aws/hub/*'
- - 'modules/aws/agentless-gw/*'
- - 'modules/aws/poc-db-onboarder/*'
- - 'modules/aws/rds-mssql-db/*'
- - 'modules/aws/rds-mysql-db/*'
- - 'modules/null/federation/*'
- - 'modules/null/hadr/*'
+ - 'examples/aws/poc/dsf_deployment/*'
- 'examples/aws/poc/sonar_basic_deployment/*'
- 'examples/aws/poc/sonar_hadr_deployment/*'
@@ -145,7 +127,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/sonar_upgrade.yml b/.github/workflows/sonar_upgrade.yml
index e92a961d2..3b2b875f5 100644
--- a/.github/workflows/sonar_upgrade.yml
+++ b/.github/workflows/sonar_upgrade.yml
@@ -137,7 +137,7 @@ jobs:
uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- terraform_version: ~1.6.0
+ terraform_version: ~1.7.0
- name: Setup jq
uses: sergeysova/jq-action@v2
diff --git a/.github/workflows/sonar_upgrade_flow.yml b/.github/workflows/sonar_upgrade_flow.yml
index 4b1c7e737..d40488a33 100644
--- a/.github/workflows/sonar_upgrade_flow.yml
+++ b/.github/workflows/sonar_upgrade_flow.yml
@@ -15,7 +15,6 @@ on:
push:
branches:
- - 'master'
- 'dev'
paths:
- 'modules/aws/sonar-upgrader/**'
@@ -26,7 +25,6 @@ on:
- 'opened'
- 'reopened'
branches:
- - 'master'
- 'dev'
paths:
- 'modules/aws/sonar-upgrader/**'
diff --git a/README.md b/README.md
index c1f2f0e65..ac2e41838 100644
--- a/README.md
+++ b/README.md
@@ -126,7 +126,7 @@ This guide references the following information and links, some of which are ava
- eDSF Kit GitHub Repository
+ | eDSF Kit GitHub Repository
|
|
@@ -139,15 +139,21 @@ This guide references the following information and links, some of which are ava
Download Terraform
|
-Latest Supported Terraform Version: 1.5.x. Using a higher version may result in unexpected behavior or errors.
+Latest Supported Terraform Version: 1.7.x. Using a higher version may result in unexpected behavior or errors.
|
- Request access to DSF installation software on AWS - Request Form
+ | Request access to DSF installation software on AWS
|
Grants access for a specific AWS account to the DSF installation software.
|
+
+ Request access to DSF installation software on Azure
+ |
+ Copies the DSF installation software to an Azure storage account and configures programmatic deployment for the Azure images.
+ |
+
#### Version History
@@ -411,6 +417,26 @@ The following table lists the _latest_ eDSF Kit releases, their release date and
3. Improvements and bug fixes.
+
+ 24 Jan 2024
+ |
+ 1.7.8
+ |
+
+ 1. Added support for DRA in Azure.
+ 2. Added a new agentless source for Azure - MSSQL.
+ 3. Improvements and bug fixes.
+ |
+
+
+ Coming soon
+ |
+
+ |
+
+ 1. Added support for Terraform version 1.7.x.
+ |
+
# Getting Ready to Deploy
@@ -441,7 +467,7 @@ Before using eDSF Kit to deploy DSF, it is necessary to satisfy a set of prerequ
1. Only if you chose the [CLI Deployment Mode](#cli-deployment-mode), install [Git](https://git-scm.com).
2. Only if you chose the [CLI Deployment Mode](#cli-deployment-mode), install [Terraform](https://www.terraform.io). It is recommended on MacOS systems to use the "Package Manager" option during installation.
-3. Latest Supported Terraform Version: 1.6.x. Using a higher version may result in unexpected behavior or errors.
+3. Latest Supported Terraform Version: 1.7.x. Using a higher version may result in unexpected behavior or errors.
4. [jq](https://jqlang.github.io/jq/) - Command-line JSON processor.
5. [curl](https://curl.se/) - Command-line tool for transferring data.
@@ -452,12 +478,9 @@ Before using eDSF Kit to deploy DSF, it is necessary to satisfy a set of prerequ
### Azure Prerequisites
-1. [Establish an Azure App Registration](https://learn.microsoft.com/en-us/azure/healthcare-apis/register-application) and [assign it a custom role](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=delegate-condition)
+1. [Establish an Azure App Registration](https://learn.microsoft.com/en-us/azure/healthcare-apis/register-application) and [assign it a custom role](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal?tabs=delegate-condition) (without role assignment conditions)
under the associated subscription, ensuring the custom role includes the required IAM permissions (see [IAM Permissions for Running eDSF Kit section](#iam-permissions-for-azure)).
-2. Configure programmatic deployment for the desired version of Imperva DAM by [enabling it on the relevant DAM image from the Azure Marketplace](https://portal.azure.com/#view/Microsoft_Azure_Marketplace/LegalTermsSkuProgrammaticAccessBlade/legalTermsSkuProgrammaticAccessData~/%7B%22product%22%3A%7B%22publisherId%22%3A%22imperva%22%2C%22offerId%22%3A%22imperva-dam-v14%22%2C%22planId%22%3A%22securesphere-imperva-dam-14%22%2C%22standardContractAmendmentsRevisionId%22%3Anull%2C%22isCspEnabled%22%3Atrue%7D%7D). For DAM LTS version, use [DAM LTS Azure Marketplace image](https://portal.azure.com/#view/Microsoft_Azure_Marketplace/LegalTermsSkuProgrammaticAccessBlade/legalTermsSkuProgrammaticAccessData~/%7B%22product%22%3A%7B%22publisherId%22%3A%22imperva%22%2C%22offerId%22%3A%22imperva-dam-v14-lts%22%2C%22planId%22%3A%22securesphere-imperva-dam-14%22%2C%22standardContractAmendmentsRevisionId%22%3Anull%2C%22isCspEnabled%22%3Atrue%7D%7D).
-For the POC example, configure programmatic deployment also for [Ubuntu Pro 20.04 LTS image](https://portal.azure.com/#view/Microsoft_Azure_Marketplace/LegalTermsSkuProgrammaticAccessBlade/legalTermsSkuProgrammaticAccessData~/%7B%22product%22%3A%7B%22publisherId%22%3A%22canonical%22%2C%22offerId%22%3A%220001-com-ubuntu-pro-focal%22%2C%22planId%22%3A%22pro-20_04-lts%22%2C%22standardContractAmendmentsRevisionId%22%3Anull%2C%22isCspEnabled%22%3Atrue%7D%7D).
-3. The deployment requires access to the Sonar and DAM Agent installation binaries. Establish an Azure Storage account along with a container, and proceed to upload the Sonar and DAM Agent installation binaries to this storage location as a blob.
-
+2. The deployment requires access to the DSF installation software. [Click here to request access](https://docs.google.com/document/d/12r8olpvT1H4A_ZKUJBO_2AbTO7qmyihe1hQjHo-N3qg).
## Choosing the Example/Recipe that Fits Your Use Case
@@ -467,7 +490,7 @@ e.g., with or without DRA, the number of Agentless Gateways, with or without HAD
We provide several out-of-the-box Terraform recipes we call "examples", which are already configured to deploy common DSF environments.
You can use the example as is, or customize it to accommodate your deployment requirements.
-These examples can be found in the eDSF Kit GitHub Repository under the examples directory.
+These examples can be found in the eDSF Kit GitHub Repository under the examples directory.
Some examples are intended for Lab or POC and others for actual DSF deployments by Professional Services and customers.
For more details about each example, click on the example name.
@@ -486,73 +509,73 @@ For more details about each example, click on the example name.
- Sonar Basic Deployment
+ | Sonar Basic Deployment
|
Lab/POC
|
A DSF deployment with a DSF Hub, an Agentless Gateway, federation, networking and onboarding of a MySQL DB.
|
- sonar_basic_deployment_1_7_5.zip
+ | sonar_basic_deployment_1_7_8.zip
|
- Sonar HADR Deployment
+ | Sonar HADR Deployment
|
Lab/POC
|
A DSF deployment with a DSF Hub, an Agentless Gateway, DSF Hub and Agentless Gateway HADR, federation, networking and onboarding of a MySQL DB.
|
- sonar_hadr_deployment_1_7_5.zip
+ | sonar_hadr_deployment_1_7_8.zip
|
- Sonar Single Account Deployment
+ | Sonar Single Account Deployment
|
PS/Customer
|
A DSF deployment with a DSF Hub HADR, an Agentless Gateway and federation. The DSF nodes (Hubs and Agentless Gateway) are in the same AWS account and the same region. It is mandatory to provide as input to this example the subnets to deploy the DSF nodes on.
|
- sonar_single_account_deployment_1_7_5.zip
+ | sonar_single_account_deployment_1_7_8.zip
|
- Sonar Multi Account Deployment
+ | Sonar Multi Account Deployment
|
PS/Customer
|
A DSF deployment with a DSF Hub, an Agentless Gateway and federation. The DSF nodes (Hub and Agentless Gateway) are in different AWS accounts. It is mandatory to provide as input to this example the subnets to deploy the DSF nodes on.
|
- sonar_multi_account_deployment_1_7_5.zip
+ | sonar_multi_account_deployment_1_7_8.zip
|
- DSF Deployment
+ | DSF Deployment
|
Lab/POC
|
A full DSF deployment with DSF Hub and Agentless Gateways (formerly Sonar), DAM (MX and Agent Gateways), DRA (Admin and DRA Analytics), and Agent and Agentless audit sources.
|
- dsf_deployment_1_7_5.zip
+ | dsf_deployment_1_7_8.zip
|
- DSF Single Account Deployment
+ | DSF Single Account Deployment
|
PS/Customer
|
A full DSF deployment with DSF Hub and Agentless Gateways (formerly Sonar), DAM (MX and Agent Gateways) and DRA (Admin and DRA Analytics).
|
- dsf_single_account_deployment_1_7_5.zip
+ | dsf_single_account_deployment_1_7_8.zip
|
- Sonar Upgrade (Alpha)
+ | Sonar Upgrade (Alpha)
|
All
|
Upgrade of DSF Hub and Agentless Gateway (formerly Sonar).
|
- sonar_upgrade_1_7_5.zip
+ | sonar_upgrade_1_7_8.zip
|
@@ -571,13 +594,13 @@ For more details about each example, click on the example name.
- DSF Deployment
+ | DSF Deployment
|
Lab/POC
|
A DSF deployment with a DSF Hub, an Agentless Gateway, DSF Hub and Agentless Gateway HADR, federation and networking, DAM (MX and Agent Gateways), and Agent audit sources.
|
- dsf_deployment_1_7_5.zip
+ | dsf_deployment_1_7_8.zip
|
@@ -652,7 +675,7 @@ After you have [chosen the deployment mode](#choosing-the-deployment-mode), foll
This mode offers a straightforward deployment option that relies on running a Terraform script on the user's computer, which must be a Linux/Unix machine, e.g., Mac.
This mode makes use of the Terraform Command Line Interface (CLI) to deploy and manage environments.
-1. Download the zip file of the example you've chosen (See the [Choosing the Example/Recipe that Fits Your Use Case](#choosing-the-examplerecipe-that-fits-your-use-case) section) from the eDSF Kit GitHub Repository, e.g., if you choose the "sonar_basic_deployment" example, you should download sonar_basic_deployment.zip.
+1. Download the zip file of the example you've chosen (See the [Choosing the Example/Recipe that Fits Your Use Case](#choosing-the-examplerecipe-that-fits-your-use-case) section) from the eDSF Kit GitHub Repository, e.g., if you choose the "sonar_basic_deployment" example, you should download sonar_basic_deployment.zip.
2. Unzip the zip file in CLI or using your operating system's UI.
For example, in CLI:
@@ -750,7 +773,7 @@ This mode can be used if a Linux/Unix machine is not available, or eDSF Kit cann
5. In the Network settings panel - make your configurations while keeping in mind that the installer machine should have access to the DSF environment that you want to deploy, and that your computer should have access to the installer machine.
-6. In the “Advanced details” panel, copy and paste the contents of this [bash script](https://github.com/imperva/dsfkit/blob/1.7.5/installer_machine/installer_machine_user_data.sh) into the [User data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) textbox.
+6. In the “Advanced details” panel, copy and paste the contents of this [bash script](https://github.com/imperva/dsfkit/blob/1.7.8/installer_machine/installer_machine_user_data.sh) into the [User data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) textbox.
7. Click on **Launch Instance**. At this stage, the installer machine is initializing and downloading the necessary dependencies.
@@ -769,30 +792,30 @@ This mode can be used if a Linux/Unix machine is not available, or eDSF Kit cann
For example: `chmod 400 a_key_pair.pem`
-9. Download the zip file of the example you've chosen (See the [Choosing the Example/Recipe that Fits Your Use Case](#choosing-the-examplerecipe-that-fits-your-use-case) section) from the eDSF Kit GitHub Repository, e.g., if you choose the "sonar_basic_deployment" example, you should download sonar_basic_deployment.zip.
+9. Download the zip file of the example you've chosen (See the [Choosing the Example/Recipe that Fits Your Use Case](#choosing-the-examplerecipe-that-fits-your-use-case) section) from the eDSF Kit GitHub Repository, e.g., if you choose the "sonar_basic_deployment" example, you should download sonar_basic_deployment.zip.
Run:
```bash
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_8.zip
or
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_8.zip
or
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_8.zip
or
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_8.zip
or
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_8.zip
or
- wget https://github.com/imperva/dsfkit/raw/1.7.5/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_5.zip
+ wget https://github.com/imperva/dsfkit/raw/1.7.8/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_8.zip
```
10. Continue by following the [CLI Deployment Mode](#cli-deployment-mode) beginning at step 2.
@@ -1057,7 +1080,7 @@ Before using eDSF Kit to upgrade DSF Hubs and Agentless Gateways, it is necessar
2. Only if you chose the [CLI Upgrade Mode](#cli-upgrade-mode), install [Git](https://git-scm.com).
3. Only if you chose the [CLI Upgrade Mode](#cli-upgrade-mode), install [Terraform](https://www.terraform.io). It is recommended on MacOS systems to use the "Package Manager" option during installation.
4. Only if you chose the [CLI Upgrade Mode](#cli-upgrade-mode), install [Python 3](https://www.python.org).
-5. Latest Supported Terraform Version: 1.6.x. Using a higher version may result in unexpected behavior or errors.
+5. Latest Supported Terraform Version: 1.7.x. Using a higher version may result in unexpected behavior or errors.
6. The upgrade requires permission and network access (SSH) from your computer or the installer machine (depending on your choice of upgrade mode) to the deployed environment on AWS.
### Additional Prerequisites
@@ -1091,7 +1114,7 @@ After you have [chosen the upgrade mode](#choosing-the-upgrade-mode), follow the
This mode offers a straightforward deployment option that relies on running a Terraform script on the user's computer, which must be a Linux/Unix machine, e.g., Mac.
This mode makes use of the Terraform Command Line Interface (CLI) to deploy and manage environments.
-1. Download the zip file of the Sonar upgrade example: sonar_upgrade_1_7_5.zip.
+1. Download the zip file of the Sonar upgrade example: sonar_upgrade_1_7_8.zip.
2. Unzip the zip file in CLI or using your operating system's UI.
For example, in CLI:
@@ -1157,7 +1180,7 @@ This mode can be used if a Linux/Unix machine is not available, or eDSF Kit cann
5. In the Network settings panel - make your configurations while keeping in mind that the installer machine should have access to the DSF environment that you want to deploy, and that your computer should have access to the installer machine.
-6. In the “Advanced details” panel, copy and paste the contents of this [bash script](https://github.com/imperva/dsfkit/blob/1.7.5/installer_machine/upgrade_installer_machine_user_data.sh) into the [User data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) textbox.
+6. In the “Advanced details” panel, copy and paste the contents of this [bash script](https://github.com/imperva/dsfkit/blob/1.7.8/installer_machine/upgrade_installer_machine_user_data.sh) into the [User data](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) textbox.
9. Click on **Launch Instance**. At this stage, the installer machine is initializing and downloading the necessary dependencies.
@@ -1186,13 +1209,13 @@ This mode can be used if a Linux/Unix machine is not available, or eDSF Kit cann
If you do not wish to use Terraform to run the upgrade, it is possible to bypass it and run the Python utility directly.
-Use the Python Upgrader utility.
+Use the Python Upgrader utility.
# More Information
Information about additional topics can be found in specific examples, when relevant.
-For example: Sonar Single Account Deployment
+For example: Sonar Single Account Deployment
These topics include:
- Storing Terraform state in S3 bucket
diff --git a/examples/aws/installation/dsf_single_account_deployment/dam.tf b/examples/aws/installation/dsf_single_account_deployment/dam.tf
index a4937408f..371adf71d 100644
--- a/examples/aws/installation/dsf_single_account_deployment/dam.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/dam.tf
@@ -8,7 +8,7 @@ locals {
module "mx" {
source = "imperva/dsf-mx/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_dam ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "mx"])
@@ -38,7 +38,7 @@ module "mx" {
module "agent_gw" {
source = "imperva/dsf-agent-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agent_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agent", "gw", count.index])
@@ -68,7 +68,7 @@ module "agent_gw" {
module "agent_gw_cluster_setup" {
source = "imperva/dsf-agent-gw-cluster-setup/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.create_agent_gw_cluster
cluster_name = var.cluster_name != null ? var.cluster_name : join("-", [local.deployment_name_salted, "agent", "gw", "cluster"])
diff --git a/examples/aws/installation/dsf_single_account_deployment/dra.tf b/examples/aws/installation/dsf_single_account_deployment/dra.tf
index 558cd7707..188b2e782 100644
--- a/examples/aws/installation/dsf_single_account_deployment/dra.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/dra.tf
@@ -6,16 +6,16 @@ locals {
module "dra_admin" {
source = "imperva/dsf-dra-admin/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_dra ? 1 : 0
- friendly_name = join("-", [local.deployment_name_salted, "dra", "admin"])
+ name = join("-", [local.deployment_name_salted, "dra", "admin"])
subnet_id = var.subnet_ids.dra_admin_subnet_id
security_group_ids = var.security_group_ids_dra_admin
dra_version = module.globals.dra_version
ebs = var.dra_admin_ebs_details
admin_registration_password = local.password
- admin_password = local.password
+ admin_ssh_password = local.password
allowed_web_console_cidrs = var.web_console_cidr
allowed_analytics_cidrs = [data.aws_subnet.dra_analytics.cidr_block]
allowed_hub_cidrs = local.hub_cidr_list
@@ -28,16 +28,16 @@ module "dra_admin" {
module "dra_analytics" {
source = "imperva/dsf-dra-analytics/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.dra_analytics_count
- friendly_name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
+ name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
subnet_id = var.subnet_ids.dra_analytics_subnet_id
security_group_ids = var.security_group_ids_dra_analytics
dra_version = module.globals.dra_version
ebs = var.dra_analytics_ebs_details
admin_registration_password = local.password
- admin_password = local.password
+ analytics_ssh_password = local.password
allowed_admin_cidrs = [data.aws_subnet.dra_admin.cidr_block]
allowed_agent_gateways_cidrs = local.agent_gw_cidr_list
allowed_hub_cidrs = local.hub_cidr_list
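
For readability, this is how the updated DRA module calls read after the argument renames above (friendly_name to name, admin_password to admin_ssh_password / analytics_ssh_password); a trimmed sketch, with the unchanged arguments elided.

```hcl
# Trimmed sketch of the renamed arguments only; remaining arguments as in the example.
module "dra_admin" {
  source  = "imperva/dsf-dra-admin/aws"
  version = "1.7.8"
  count   = var.enable_dra ? 1 : 0

  name                        = join("-", [local.deployment_name_salted, "dra", "admin"])
  admin_registration_password = local.password
  admin_ssh_password          = local.password
  # ... other arguments unchanged
}

module "dra_analytics" {
  source  = "imperva/dsf-dra-analytics/aws"
  version = "1.7.8"
  count   = local.dra_analytics_count

  name                        = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
  admin_registration_password = local.password
  analytics_ssh_password      = local.password
  # ... other arguments unchanged
}
```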
diff --git a/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_5.zip b/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_5.zip
deleted file mode 100644
index dbd6b740d..000000000
Binary files a/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_5.zip and /dev/null differ
diff --git a/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_8.zip b/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_8.zip
new file mode 100644
index 000000000..762172e2f
Binary files /dev/null and b/examples/aws/installation/dsf_single_account_deployment/dsf_single_account_deployment_1_7_8.zip differ
diff --git a/examples/aws/installation/dsf_single_account_deployment/main.tf b/examples/aws/installation/dsf_single_account_deployment/main.tf
index 70af6df35..4fa38fed3 100644
--- a/examples/aws/installation/dsf_single_account_deployment/main.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/main.tf
@@ -1,6 +1,6 @@
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
dra_version = var.dra_version
@@ -39,7 +39,7 @@ locals {
module "key_pair_hub_main" {
count = var.hub_main_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-hub-main"
private_key_filename = "ssh_keys/dsf_ssh_key-hub-main-${terraform.workspace}"
tags = local.tags
@@ -51,7 +51,7 @@ module "key_pair_hub_main" {
module "key_pair_hub_dr" {
count = var.hub_dr_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-hub-dr"
private_key_filename = "ssh_keys/dsf_ssh_key-hub-dr-${terraform.workspace}"
tags = local.tags
@@ -63,7 +63,7 @@ module "key_pair_hub_dr" {
module "key_pair_agentless_gw_main" {
count = var.agentless_gw_main_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-gw-main"
private_key_filename = "ssh_keys/dsf_ssh_key-agentless-gw-main-${terraform.workspace}"
tags = local.tags
@@ -75,7 +75,7 @@ module "key_pair_agentless_gw_main" {
module "key_pair_agentless_gw_dr" {
count = var.agentless_gw_dr_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-gw-dr"
private_key_filename = "ssh_keys/dsf_ssh_key-agentless-gw-dr-${terraform.workspace}"
tags = local.tags
@@ -87,7 +87,7 @@ module "key_pair_agentless_gw_dr" {
module "key_pair_mx" {
count = var.mx_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-mx"
private_key_filename = "ssh_keys/dsf_ssh_key-mx-${terraform.workspace}"
tags = local.tags
@@ -99,7 +99,7 @@ module "key_pair_mx" {
module "key_pair_agent_gw" {
count = var.agent_gw_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-agent-gw"
private_key_filename = "ssh_keys/dsf_ssh_key-agent-gw-${terraform.workspace}"
tags = local.tags
@@ -111,7 +111,7 @@ module "key_pair_agent_gw" {
module "key_pair_dra_admin" {
count = var.dra_admin_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-dra-admin"
private_key_filename = "ssh_keys/dsf_ssh_key-dra-admin-${terraform.workspace}"
tags = local.tags
@@ -123,7 +123,7 @@ module "key_pair_dra_admin" {
module "key_pair_dra_analytics" {
count = var.dra_analytics_key_pair == null ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-dra-analytics"
private_key_filename = "ssh_keys/dsf_ssh_key-dra-analytics-${terraform.workspace}"
tags = local.tags
diff --git a/examples/aws/installation/dsf_single_account_deployment/sonar.tf b/examples/aws/installation/dsf_single_account_deployment/sonar.tf
index 59117bd86..e5aa80c2a 100644
--- a/examples/aws/installation/dsf_single_account_deployment/sonar.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/sonar.tf
@@ -12,7 +12,7 @@ locals {
module "hub_main" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub", "main"])
@@ -53,8 +53,8 @@ module "hub_main" {
dra_details = var.enable_dra ? {
name = module.dra_admin[0].display_name
address = module.dra_admin[0].public_ip
- username = module.dra_admin[0].ssh_user
password = local.password
+ archiver_username = module.dra_analytics[0].archiver_user
archiver_password = module.dra_analytics[0].archiver_password
} : null
generate_access_tokens = true
@@ -67,7 +67,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar && var.hub_hadr ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
@@ -112,7 +112,7 @@ module "hub_dr" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.hub_dr) > 0 ? 1 : 0
sonar_version = module.globals.tarball_location.version
@@ -137,7 +137,7 @@ module "hub_hadr" {
module "agentless_gw_main" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agentless_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index, "main"])
@@ -177,7 +177,7 @@ module "agentless_gw_main" {
module "agentless_gw_dr" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.agentless_gw_hadr ? local.agentless_gw_count : 0
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index, "DR"])
@@ -220,7 +220,7 @@ module "agentless_gw_dr" {
module "agentless_gw_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.agentless_gw_dr)
sonar_version = module.globals.tarball_location.version
@@ -266,7 +266,7 @@ locals {
module "federation" {
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = local.hub_gw_combinations
hub_info = {
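
The dra_details change above swaps the admin SSH username for the analytics archiver username. A minimal sketch of the object as it reads after this change, pulled together from the diff lines above (the local name is illustrative only):

```hcl
locals {
  # Sketch: the dra_details object passed to the hub module after this change.
  hub_dra_details = var.enable_dra ? {
    name              = module.dra_admin[0].display_name
    address           = module.dra_admin[0].public_ip
    password          = local.password
    archiver_username = module.dra_analytics[0].archiver_user
    archiver_password = module.dra_analytics[0].archiver_password
  } : null
}
```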
diff --git a/examples/aws/installation/dsf_single_account_deployment/variables.tf b/examples/aws/installation/dsf_single_account_deployment/variables.tf
index b163ebd09..be8529d91 100644
--- a/examples/aws/installation/dsf_single_account_deployment/variables.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/variables.tf
@@ -475,6 +475,7 @@ variable "dam_license" {
2. License file path
EOF
type = string
+ default = null
}
variable "large_scale_mode" {
diff --git a/examples/aws/installation/dsf_single_account_deployment/versions.tf b/examples/aws/installation/dsf_single_account_deployment/versions.tf
index 23724e137..8ed85317d 100644
--- a/examples/aws/installation/dsf_single_account_deployment/versions.tf
+++ b/examples/aws/installation/dsf_single_account_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/installation/sonar_multi_account_deployment/main.tf b/examples/aws/installation/sonar_multi_account_deployment/main.tf
index a68a14886..ba23b69a5 100644
--- a/examples/aws/installation/sonar_multi_account_deployment/main.tf
+++ b/examples/aws/installation/sonar_multi_account_deployment/main.tf
@@ -1,6 +1,6 @@
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
}
@@ -25,7 +25,7 @@ locals {
module "key_pair_hub_main" {
count = local.should_create_hub_main_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-hub-main"
private_key_filename = "ssh_keys/dsf_ssh_key-hub-main-${terraform.workspace}"
tags = local.tags
@@ -37,7 +37,7 @@ module "key_pair_hub_main" {
module "key_pair_hub_dr" {
count = local.should_create_hub_dr_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-hub-dr"
private_key_filename = "ssh_keys/dsf_ssh_key-hub-dr-${terraform.workspace}"
tags = local.tags
@@ -49,7 +49,7 @@ module "key_pair_hub_dr" {
module "key_pair_gw_main" {
count = local.should_create_gw_main_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-gw"
private_key_filename = "ssh_keys/dsf_ssh_key-gw-main-${terraform.workspace}"
tags = local.tags
@@ -61,7 +61,7 @@ module "key_pair_gw_main" {
module "key_pair_gw_dr" {
count = local.should_create_gw_dr_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-gw-dr"
private_key_filename = "ssh_keys/dsf_ssh_key-gw-dr-${terraform.workspace}"
tags = local.tags
@@ -106,7 +106,7 @@ locals {
##############################
module "hub_main" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "main"])
subnet_id = var.subnet_hub_main
security_group_ids = var.security_group_ids_hub_main
@@ -144,7 +144,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
subnet_id = var.subnet_hub_dr
security_group_ids = var.security_group_ids_hub_dr
@@ -186,7 +186,7 @@ module "hub_dr" {
module "agentless_gw_main" {
count = var.gw_count
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index, "main"])
subnet_id = var.subnet_gw_main
security_group_ids = var.security_group_ids_gw_main
@@ -225,7 +225,7 @@ module "agentless_gw_main" {
module "agentless_gw_dr" {
count = var.gw_count
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index, "DR"])
subnet_id = var.subnet_gw_dr
security_group_ids = var.security_group_ids_gw_dr
@@ -266,7 +266,7 @@ module "agentless_gw_dr" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = module.globals.tarball_location.version
dsf_main_ip = module.hub_main.private_ip
dsf_main_private_ip = module.hub_main.private_ip
@@ -290,7 +290,7 @@ module "hub_hadr" {
module "agentless_gw_hadr" {
count = var.gw_count
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = module.globals.tarball_location.version
dsf_main_ip = module.agentless_gw_main[count.index].private_ip
dsf_main_private_ip = module.agentless_gw_main[count.index].private_ip
@@ -324,7 +324,7 @@ locals {
module "federation" {
count = length(local.hub_gws_combinations)
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
gw_info = {
gw_ip_address = local.hub_gws_combinations[count.index][1].instance.private_ip
gw_federation_ip_address = local.hub_gws_combinations[count.index][1].instance.private_ip
diff --git a/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_5.zip b/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_8.zip
similarity index 81%
rename from examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_5.zip
rename to examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_8.zip
index 62beaa1b0..20fd96801 100644
Binary files a/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_5.zip and b/examples/aws/installation/sonar_multi_account_deployment/sonar_multi_account_deployment_1_7_8.zip differ
diff --git a/examples/aws/installation/sonar_multi_account_deployment/versions.tf b/examples/aws/installation/sonar_multi_account_deployment/versions.tf
index 87c868ff4..581f7504f 100644
--- a/examples/aws/installation/sonar_multi_account_deployment/versions.tf
+++ b/examples/aws/installation/sonar_multi_account_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/installation/sonar_single_account_deployment/main.tf b/examples/aws/installation/sonar_single_account_deployment/main.tf
index 28727f8bc..4274dd9cd 100644
--- a/examples/aws/installation/sonar_single_account_deployment/main.tf
+++ b/examples/aws/installation/sonar_single_account_deployment/main.tf
@@ -5,7 +5,7 @@ provider "aws" {
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
}
@@ -33,7 +33,7 @@ locals {
module "key_pair_hub" {
count = local.should_create_hub_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-hub"
private_key_filename = "ssh_keys/dsf_ssh_key-hub-${terraform.workspace}"
tags = local.tags
@@ -42,7 +42,7 @@ module "key_pair_hub" {
module "key_pair_gw" {
count = local.should_create_gw_key_pair ? 1 : 0
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-gw"
private_key_filename = "ssh_keys/dsf_ssh_key-gw-${terraform.workspace}"
tags = local.tags
@@ -72,7 +72,7 @@ data "aws_subnet" "subnet_gw" {
##############################
module "hub_main" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "main"])
subnet_id = var.subnet_hub_main
security_group_ids = var.security_group_ids_hub
@@ -103,7 +103,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
subnet_id = var.subnet_hub_dr
security_group_ids = var.security_group_ids_hub
@@ -138,7 +138,7 @@ module "hub_dr" {
module "agentless_gw" {
count = var.gw_count
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index])
subnet_id = var.subnet_gw
security_group_ids = var.security_group_ids_gw
@@ -172,7 +172,7 @@ module "agentless_gw" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = module.globals.tarball_location.version
dsf_main_ip = module.hub_main.private_ip
dsf_main_private_ip = module.hub_main.private_ip
@@ -199,7 +199,7 @@ locals {
module "federation" {
count = length(local.hub_gw_combinations)
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
hub_info = {
hub_ip_address = local.hub_gw_combinations[count.index][0].private_ip
diff --git a/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_5.zip b/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_8.zip
similarity index 81%
rename from examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_5.zip
rename to examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_8.zip
index 2a61792ab..06de73617 100644
Binary files a/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_5.zip and b/examples/aws/installation/sonar_single_account_deployment/sonar_single_account_deployment_1_7_8.zip differ
diff --git a/examples/aws/installation/sonar_single_account_deployment/versions.tf b/examples/aws/installation/sonar_single_account_deployment/versions.tf
index 87c868ff4..581f7504f 100644
--- a/examples/aws/installation/sonar_single_account_deployment/versions.tf
+++ b/examples/aws/installation/sonar_single_account_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/poc/dsf_deployment/README.md b/examples/aws/poc/dsf_deployment/README.md
index 7190a0e93..a1c4252e5 100644
--- a/examples/aws/poc/dsf_deployment/README.md
+++ b/examples/aws/poc/dsf_deployment/README.md
@@ -17,7 +17,7 @@ The deployment is modular and allows users to deploy one or more of the followin
- Agent Gateways
4. DRA
- Admin server
- - Analytic servers
+ - Analytics servers
5. Audit sources
- Agent audit sources (EC2 instances)
- Agentless audit sources (RDS instances)
@@ -61,7 +61,6 @@ This configuration will enable the Sonar module, including the DSF Hub, while di
Feel free to customize your deployment by setting the appropriate variables based on your requirements.
-
## Variables
Several variables in the `variables.tf` file are important for configuring the deployment. The following variables dictate the deployment content and deserve particular attention:
@@ -71,7 +70,7 @@ Several variables in the `variables.tf` file are important for configuring the d
- `enable_dra`: Enable DRA sub-product
### Server Count
-- `dra_analytics_count`: Number of DRA Analytic servers
+- `dra_analytics_count`: Number of DRA Analytics servers
- `agentless_gw_count`: Number of Agentless Gateways
- `agent_gw_count`: Number of Agent Gateways
diff --git a/examples/aws/poc/dsf_deployment/agent_sources.tf b/examples/aws/poc/dsf_deployment/agent_sources.tf
index 553c6f552..7c2134641 100644
--- a/examples/aws/poc/dsf_deployment/agent_sources.tf
+++ b/examples/aws/poc/dsf_deployment/agent_sources.tf
@@ -4,7 +4,7 @@ locals {
module "db_with_agent" {
source = "imperva/dsf-db-with-agent/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(local.db_types_for_agent)
friendly_name = join("-", [local.deployment_name_salted, "db", "with", "agent", count.index])
diff --git a/examples/aws/poc/dsf_deployment/agentless_sources.tf b/examples/aws/poc/dsf_deployment/agentless_sources.tf
index d4db87298..faf773666 100644
--- a/examples/aws/poc/dsf_deployment/agentless_sources.tf
+++ b/examples/aws/poc/dsf_deployment/agentless_sources.tf
@@ -4,7 +4,7 @@ locals {
module "rds_mysql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mysql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(local.db_types_for_agentless, "RDS MySQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -14,7 +14,7 @@ module "rds_mysql" {
module "rds_postgres" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-postgres-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(local.db_types_for_agentless, "RDS PostgreSQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -24,7 +24,7 @@ module "rds_postgres" {
module "rds_mssql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mssql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(local.db_types_for_agentless, "RDS MsSQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -39,10 +39,9 @@ module "rds_mssql" {
module "db_onboarding" {
source = "imperva/dsf-poc-db-onboarder/aws"
- version = "1.7.5" # latest release tag
- for_each = { for idx, val in concat(module.rds_mysql, module.rds_mssql) : idx => val }
+ version = "1.7.8" # latest release tag
+ for_each = { for idx, val in concat(module.rds_mysql, module.rds_mssql, module.rds_postgres) : idx => val }
- sonar_version = module.globals.tarball_location.version
usc_access_token = module.hub_main[0].access_tokens.usc.token
hub_info = {
hub_ip_address = module.hub_main[0].public_ip
@@ -66,6 +65,7 @@ module "db_onboarding" {
depends_on = [
module.federation,
module.rds_mysql,
+ module.rds_postgres,
module.rds_mssql
]
}
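
The onboarding module above now iterates over all three agentless sources. A standalone sketch of the `for_each` pattern it relies on, using placeholder lists so it can be run on its own:

```hcl
# Standalone sketch: concat() merges the (possibly empty) per-DB lists and the
# for-expression turns the result into an index-keyed map, as for_each requires.
locals {
  mysql_dbs    = ["mysql-0"]    # stand-in for module.rds_mysql
  mssql_dbs    = []             # stand-in for module.rds_mssql (disabled)
  postgres_dbs = ["postgres-0"] # stand-in for module.rds_postgres

  onboarded = { for idx, val in concat(local.mysql_dbs, local.mssql_dbs, local.postgres_dbs) : idx => val }
}

output "onboarded" {
  value = local.onboarded # => { "0" = "mysql-0", "1" = "postgres-0" }
}
```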
diff --git a/examples/aws/poc/dsf_deployment/dam.tf b/examples/aws/poc/dsf_deployment/dam.tf
index acf405f2c..d63fb27c3 100644
--- a/examples/aws/poc/dsf_deployment/dam.tf
+++ b/examples/aws/poc/dsf_deployment/dam.tf
@@ -8,7 +8,7 @@ locals {
module "mx" {
source = "imperva/dsf-mx/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_dam ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "mx"])
@@ -41,7 +41,7 @@ module "mx" {
module "agent_gw" {
source = "imperva/dsf-agent-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agent_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agent", "gw", count.index])
@@ -67,7 +67,7 @@ module "agent_gw" {
module "agent_gw_cluster_setup" {
source = "imperva/dsf-agent-gw-cluster-setup/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.create_agent_gw_cluster
cluster_name = join("-", [local.deployment_name_salted, "agent", "gw", "cluster"])
diff --git a/examples/aws/poc/dsf_deployment/dra.tf b/examples/aws/poc/dsf_deployment/dra.tf
index 96bdb30cd..632ce1509 100644
--- a/examples/aws/poc/dsf_deployment/dra.tf
+++ b/examples/aws/poc/dsf_deployment/dra.tf
@@ -6,21 +6,23 @@ locals {
module "dra_admin" {
source = "imperva/dsf-dra-admin/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_dra ? 1 : 0
- friendly_name = join("-", [local.deployment_name_salted, "dra", "admin"])
+ name = join("-", [local.deployment_name_salted, "dra", "admin"])
subnet_id = local.dra_admin_subnet_id
dra_version = module.globals.dra_version
ebs = var.dra_admin_ebs_details
+ key_pair = module.key_pair.key_pair.key_pair_name
admin_registration_password = local.password
- admin_password = local.password
+ admin_ssh_password = local.password
allowed_web_console_cidrs = var.web_console_cidr
allowed_analytics_cidrs = [data.aws_subnet.dra_analytics.cidr_block]
allowed_hub_cidrs = local.hub_cidr_list
allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
- key_pair = module.key_pair.key_pair.key_pair_name
- tags = local.tags
+ attach_persistent_public_ip = true
+
+ tags = local.tags
depends_on = [
module.vpc
]
@@ -28,15 +30,15 @@ module "dra_admin" {
module "dra_analytics" {
source = "imperva/dsf-dra-analytics/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.dra_analytics_count
- friendly_name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
+ name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
subnet_id = local.dra_analytics_subnet_id
dra_version = module.globals.dra_version
ebs = var.dra_analytics_ebs_details
admin_registration_password = local.password
- admin_password = local.password
+ analytics_ssh_password = local.password
allowed_admin_cidrs = [data.aws_subnet.dra_admin.cidr_block]
allowed_ssh_cidrs = concat(local.hub_cidr_list, var.allowed_ssh_cidrs)
key_pair = module.key_pair.key_pair.key_pair_name
diff --git a/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_5.zip b/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_5.zip
deleted file mode 100644
index 89205b88d..000000000
Binary files a/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_5.zip and /dev/null differ
diff --git a/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_8.zip b/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_8.zip
new file mode 100644
index 000000000..fb1eb1a78
Binary files /dev/null and b/examples/aws/poc/dsf_deployment/dsf_deployment_1_7_8.zip differ
diff --git a/examples/aws/poc/dsf_deployment/main.tf b/examples/aws/poc/dsf_deployment/main.tf
index dbb4cab71..35df6bf31 100644
--- a/examples/aws/poc/dsf_deployment/main.tf
+++ b/examples/aws/poc/dsf_deployment/main.tf
@@ -8,7 +8,7 @@ provider "aws" {
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
dra_version = var.dra_version
@@ -16,7 +16,7 @@ module "globals" {
module "key_pair" {
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-"
private_key_filename = "ssh_keys/dsf_ssh_key-${terraform.workspace}"
diff --git a/examples/aws/poc/dsf_deployment/sonar.tf b/examples/aws/poc/dsf_deployment/sonar.tf
index 6ae8baddf..e481cda38 100644
--- a/examples/aws/poc/dsf_deployment/sonar.tf
+++ b/examples/aws/poc/dsf_deployment/sonar.tf
@@ -1,5 +1,4 @@
locals {
- database_cidr = var.database_cidr != null ? var.database_cidr : local.workstation_cidr_24
tarball_location = var.tarball_location != null ? var.tarball_location : module.globals.tarball_location
agentless_gw_count = var.enable_sonar ? var.agentless_gw_count : 0
@@ -11,7 +10,7 @@ locals {
module "hub_main" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub", "main"])
@@ -42,8 +41,8 @@ module "hub_main" {
dra_details = var.enable_dra ? {
name = module.dra_admin[0].display_name
address = module.dra_admin[0].public_ip
- username = module.dra_admin[0].ssh_user
password = local.password
+ archiver_username = module.dra_analytics[0].archiver_user
archiver_password = module.dra_analytics[0].archiver_password
} : null
tags = local.tags
@@ -54,7 +53,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar && var.hub_hadr ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
@@ -87,7 +86,7 @@ module "hub_dr" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.hub_dr) > 0 ? 1 : 0
sonar_version = module.globals.tarball_location.version
@@ -105,7 +104,7 @@ module "hub_hadr" {
module "agentless_gw_main" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agentless_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index, "main"])
@@ -136,7 +135,7 @@ module "agentless_gw_main" {
module "agentless_gw_dr" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.agentless_gw_hadr ? local.agentless_gw_count : 0
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index, "DR"])
@@ -170,7 +169,7 @@ module "agentless_gw_dr" {
module "agentless_gw_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.agentless_gw_dr)
sonar_version = module.globals.tarball_location.version
@@ -214,7 +213,7 @@ locals {
module "federation" {
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = local.hub_gw_combinations
hub_info = {
diff --git a/examples/aws/poc/dsf_deployment/variables.tf b/examples/aws/poc/dsf_deployment/variables.tf
index b67b8e83d..5b7069171 100644
--- a/examples/aws/poc/dsf_deployment/variables.tf
+++ b/examples/aws/poc/dsf_deployment/variables.tf
@@ -137,6 +137,7 @@ variable "dam_license" {
2. License file path (Make sure it allows AWS DAM models (AV2500/AV6500))
EOF
type = string
+ default = null
}
variable "large_scale_mode" {
@@ -250,12 +251,12 @@ variable "hub_ebs_details" {
provisioned_iops = number
throughput = number
})
- description = "DSF Hub compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm"
default = {
disk_size = 250
provisioned_iops = 0
throughput = 125
}
+ description = "DSF Hub compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm"
}
variable "agentless_gw_ebs_details" {
@@ -264,12 +265,12 @@ variable "agentless_gw_ebs_details" {
provisioned_iops = number
throughput = number
})
- description = "DSF Agentless Gateway compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm"
default = {
disk_size = 150
provisioned_iops = 0
throughput = 125
}
+ description = "DSF Agentless Gateway compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.10-sonar-installation-and-setup-guide/page/78729.htm"
}
variable "additional_install_parameters" {
@@ -280,7 +281,7 @@ variable "additional_install_parameters" {
variable "simulation_db_types_for_agentless" {
type = list(string)
default = ["RDS MsSQL"]
- description = "Types of databases to provision and onboard to an Agentless Gateway for simulation purposes. Available types are: 'RDS MySQL' and 'RDS MsSQL'. 'RDS MsSQL' includes simulation data."
+ description = "Types of databases to provision and onboard to an Agentless Gateway for simulation purposes. Available types are: 'RDS MySQL', 'RDS PostgreSQL' and 'RDS MsSQL'. 'RDS MsSQL' includes simulation data."
validation {
condition = alltrue([
for db_type in var.simulation_db_types_for_agentless : contains(["RDS MySQL", "RDS MsSQL", "RDS PostgreSQL"], db_type)
@@ -289,12 +290,6 @@ variable "simulation_db_types_for_agentless" {
}
}
-variable "database_cidr" {
- type = list(string)
- default = null # workstation ip
- description = "CIDR blocks allowing dummy database access"
-}
-
##############################
#### DRA variables ####
##############################
diff --git a/examples/aws/poc/dsf_deployment/versions.tf b/examples/aws/poc/dsf_deployment/versions.tf
index 23724e137..8ed85317d 100644
--- a/examples/aws/poc/dsf_deployment/versions.tf
+++ b/examples/aws/poc/dsf_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/poc/sonar_basic_deployment/main.tf b/examples/aws/poc/sonar_basic_deployment/main.tf
index af1c7ee7e..5c06e97f3 100644
--- a/examples/aws/poc/sonar_basic_deployment/main.tf
+++ b/examples/aws/poc/sonar_basic_deployment/main.tf
@@ -8,14 +8,14 @@ provider "aws" {
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
}
module "key_pair" {
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-"
private_key_filename = "ssh_keys/dsf_ssh_key-${terraform.workspace}"
@@ -33,7 +33,6 @@ locals {
locals {
password = var.password != null ? var.password : module.globals.random_password
workstation_cidr = var.workstation_cidr != null ? var.workstation_cidr : local.workstation_cidr_24
- database_cidr = var.database_cidr != null ? var.database_cidr : local.workstation_cidr_24
tarball_location = module.globals.tarball_location
tags = merge(module.globals.tags, { "deployment_name" = local.deployment_name_salted })
hub_subnet_id = var.subnet_ids != null ? var.subnet_ids.hub_subnet_id : module.vpc[0].public_subnets[0]
@@ -80,7 +79,7 @@ data "aws_subnet" "gw" {
module "hub" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub"])
instance_type = var.hub_instance_type
@@ -107,7 +106,7 @@ module "hub" {
module "agentless_gw" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.gw_count
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index])
@@ -137,7 +136,7 @@ module "agentless_gw" {
module "federation" {
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = { for idx, val in module.agentless_gw : idx => val }
hub_info = {
@@ -165,7 +164,7 @@ module "federation" {
module "rds_mysql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mysql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(var.db_types_to_onboard, "RDS MySQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -175,7 +174,7 @@ module "rds_mysql" {
module "rds_mssql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mssql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(var.db_types_to_onboard, "RDS MsSQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -190,10 +189,9 @@ module "rds_mssql" {
module "db_onboarding" {
source = "imperva/dsf-poc-db-onboarder/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = { for idx, val in concat(module.rds_mysql, module.rds_mssql) : idx => val }
- sonar_version = module.globals.tarball_location.version
usc_access_token = module.hub.access_tokens.usc.token
hub_info = {
hub_ip_address = module.hub.public_ip
diff --git a/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_5.zip b/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_5.zip
deleted file mode 100644
index 30f34b9b7..000000000
Binary files a/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_5.zip and /dev/null differ
diff --git a/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_8.zip b/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_8.zip
new file mode 100644
index 000000000..cc5ad004e
Binary files /dev/null and b/examples/aws/poc/sonar_basic_deployment/sonar_basic_deployment_1_7_8.zip differ
diff --git a/examples/aws/poc/sonar_basic_deployment/variables.tf b/examples/aws/poc/sonar_basic_deployment/variables.tf
index 82688bdb1..ea0a4aff7 100644
--- a/examples/aws/poc/sonar_basic_deployment/variables.tf
+++ b/examples/aws/poc/sonar_basic_deployment/variables.tf
@@ -33,12 +33,6 @@ variable "web_console_cidr" {
description = "DSF Hub web console IPs range. Note that when running the deployment specify IPs in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]. The default configuration opens the DSF Hub web console as a public website. It is recommended to specify a more restricted IP and CIDR range."
}
-variable "database_cidr" {
- type = list(string)
- default = null # workstation ip
- description = "CIDR blocks allowing dummy database access"
-}
-
variable "workstation_cidr" {
type = list(string)
default = null
diff --git a/examples/aws/poc/sonar_basic_deployment/versions.tf b/examples/aws/poc/sonar_basic_deployment/versions.tf
index 87c868ff4..581f7504f 100644
--- a/examples/aws/poc/sonar_basic_deployment/versions.tf
+++ b/examples/aws/poc/sonar_basic_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/poc/sonar_hadr_deployment/main.tf b/examples/aws/poc/sonar_hadr_deployment/main.tf
index 656d1e451..e36280cf6 100644
--- a/examples/aws/poc/sonar_hadr_deployment/main.tf
+++ b/examples/aws/poc/sonar_hadr_deployment/main.tf
@@ -8,13 +8,13 @@ provider "aws" {
module "globals" {
source = "imperva/dsf-globals/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = var.sonar_version
}
module "key_pair" {
source = "imperva/dsf-globals/aws//modules/key_pair"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
key_name_prefix = "imperva-dsf-"
private_key_filename = "ssh_keys/dsf_ssh_key-${terraform.workspace}"
tags = local.tags
@@ -31,7 +31,6 @@ locals {
locals {
password = var.password != null ? var.password : module.globals.random_password
workstation_cidr = var.workstation_cidr != null ? var.workstation_cidr : local.workstation_cidr_24
- database_cidr = var.database_cidr != null ? var.database_cidr : local.workstation_cidr_24
tarball_location = var.tarball_location != null ? var.tarball_location : module.globals.tarball_location
tags = merge(module.globals.tags, { "deployment_name" = local.deployment_name_salted })
main_hub_subnet_id = var.subnet_ids != null ? var.subnet_ids.main_hub_subnet_id : module.vpc[0].public_subnets[0]
@@ -87,7 +86,7 @@ module "vpc" {
##############################
module "hub_main" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "main"])
instance_type = var.hub_instance_type
@@ -115,7 +114,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
instance_type = var.hub_instance_type
@@ -145,7 +144,7 @@ module "hub_dr" {
module "agentless_gw_main" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.gw_count
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index, "main"])
@@ -176,7 +175,7 @@ module "agentless_gw_main" {
module "agentless_gw_dr" {
source = "imperva/dsf-agentless-gw/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.gw_count
friendly_name = join("-", [local.deployment_name_salted, "gw", count.index, "DR"])
@@ -210,7 +209,7 @@ module "agentless_gw_dr" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
sonar_version = module.globals.tarball_location.version
dsf_main_ip = module.hub_main.public_ip
@@ -227,7 +226,7 @@ module "hub_hadr" {
module "agentless_gw_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.gw_count
sonar_version = module.globals.tarball_location.version
@@ -260,7 +259,7 @@ locals {
module "federation" {
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(local.hub_gw_combinations)
hub_info = {
@@ -288,7 +287,7 @@ module "federation" {
module "rds_mysql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mysql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(var.db_types_to_onboard, "RDS MySQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -299,7 +298,7 @@ module "rds_mysql" {
# create a RDS SQL Server DB
module "rds_mssql" {
source = "imperva/dsf-poc-db-onboarder/aws//modules/rds-mssql-db"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = contains(var.db_types_to_onboard, "RDS MsSQL") ? 1 : 0
rds_subnet_ids = local.db_subnet_ids
@@ -314,10 +313,9 @@ module "rds_mssql" {
module "db_onboarding" {
source = "imperva/dsf-poc-db-onboarder/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = { for idx, val in concat(module.rds_mysql, module.rds_mssql) : idx => val }
- sonar_version = module.globals.tarball_location.version
usc_access_token = module.hub_main.access_tokens.usc.token
hub_info = {
hub_ip_address = module.hub_main.public_ip
diff --git a/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_5.zip b/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_5.zip
deleted file mode 100644
index 83b631ae0..000000000
Binary files a/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_5.zip and /dev/null differ
diff --git a/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_8.zip b/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_8.zip
new file mode 100644
index 000000000..9371f1070
Binary files /dev/null and b/examples/aws/poc/sonar_hadr_deployment/sonar_hadr_deployment_1_7_8.zip differ
diff --git a/examples/aws/poc/sonar_hadr_deployment/variables.tf b/examples/aws/poc/sonar_hadr_deployment/variables.tf
index 200fc9fa9..295bca207 100644
--- a/examples/aws/poc/sonar_hadr_deployment/variables.tf
+++ b/examples/aws/poc/sonar_hadr_deployment/variables.tf
@@ -47,12 +47,6 @@ variable "web_console_cidr" {
description = "DSF Hub web console CIDR blocks in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]. The default configuration opens the DSF Hub web console as a public website. It is recommended to specify a more restricted IP and CIDR range."
}
-variable "database_cidr" {
- type = list(string)
- default = null # workstation ip
- description = "CIDR blocks allowing dummy database access in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]"
-}
-
variable "workstation_cidr" {
type = list(string)
default = null
diff --git a/examples/aws/poc/sonar_hadr_deployment/versions.tf b/examples/aws/poc/sonar_hadr_deployment/versions.tf
index 87c868ff4..581f7504f 100644
--- a/examples/aws/poc/sonar_hadr_deployment/versions.tf
+++ b/examples/aws/poc/sonar_hadr_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/examples/aws/sonar_upgrade/main.tf b/examples/aws/sonar_upgrade/main.tf
index 9e4f09ba5..26fba1829 100644
--- a/examples/aws/sonar_upgrade/main.tf
+++ b/examples/aws/sonar_upgrade/main.tf
@@ -1,6 +1,6 @@
module "sonar_upgrader" {
source = "imperva/dsf-sonar-upgrader/aws"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
# Fill the details of the Agentless Gateways and DSF Hubs that you want to upgrade
agentless_gws = [
diff --git a/examples/aws/sonar_upgrade/sonar_upgrade_1_7_5.zip b/examples/aws/sonar_upgrade/sonar_upgrade_1_7_5.zip
deleted file mode 100644
index e3a16e3a5..000000000
Binary files a/examples/aws/sonar_upgrade/sonar_upgrade_1_7_5.zip and /dev/null differ
diff --git a/examples/aws/sonar_upgrade/sonar_upgrade_1_7_8.zip b/examples/aws/sonar_upgrade/sonar_upgrade_1_7_8.zip
new file mode 100644
index 000000000..2a8fea3aa
Binary files /dev/null and b/examples/aws/sonar_upgrade/sonar_upgrade_1_7_8.zip differ
diff --git a/examples/aws/sonar_upgrade/versions.tf b/examples/aws/sonar_upgrade/versions.tf
index 3ec2b2811..36219a637 100644
--- a/examples/aws/sonar_upgrade/versions.tf
+++ b/examples/aws/sonar_upgrade/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/examples/azure/poc/dsf_deployment/README.md b/examples/azure/poc/dsf_deployment/README.md
index 792e4bb7d..54d7374d8 100644
--- a/examples/azure/poc/dsf_deployment/README.md
+++ b/examples/azure/poc/dsf_deployment/README.md
@@ -1,7 +1,7 @@
# DSF Deployment example
[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
-This example provides DSF (Data Security Fabric) deployment with DSF Hub, Agentless Gateways, DAM (Database Activity Monitoring) and Agent audit sources.
+This example provides a full DSF (Data Security Fabric) deployment with DSF Hub, Agentless Gateways, DAM (Database Activity Monitoring), DRA (Data Risk Analytics) and Agent and Agentless audit sources.
## Modularity
The deployment is modular and allows users to deploy one or more of the following modules:
@@ -15,8 +15,12 @@ The deployment is modular and allows users to deploy one or more of the followin
3. DAM
- MX
- Agent Gateways
-4. Audit sources
- - Agent audit sources (virtual machine instances)
+4. DRA
+ - Admin server
+ - Analytics servers
+5. Audit sources
+ - Agent audit sources (Virtual Machine instances)
+ - Agentless audit source (SQL Server instance)
### Deploying Specific Modules
@@ -28,19 +32,32 @@ To deploy only the DAM module, set the following variables in your Terraform con
```
enable_dam = true
enable_sonar = false
+enable_dra = false
```
-This configuration will enable the DAM module while disabling the Sonar module.
+This configuration will enable the DAM module while disabling the DSF Hub and DRA modules.
-#### 2. Sonar Only Deployment
+#### 2. DRA Only Deployment
+
+To deploy only the DRA module, set the following variables in your Terraform configuration:
+```
+enable_dam = false
+enable_sonar = false
+enable_dra = true
+```
+
+This configuration will enable the DRA module while disabling the DSF Hub and DAM modules.
+
+#### 3. Sonar Only Deployment
To deploy only the Sonar module, set the following variables in your Terraform configuration:
```
enable_dam = false
enable_sonar = true
+enable_dra = false
```
-This configuration will enable the Sonar module, including the DSF Hub, while disabling the DAM module.
+This configuration will enable the Sonar module, including the DSF Hub, while disabling the DAM and DRA modules.
Feel free to customize your deployment by setting the appropriate variables based on your requirements.
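For example, a combined Sonar and DRA deployment with two Agentless Gateways could be expressed as follows (an illustrative sketch only, values are placeholders; the variable names come from this example's `variables.tf`):
```
resource_group_location = "East US"
enable_sonar            = true
enable_dam              = false
enable_dra              = true
agentless_gw_count      = 2
dra_analytics_count     = 1
hub_hadr                = true
agentless_gw_hadr       = false
```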
@@ -50,8 +67,10 @@ Several variables in the `variables.tf` file are important for configuring the d
### Sub-Products
- `enable_sonar`: Enable Sonar sub-product
- `enable_dam`: Enable DAM sub-product
+- `enable_dra`: Enable DRA sub-product
### Server Count
+- `dra_analytics_count`: Number of DRA Analytics servers
- `agentless_gw_count`: Number of Agentless Gateways
- `agent_gw_count`: Number of Agent Gateways
@@ -60,14 +79,17 @@ Several variables in the `variables.tf` file are important for configuring the d
- `agentless_gw_hadr`: Enable Agentless Gateway High Availability Disaster Recovery (HADR)
### Audit Sources for Simulation Purposes
+- `simulation_db_types_for_agentless`: Types of databases to provision and onboard to an Agentless Gateway
- `simulation_db_types_for_agent`: Types of databases to provision for Agent Gateways
## Mandatory Variables
Before initiating the Terraform deployment, it is essential to set up the following variables:
- `resource_group_location`: The region of the resource group to which all DSF components will be associated.
-- `tarball_location`: Storage account and container location of the DSF installation software. az_blob is the full path to the tarball file within the storage account container.
-- `dam_agent_installation_location`: Storage account and container location of the DAM Agent installation software. az_blob is the full path to the installation file within the storage account container.
-- `dam_license`: DAM license file path.
+- `tarball_location`: Only when deploying Sonar, storage account and container location of the DSF Sonar installation software. 'az_blob' is the full path to the tarball file within the storage account container.
+- `dam_agent_installation_location`: Only when deploying DAM, storage account and container location of the DAM Agent installation software. 'az_blob' is the full path to the installation file within the storage account container.
+- `dam_license`: Only when deploying DAM, DAM license file path.
+- `dra_admin_image_details` or `dra_admin_vhd_details`: Only when deploying DRA, the image or VHD details of the DRA Admin server.
+- `dra_analytics_image_details` or `dra_analytics_vhd_details`: Only when deploying DRA, the image or VHD details of the DRA Analytics server (see the sketch after this list).
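A minimal sketch of the DRA image variables, assuming pre-built Azure images are available for both servers (the resource group and image names below are placeholders):
```
dra_admin_image_details = {
  resource_group_name = "dra-images-rg"
  image_id            = "/subscriptions/<subscription-id>/resourceGroups/dra-images-rg/providers/Microsoft.Compute/images/dra-admin"
}

dra_analytics_image_details = {
  resource_group_name = "dra-images-rg"
  image_id            = "/subscriptions/<subscription-id>/resourceGroups/dra-images-rg/providers/Microsoft.Compute/images/dra-analytics"
}
```
Alternatively, set `dra_admin_vhd_details` and `dra_analytics_vhd_details` (path to the VHD, storage account name and container name) and leave the image variables unset.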
## Default Example
To perform the default deployment, run the following command:
diff --git a/examples/azure/poc/dsf_deployment/agent_sources.tf b/examples/azure/poc/dsf_deployment/agent_sources.tf
index a0bfef673..64bc31c92 100644
--- a/examples/azure/poc/dsf_deployment/agent_sources.tf
+++ b/examples/azure/poc/dsf_deployment/agent_sources.tf
@@ -4,7 +4,7 @@ locals {
module "db_with_agent" {
source = "imperva/dsf-db-with-agent/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(local.db_types_for_agent)
friendly_name = join("-", [local.deployment_name_salted, "db", "with", "agent", count.index])
diff --git a/examples/azure/poc/dsf_deployment/agentless_sources.tf b/examples/azure/poc/dsf_deployment/agentless_sources.tf
new file mode 100644
index 000000000..0803d0c08
--- /dev/null
+++ b/examples/azure/poc/dsf_deployment/agentless_sources.tf
@@ -0,0 +1,44 @@
+locals {
+ db_types_for_agentless = local.agentless_gw_count > 0 ? var.simulation_db_types_for_agentless : []
+}
+
+module "mssql" {
+ source = "imperva/dsf-poc-db-onboarder/azurerm//modules/mssql-db"
+ version = "1.7.8" # latest release tag
+ count = contains(local.db_types_for_agentless, "MsSQL") ? 1 : 0
+
+ resource_group = local.resource_group
+ security_group_ingress_cidrs = local.workstation_cidr
+
+ tags = local.tags
+}
+
+module "db_onboarding" {
+ source = "imperva/dsf-poc-db-onboarder/azurerm"
+ version = "1.7.8" # latest release tag
+ for_each = { for idx, val in concat(module.mssql) : idx => val }
+
+ resource_group = local.resource_group
+ usc_access_token = module.hub_main[0].access_tokens.usc.token
+ hub_info = {
+ hub_ip_address = module.hub_main[0].public_ip
+ hub_private_ssh_key_path = local_sensitive_file.ssh_key.filename
+ hub_ssh_user = module.hub_main[0].ssh_user
+ }
+
+ assignee_gw = module.agentless_gw_main[0].jsonar_uid
+ assignee_role = module.agentless_gw_main[0].principal_id
+
+ database_details = {
+ db_server_id = each.value.db_server_id
+ db_port = each.value.db_port
+ db_engine = each.value.db_engine
+ db_identifier = each.value.db_identifier
+ db_address = each.value.db_address
+ }
+ tags = local.tags
+ depends_on = [
+ module.federation,
+ module.mssql
+ ]
+}
diff --git a/examples/azure/poc/dsf_deployment/dam.tf b/examples/azure/poc/dsf_deployment/dam.tf
index a7ed54916..f323aeb10 100644
--- a/examples/azure/poc/dsf_deployment/dam.tf
+++ b/examples/azure/poc/dsf_deployment/dam.tf
@@ -6,7 +6,7 @@ locals {
module "mx" {
source = "imperva/dsf-mx/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_dam ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "mx"])
@@ -21,7 +21,7 @@ module "mx" {
mx_password = local.password
allowed_web_console_and_api_cidrs = var.web_console_cidr
allowed_agent_gw_cidrs = module.network[0].vnet_address_space
- allowed_ssh_cidrs = local.workstation_cidr
+ allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
allowed_hub_cidrs = module.network[0].vnet_address_space
hub_details = var.enable_sonar ? {
@@ -41,7 +41,7 @@ module "mx" {
module "agent_gw" {
source = "imperva/dsf-agent-gw/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agent_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agent", "gw", count.index])
@@ -55,7 +55,7 @@ module "agent_gw" {
mx_password = local.password
allowed_agent_cidrs = module.network[0].vnet_address_space
allowed_mx_cidrs = module.network[0].vnet_address_space
- allowed_ssh_cidrs = module.network[0].vnet_address_space
+ allowed_ssh_cidrs = concat(module.network[0].vnet_address_space, var.allowed_ssh_cidrs)
allowed_gw_clusters_cidrs = module.network[0].vnet_address_space
management_server_host_for_registration = module.mx[0].private_ip
management_server_host_for_api_access = module.mx[0].public_ip
diff --git a/examples/azure/poc/dsf_deployment/dra.tf b/examples/azure/poc/dsf_deployment/dra.tf
new file mode 100644
index 000000000..9d95e56ad
--- /dev/null
+++ b/examples/azure/poc/dsf_deployment/dra.tf
@@ -0,0 +1,88 @@
+locals {
+ dra_analytics_count = var.enable_dra ? var.dra_analytics_count : 0
+
+ dra_admin_public_ip = var.enable_dra ? [format("%s/32", module.dra_admin[0].public_ip)] : []
+ dra_admin_cidr_list = concat(module.network[0].vnet_address_space, local.dra_admin_public_ip)
+
+ dra_admin_image_exists = var.dra_admin_image_details != null ? true : false
+ dra_admin_vhd_exists = var.dra_admin_vhd_details != null ? true : false
+ dra_analytics_image_exists = var.dra_analytics_image_details != null ? true : false
+ dra_analytics_vhd_exists = var.dra_analytics_vhd_details != null ? true : false
+}
+
+module "dra_admin" {
+ source = "imperva/dsf-dra-admin/azurerm"
+ version = "1.7.8" # latest release tag
+ count = var.enable_dra ? 1 : 0
+
+ name = join("-", [local.deployment_name_salted, "dra", "admin"])
+ subnet_id = module.network[0].vnet_subnets[0]
+ resource_group = local.resource_group
+ storage_details = var.dra_admin_storage_details
+ ssh_public_key = tls_private_key.ssh_key.public_key_openssh
+ admin_registration_password = local.password
+ admin_ssh_password = local.password
+
+ allowed_web_console_cidrs = var.web_console_cidr
+ allowed_analytics_cidrs = module.network[0].vnet_address_space
+ allowed_hub_cidrs = local.hub_cidr_list
+ allowed_ssh_cidrs = concat(local.workstation_cidr, var.allowed_ssh_cidrs)
+
+ image_vhd_details = {
+ image = local.dra_admin_image_exists ? {
+ resource_group_name = var.dra_admin_image_details.resource_group_name
+ image_id = var.dra_admin_image_details.image_id
+ } : null,
+ vhd = local.dra_admin_vhd_exists ? {
+ path_to_vhd = var.dra_admin_vhd_details.path_to_vhd
+ storage_account_name = var.dra_admin_vhd_details.storage_account_name
+ container_name = var.dra_admin_vhd_details.container_name
+ } : null
+ }
+
+ attach_persistent_public_ip = true
+ tags = local.tags
+
+ depends_on = [
+ module.network
+ ]
+}
+
+module "dra_analytics" {
+ source = "imperva/dsf-dra-analytics/azurerm"
+ version = "1.7.8" # latest release tag
+ count = local.dra_analytics_count
+
+ name = join("-", [local.deployment_name_salted, "dra", "analytics", count.index])
+ subnet_id = module.network[0].vnet_subnets[1]
+ resource_group = local.resource_group
+ storage_details = var.dra_analytics_storage_details
+ ssh_public_key = tls_private_key.ssh_key.public_key_openssh
+ admin_registration_password = local.password
+ analytics_ssh_password = local.password
+ archiver_password = local.password
+
+ allowed_admin_cidrs = module.network[0].vnet_address_space
+ allowed_ssh_cidrs = concat(local.workstation_cidr, local.hub_cidr_list)
+ #allowed_ssh_cidrs = concat(var.allowed_ssh_cidrs, local.hub_cidr_list, local.workstation_cidr)
+
+ admin_server_private_ip = module.dra_admin[0].private_ip
+ admin_server_public_ip = module.dra_admin[0].public_ip
+
+ image_vhd_details = {
+ image = local.dra_analytics_image_exists ? {
+ resource_group_name = var.dra_analytics_image_details.resource_group_name
+ image_id = var.dra_analytics_image_details.image_id
+ } : null,
+ vhd = local.dra_analytics_vhd_exists ? {
+ path_to_vhd = var.dra_analytics_vhd_details.path_to_vhd
+ storage_account_name = var.dra_analytics_vhd_details.storage_account_name
+ container_name = var.dra_analytics_vhd_details.container_name
+ } : null
+ }
+ tags = local.tags
+
+ depends_on = [
+ module.network
+ ]
+}
\ No newline at end of file
diff --git a/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_5.zip b/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_5.zip
deleted file mode 100644
index 645772f12..000000000
Binary files a/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_5.zip and /dev/null differ
diff --git a/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_8.zip b/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_8.zip
new file mode 100644
index 000000000..533f61c7f
Binary files /dev/null and b/examples/azure/poc/dsf_deployment/dsf_deployment_1_7_8.zip differ
diff --git a/examples/azure/poc/dsf_deployment/main.tf b/examples/azure/poc/dsf_deployment/main.tf
index 5e2acd163..652cd3dfc 100644
--- a/examples/azure/poc/dsf_deployment/main.tf
+++ b/examples/azure/poc/dsf_deployment/main.tf
@@ -1,6 +1,6 @@
module "globals" {
source = "imperva/dsf-globals/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
}
resource "azurerm_resource_group" "rg" {
diff --git a/examples/azure/poc/dsf_deployment/networking.tf b/examples/azure/poc/dsf_deployment/networking.tf
index e5933c6d1..c91e60573 100644
--- a/examples/azure/poc/dsf_deployment/networking.tf
+++ b/examples/azure/poc/dsf_deployment/networking.tf
@@ -18,6 +18,7 @@ locals {
module "network" {
count = 1
source = "Azure/network/azurerm"
+ version = "5.3.0"
vnet_name = "${local.deployment_name_salted}-${module.globals.current_user_name}"
resource_group_name = local.resource_group.name
address_spaces = [var.vnet_ip_range]
@@ -86,8 +87,8 @@ resource "azurerm_nat_gateway_public_ip_association" "nat_gw_public_ip_associati
public_ip_address_id = azurerm_public_ip.nat_gw_public_ip.id
}
+# subnet 1 is the private subnet
resource "azurerm_subnet_nat_gateway_association" "nat_gw_vnet_association" {
- count = length(local.subnet_prefixes)
- subnet_id = module.network[0].vnet_subnets[count.index]
+ subnet_id = module.network[0].vnet_subnets[1]
nat_gateway_id = azurerm_nat_gateway.nat_gw.id
}
\ No newline at end of file
diff --git a/examples/azure/poc/dsf_deployment/outputs.tf b/examples/azure/poc/dsf_deployment/outputs.tf
index 66ca0c6ac..e99d7a42e 100644
--- a/examples/azure/poc/dsf_deployment/outputs.tf
+++ b/examples/azure/poc/dsf_deployment/outputs.tf
@@ -13,8 +13,9 @@ output "dsf_private_ssh_key_file_path" {
output "generated_network" {
value = try({
- vnet = module.network[0].vnet_id
- subnets = module.network[0].vnet_subnets
+ vnet = module.network[0].vnet_id
+ subnets = module.network[0].vnet_subnets
+ address_space = module.network[0].vnet_address_space
}, null)
}
@@ -60,6 +61,24 @@ output "sonar" {
} : null
}
+output "dra" {
+ value = var.enable_dra ? {
+ admin_server = {
+ public_ip = try(module.dra_admin[0].public_ip, null)
+ private_ip = try(module.dra_admin[0].private_ip, null)
+ display_name = try(module.dra_admin[0].display_name, null)
+ ssh_command = try("ssh -i ${local.private_key_file_path} ${module.dra_admin[0].ssh_user}@${module.dra_admin[0].public_ip}", null)
+ }
+ analytics = [
+ for idx, val in module.dra_analytics : {
+ private_ip = val.private_ip
+ archiver_user = val.archiver_user
+ ssh_command = try("ssh -o UserKnownHostsFile=/dev/null -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -i ${local.private_key_file_path} -W %h:%p ${module.dra_admin[0].ssh_user}@${module.dra_admin[0].public_ip}' -i ${local.private_key_file_path} ${val.ssh_user}@${val.private_ip}", null)
+ }
+ ]
+ } : null
+}
+
output "dam" {
value = var.enable_dam ? {
mx = {
@@ -96,6 +115,10 @@ output "audit_sources" {
ssh_command = try("ssh -o UserKnownHostsFile=/dev/null -o ProxyCommand='ssh -o UserKnownHostsFile=/dev/null -i ${local.private_key_file_path} -W %h:%p ${module.mx[0].ssh_user}@${module.mx[0].public_ip}' -i ${local.private_key_file_path} ${val.ssh_user}@${val.private_ip}", null)
}
]
+ agentless_sources = var.enable_sonar ? {
+ mssql = try(module.mssql[0], null)
+ } : null
+
}
}
@@ -106,4 +129,11 @@ output "web_console_dsf_hub" {
public_url = join("", ["https://", module.hub_main[0].public_ip, ":8443/"])
private_url = join("", ["https://", module.hub_main[0].private_ip, ":8443/"])
}, null)
-}
\ No newline at end of file
+}
+
+output "web_console_dra" {
+ value = try({
+ public_url = join("", ["https://", module.dra_admin[0].public_ip, ":8443/"])
+ private_url = join("", ["https://", module.dra_admin[0].private_ip, ":8443/"])
+ }, null)
+}
diff --git a/examples/azure/poc/dsf_deployment/sonar.tf b/examples/azure/poc/dsf_deployment/sonar.tf
index 2c9e4b2da..23a9200f9 100644
--- a/examples/azure/poc/dsf_deployment/sonar.tf
+++ b/examples/azure/poc/dsf_deployment/sonar.tf
@@ -1,10 +1,15 @@
locals {
agentless_gw_count = var.enable_sonar ? var.agentless_gw_count : 0
+
+ hub_public_ip = var.enable_sonar ? (length(module.hub_main[0].public_ip) > 0 ? format("%s/32", module.hub_main[0].public_ip) : null) : null
+ hub_dr_public_ip = var.enable_sonar && var.hub_hadr ? (length(module.hub_dr[0].public_ip) > 0 ? format("%s/32", module.hub_dr[0].public_ip) : null) : null
+ # Workaround, since the following doesn't work: hub_cidr_list = concat(module.network[0].vnet_address_space, compact([local.hub_public_ip, local.hub_dr_public_ip]))
+ hub_cidr_list = var.enable_sonar ? (var.hub_hadr ? concat(module.network[0].vnet_address_space, [local.hub_public_ip, local.hub_dr_public_ip]) : concat(module.network[0].vnet_address_space, [local.hub_public_ip])) : module.network[0].vnet_address_space
}
module "hub_main" {
source = "imperva/dsf-hub/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub"])
@@ -13,7 +18,7 @@ module "hub_main" {
binaries_location = var.tarball_location
password = local.password
storage_details = var.hub_storage_details
- instance_type = var.hub_instance_type
+ instance_size = var.hub_instance_size
base_directory = var.sonar_machine_base_directory
attach_persistent_public_ip = true
use_public_ip = true
@@ -25,13 +30,22 @@ module "hub_main" {
allowed_web_console_and_api_cidrs = var.web_console_cidr
allowed_hub_cidrs = module.network[0].vnet_address_space
allowed_agentless_gw_cidrs = module.network[0].vnet_address_space
+ allowed_dra_admin_cidrs = local.dra_admin_cidr_list
allowed_all_cidrs = local.workstation_cidr
+ allowed_ssh_cidrs = var.allowed_ssh_cidrs
mx_details = var.enable_dam ? [for mx in module.mx : {
name = mx.display_name
address = coalesce(mx.public_ip, mx.private_ip)
username = mx.web_console_user
password = local.password
}] : []
+ dra_details = var.enable_dra ? {
+ name = module.dra_admin[0].display_name
+ address = module.dra_admin[0].public_ip
+ password = local.password
+ archiver_username = module.dra_analytics[0].archiver_user
+ archiver_password = module.dra_analytics[0].archiver_password
+ } : null
tags = local.tags
depends_on = [
@@ -41,7 +55,7 @@ module "hub_main" {
module "hub_dr" {
source = "imperva/dsf-hub/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.enable_sonar && var.hub_hadr ? 1 : 0
friendly_name = join("-", [local.deployment_name_salted, "hub", "DR"])
@@ -50,7 +64,7 @@ module "hub_dr" {
binaries_location = var.tarball_location
password = local.password
storage_details = var.hub_storage_details
- instance_type = var.hub_instance_type
+ instance_size = var.hub_instance_size
base_directory = var.sonar_machine_base_directory
attach_persistent_public_ip = true
use_public_ip = true
@@ -65,7 +79,9 @@ module "hub_dr" {
allowed_web_console_and_api_cidrs = var.web_console_cidr
allowed_hub_cidrs = module.network[0].vnet_address_space
allowed_agentless_gw_cidrs = module.network[0].vnet_address_space
+ allowed_dra_admin_cidrs = local.dra_admin_cidr_list
allowed_all_cidrs = local.workstation_cidr
+ allowed_ssh_cidrs = var.allowed_ssh_cidrs
tags = local.tags
depends_on = [
module.network
@@ -74,7 +90,7 @@ module "hub_dr" {
module "hub_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.hub_dr) > 0 ? 1 : 0
sonar_version = var.sonar_version
@@ -92,7 +108,7 @@ module "hub_hadr" {
module "agentless_gw_main" {
source = "imperva/dsf-agentless-gw/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = local.agentless_gw_count
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index])
@@ -100,7 +116,7 @@ module "agentless_gw_main" {
subnet_id = module.network[0].vnet_subnets[0]
storage_details = var.agentless_gw_storage_details
binaries_location = var.tarball_location
- instance_type = var.agentless_gw_instance_type
+ instance_size = var.agentless_gw_instance_size
base_directory = var.sonar_machine_base_directory
password = local.password
hub_sonarw_public_key = module.hub_main[0].sonarw_public_key
@@ -111,6 +127,7 @@ module "agentless_gw_main" {
allowed_agentless_gw_cidrs = module.network[0].vnet_address_space
allowed_hub_cidrs = module.network[0].vnet_address_space
allowed_all_cidrs = local.workstation_cidr
+ allowed_ssh_cidrs = var.allowed_ssh_cidrs
ingress_communication_via_proxy = {
proxy_address = module.hub_main[0].public_ip
proxy_private_ssh_key_path = local_sensitive_file.ssh_key.filename
@@ -124,7 +141,7 @@ module "agentless_gw_main" {
module "agentless_gw_dr" {
source = "imperva/dsf-agentless-gw/azurerm"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = var.agentless_gw_hadr ? local.agentless_gw_count : 0
friendly_name = join("-", [local.deployment_name_salted, "agentless", "gw", count.index, "DR"])
@@ -132,7 +149,7 @@ module "agentless_gw_dr" {
subnet_id = module.network[0].vnet_subnets[1]
storage_details = var.agentless_gw_storage_details
binaries_location = var.tarball_location
- instance_type = var.agentless_gw_instance_type
+ instance_size = var.agentless_gw_instance_size
base_directory = var.sonar_machine_base_directory
password = local.password
hub_sonarw_public_key = module.hub_main[0].sonarw_public_key
@@ -146,6 +163,7 @@ module "agentless_gw_dr" {
allowed_agentless_gw_cidrs = module.network[0].vnet_address_space
allowed_hub_cidrs = module.network[0].vnet_address_space
allowed_all_cidrs = local.workstation_cidr
+ allowed_ssh_cidrs = var.allowed_ssh_cidrs
ingress_communication_via_proxy = {
proxy_address = module.hub_main[0].public_ip
proxy_private_ssh_key_path = local_sensitive_file.ssh_key.filename
@@ -159,7 +177,7 @@ module "agentless_gw_dr" {
module "agentless_gw_hadr" {
source = "imperva/dsf-hadr/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
count = length(module.agentless_gw_dr)
sonar_version = var.sonar_version
@@ -203,7 +221,7 @@ locals {
module "federation" {
source = "imperva/dsf-federation/null"
- version = "1.7.5" # latest release tag
+ version = "1.7.8" # latest release tag
for_each = local.hub_gw_combinations
hub_info = {
diff --git a/examples/azure/poc/dsf_deployment/variables.tf b/examples/azure/poc/dsf_deployment/variables.tf
index 2f4fcd481..40d3abe1d 100644
--- a/examples/azure/poc/dsf_deployment/variables.tf
+++ b/examples/azure/poc/dsf_deployment/variables.tf
@@ -34,6 +34,12 @@ variable "enable_dam" {
description = "Provision DAM MX and Agent Gateways"
}
+variable "enable_dra" {
+ type = bool
+ default = true
+ description = "Provision DRA Admin and Analytics"
+}
+
variable "agentless_gw_count" {
type = number
default = 1
@@ -46,6 +52,12 @@ variable "agent_gw_count" {
description = "Number of Agent Gateways. Provisioning Agent Gateways requires the enable_dam variable to be set to 'true'."
}
+variable "dra_analytics_count" {
+ type = number
+ default = 1
+ description = "Number of DRA Analytics servers. Provisioning Analytics servers requires the enable_dra variable to be set to 'true'."
+}
+
variable "password" {
sensitive = true
type = string
@@ -59,13 +71,19 @@ variable "password" {
variable "web_console_cidr" {
type = list(string)
default = ["0.0.0.0/0"]
- description = "DSF Hub and MX web console IPs range. Specify IPs in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]. The default configuration opens the DSF Hub web console as a public website. It is recommended to specify a more restricted IP and CIDR range."
+ description = "DSF Hub, MX and DRA Admin web consoles IPs range. Specify IPs in the following format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]. The default configuration opens the DSF Hub web console as a public website. It is recommended to specify a more restricted IP and CIDR range."
}
variable "workstation_cidr" {
type = list(string)
default = null
- description = "IP ranges from which SSH/API access will be allowed to setup the deployment. If not set, the subnet (x.x.x.0/24) of the public IP of the computer where the Terraform is run is used Format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]"
+ description = "IP ranges from which SSH/API access will be allowed to setup the deployment. If not set, the subnet (x.x.x.0/24) of the public IP of the computer where the Terraform is run is used. Format - [\"x.x.x.x/x\", \"y.y.y.y/y\"]"
+}
+
+variable "allowed_ssh_cidrs" {
+ type = list(string)
+ description = "IP ranges from which SSH access to the deployed DSF nodes will be allowed"
+ default = []
}
variable "vnet_ip_range" {
@@ -74,25 +92,24 @@ variable "vnet_ip_range" {
description = "Vnet ip range"
}
-#variable "subnet_ids" {
-# type = object({
-# hub_subnet_id = string
-# hub_dr_subnet_id = string
-# agentless_gw_subnet_id = string
-# agentless_gw_dr_subnet_id = string
-# })
-# default = null
-# description = "The IDs of existing subnets to deploy resources in. Keep empty if you wish to provision new VPC and subnets. db_subnet_ids can be an empty list only if no databases should be provisioned"
-# validation {
-# condition = var.subnet_ids == null || try(var.subnet_ids.hub_subnet_id != null && var.subnet_ids.hub_dr_subnet_id != null && var.subnet_ids.agentless_gw_subnet_id != null && var.subnet_ids.agentless_gw_dr_subnet_id != null, false)
-# error_message = "Value must either be null or specified for all."
-# }
-# validation {
-# condition = var.subnet_ids == null || try(alltrue([for subnet_id in values({ for k, v in var.subnet_ids : k => v if k != "db_subnet_ids" }) : can(regex(".*Microsoft.Network/virtualNetworks/.*/subnets/.*", subnet_id))]), false)
-# error_message = "Subnet id is invalid."
-# }
-#}
-
+variable "subnet_ids" {
+ type = object({
+ hub_subnet_id = string
+ hub_dr_subnet_id = string
+ agentless_gw_subnet_id = string
+ agentless_gw_dr_subnet_id = string
+ mx_subnet_id = string
+ agent_gw_subnet_id = string
+ dra_admin_subnet_id = string
+ dra_analytics_subnet_id = string
+ })
+ default = null
+ description = "The IDs of existing subnets to deploy resources in. Keep empty if you wish to provision new VPC and subnets. db_subnet_ids can be an empty list only if no databases should be provisioned"
+ validation {
+ condition = var.subnet_ids == null || try(var.subnet_ids.hub_subnet_id != null && var.subnet_ids.hub_dr_subnet_id != null && var.subnet_ids.agentless_gw_subnet_id != null && var.subnet_ids.agentless_gw_dr_subnet_id != null && var.subnet_ids.dra_admin_subnet_id != null && var.subnet_ids.dra_analytics_subnet_id != null, false)
+ error_message = "Value must either be null or specified for all."
+ }
+}
##############################
#### DAM variables ####
@@ -111,6 +128,7 @@ variable "dam_version" {
variable "dam_license" {
description = "License file path"
type = string
+ default = null
}
variable "large_scale_mode" {
@@ -137,6 +155,7 @@ variable "dam_agent_installation_location" {
az_blob = string
})
description = "Storage account and container location of the DSF DAM agent installation software. az_blob is the full path to the installation file within the storage account container"
+ default = null
}
variable "simulation_db_types_for_agent" {
@@ -172,7 +191,8 @@ variable "tarball_location" {
az_container = string
az_blob = string
})
- description = "Storage account and container location of the DSF installation software. az_blob is the full path to the tarball file within the storage account container"
+ description = "Storage account and container location of the DSF Sonar installation software. az_blob is the full path to the tarball file within the storage account container"
+ default = null
}
variable "hub_hadr" {
@@ -187,16 +207,16 @@ variable "agentless_gw_hadr" {
description = "Provisions a High Availability and Disaster Recovery node for the Agentless Gateway"
}
-variable "hub_instance_type" {
+variable "hub_instance_size" {
type = string
default = "Standard_E4s_v5"
- description = "Instance type for the DSF Hub"
+ description = "Instance size for the DSF Hub"
}
-variable "agentless_gw_instance_type" {
+variable "agentless_gw_instance_size" {
type = string
default = "Standard_E4s_v5"
- description = "Instance type for the Agentless Gateway"
+ description = "Instance size for the Agentless Gateway"
}
variable "hub_storage_details" {
@@ -236,4 +256,114 @@ variable "sonar_machine_base_directory" {
type = string
default = "/imperva"
description = "The base directory where all Sonar related directories will be installed"
-}
\ No newline at end of file
+}
+
+variable "simulation_db_types_for_agentless" {
+ type = list(string)
+ default = []
+ description = "Types of databases to provision and onboard to an Agentless Gateway for simulation purposes. Available types are: 'MsSQL'."
+ validation {
+ condition = alltrue([
+ for db_type in var.simulation_db_types_for_agentless : contains(["MsSQL"], db_type)
+ ])
+ error_message = "Value must be a subset of: ['MsSQL']"
+ }
+}
+
+##############################
+#### DRA variables ####
+##############################
+
+variable "dra_admin_instance_size" {
+ type = string
+ default = "Standard_E4as_v5" # 4 cores & 32GB ram
+ description = "VM instance size for the Admin Server"
+}
+
+variable "dra_admin_storage_details" {
+ type = object({
+ disk_size = number
+ volume_caching = string
+ storage_account_type = string
+ })
+ description = "DRA Admin compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/69846.htm"
+ default = {
+ disk_size = 260
+ volume_caching = "ReadWrite"
+ storage_account_type = "Standard_LRS"
+ }
+}
+
+variable "dra_admin_image_details" {
+ type = object({
+ resource_group_name = string
+ image_id = string
+ })
+ default = null
+ description = "Image attributes for the Admin Server"
+ validation {
+ condition = var.dra_admin_image_details == null || try(var.dra_admin_image_details.resource_group_name != null && var.dra_admin_image_details.image_id != null, false)
+ error_message = "Value must either be null or specified for all"
+ }
+}
+
+variable "dra_admin_vhd_details" {
+ type = object({
+ path_to_vhd = string
+ storage_account_name = string
+ container_name = string
+ })
+ default = null
+ description = "VHD details for creating the Admin server image. Keep empty if you provide an image for the Admin server instead."
+ validation {
+ condition = var.dra_admin_vhd_details == null || try(var.dra_admin_vhd_details.path_to_vhd != null && var.dra_admin_vhd_details.storage_account_name != null && var.dra_admin_vhd_details.container_name != null, false)
+ error_message = "Value must either be null or specified for all"
+ }
+}
+
+variable "dra_analytics_instance_size" {
+ type = string
+ default = "Standard_E4as_v5" # 4 cores & 32GB ram
+ description = "VM instance size for the Analytics Server"
+}
+
+variable "dra_analytics_storage_details" {
+ type = object({
+ disk_size = number
+ volume_caching = string
+ storage_account_type = string
+ })
+ description = "DRA Analytics compute instance volume attributes. More info in sizing doc - https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/69846.htm"
+ default = {
+ disk_size = 1010
+ volume_caching = "ReadWrite"
+ storage_account_type = "Standard_LRS"
+ }
+}
+
+variable "dra_analytics_image_details" {
+ type = object({
+ resource_group_name = string
+ image_id = string
+ })
+ default = null
+ description = "Image attributes for the Analytics Server"
+ validation {
+ condition = var.dra_analytics_image_details == null || try(var.dra_analytics_image_details.resource_group_name != null && var.dra_analytics_image_details.image_id != null, false)
+ error_message = "Value must either be null or specified for all"
+ }
+}
+
+variable "dra_analytics_vhd_details" {
+ type = object({
+ path_to_vhd = string
+ storage_account_name = string
+ container_name = string
+ })
+ default = null
+ description = "VHD details for creating the Analytics server image. Keep empty if you provide an image for the Analytics server instead."
+ validation {
+ condition = var.dra_analytics_vhd_details == null || try(var.dra_analytics_vhd_details.path_to_vhd != null && var.dra_analytics_vhd_details.storage_account_name != null && var.dra_analytics_vhd_details.container_name != null, false)
+ error_message = "Value must either be null or specified for all"
+ }
+}
diff --git a/examples/azure/poc/dsf_deployment/versions.tf b/examples/azure/poc/dsf_deployment/versions.tf
index c1f2dc8e9..45ecd887f 100644
--- a/examples/azure/poc/dsf_deployment/versions.tf
+++ b/examples/azure/poc/dsf_deployment/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/aws/agent-gw/README.md b/modules/aws/agent-gw/README.md
index dda1a5e82..67e430eb6 100644
--- a/modules/aws/agent-gw/README.md
+++ b/modules/aws/agent-gw/README.md
@@ -83,4 +83,4 @@ API access to the DSF Management server is required to provision this module. Pl
For more information about the DSF Agent Gateway and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-database-activity-monitoring-user-guide/page/378.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/agent-gw/versions.tf b/modules/aws/agent-gw/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/agent-gw/versions.tf
+++ b/modules/aws/agent-gw/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/agentless-gw/README.md b/modules/aws/agentless-gw/README.md
index 6708505a6..2b6861fc7 100644
--- a/modules/aws/agentless-gw/README.md
+++ b/modules/aws/agentless-gw/README.md
@@ -194,4 +194,4 @@ module "dsf_agentless_gw" {
For more information about the Agentless Gateway and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v4.13-sonar-user-guide/page/80401.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
diff --git a/modules/aws/agentless-gw/versions.tf b/modules/aws/agentless-gw/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/agentless-gw/versions.tf
+++ b/modules/aws/agentless-gw/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/core/globals/main.tf b/modules/aws/core/globals/main.tf
index d7ac4ddcc..d87a1471e 100644
--- a/modules/aws/core/globals/main.tf
+++ b/modules/aws/core/globals/main.tf
@@ -1,15 +1,20 @@
locals {
sonar_tarball_s3_key_map = {
- "4.13" = "jsonar-4.13.0.10.0.tar.gz"
+ "4.13" = "jsonar-4.13.0.20.0.tar.gz"
+ "4.13.0.20" = "jsonar-4.13.0.20.0.tar.gz"
"4.13.0.10" = "jsonar-4.13.0.10.0.tar.gz"
- "4.12" = "jsonar-4.12.0.10.0.tar.gz"
+ "4.12" = "jsonar-4.12.0.20.0.tar.gz"
+ "4.12.0.20" = "jsonar-4.12.0.20.0.tar.gz"
"4.12.0.10" = "jsonar-4.12.0.10.0.tar.gz"
- "4.11" = "jsonar-4.11.0.0.0.tar.gz"
+ "4.11" = "jsonar-4.11.0.2.0.tar.gz"
+ "4.11.0.2" = "jsonar-4.11.0.2.0.tar.gz"
+ "4.11.0.1" = "jsonar-4.11.0.1.0.tar.gz"
"4.11.0.0" = "jsonar-4.11.0.0.0.tar.gz"
- "4.10" = "jsonar-4.10.0.1.0.tar.gz"
+ "4.10" = "jsonar-4.10.0.2.0.tar.gz"
+ "4.10.0.2" = "jsonar-4.10.0.2.0.tar.gz"
"4.10.0.1" = "jsonar-4.10.0.1.0.tar.gz"
"4.10.0.0" = "jsonar-4.10.0.0.0.tar.gz"
@@ -76,4 +81,4 @@ resource "random_password" "pass" {
min_special = 1
min_upper = 1
override_special = "*+#%^:/~.,[]_"
-}
\ No newline at end of file
+}
diff --git a/modules/aws/core/globals/outputs.tf b/modules/aws/core/globals/outputs.tf
index ab82ec67c..8bda1ad02 100644
--- a/modules/aws/core/globals/outputs.tf
+++ b/modules/aws/core/globals/outputs.tf
@@ -54,10 +54,12 @@ output "dra_version" {
}
output "sonar_supported_versions" {
+ description = "Sonar versions which are supported by at least one module"
value = local.sonar_supported_versions
}
output "sonar_fully_supported_versions" {
+  description = "Sonar versions which are supported by all modules"
value = local.sonar_fully_supported_versions
}
diff --git a/modules/aws/core/globals/versions.tf b/modules/aws/core/globals/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/core/globals/versions.tf
+++ b/modules/aws/core/globals/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/core/key_pair/versions.tf b/modules/aws/core/key_pair/versions.tf
index 23724e137..8ed85317d 100644
--- a/modules/aws/core/key_pair/versions.tf
+++ b/modules/aws/core/key_pair/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/dam-base-instance/versions.tf b/modules/aws/dam-base-instance/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/dam-base-instance/versions.tf
+++ b/modules/aws/dam-base-instance/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/db-with-agent/README.md b/modules/aws/db-with-agent/README.md
index 6176acc74..449fec0d4 100644
--- a/modules/aws/db-with-agent/README.md
+++ b/modules/aws/db-with-agent/README.md
@@ -69,4 +69,4 @@ module "db_with_agent" {
## Additional Information
For more information about the DSF Agent Gateway and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-database-activity-monitoring-user-guide/page/378.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/db-with-agent/versions.tf b/modules/aws/db-with-agent/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/db-with-agent/versions.tf
+++ b/modules/aws/db-with-agent/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/dra-admin/README.md b/modules/aws/dra-admin/README.md
index ecda2bc95..8bf3a889d 100644
--- a/modules/aws/dra-admin/README.md
+++ b/modules/aws/dra-admin/README.md
@@ -18,7 +18,7 @@ This Terraform module provisions several resources on AWS. These resources inclu
* AWS Secrets that hold the passwords.
* An AWS Elastic Network Interface (ENI).
-The EC2 instance and EBS volume provide the computing and storage resources needed to run the DSF software. The security group controls the inbound and outbound traffic to the instance, while the IAM role grants the necessary permissions to access AWS resources. The KMS is used for encrypting sensitive data.
+The EC2 instance and EBS volume provide the computing and storage resources needed to run the DSF Admin Server software. The security group controls the inbound and outbound traffic to the instance, while the IAM role grants the necessary permissions to access AWS resources. The KMS is used for encrypting sensitive data.
## Inputs
@@ -54,7 +54,7 @@ module "dra_admin" {
}
```
-To see a complete example of how to use this module in a DSF deployment with other modules, check out the [examples](../../../examples/) directory.
+To see a complete example of how to use this module in a DSF deployment with other modules, check out the [examples](../../../examples/aws/) directory.
We recommend using a specific version of the module (and not the latest).
See available released versions in the main repo README [here](https://github.com/imperva/dsfkit#version-history).
@@ -72,4 +72,4 @@ module "dsf_dra_admin" {
For more information about the DSF DRA Admin and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/z-kb-articles-km/page/4e487f3c.html).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/dra-admin/iam_role.tf b/modules/aws/dra-admin/iam_role.tf
index 7c76e40c4..5f1e091cc 100644
--- a/modules/aws/dra-admin/iam_role.tf
+++ b/modules/aws/dra-admin/iam_role.tf
@@ -29,7 +29,7 @@ locals {
"Action" : "secretsmanager:GetSecretValue",
"Resource" : [
aws_secretsmanager_secret.admin_analytics_registration_password.arn,
- aws_secretsmanager_secret.admin_password.arn,
+ aws_secretsmanager_secret.admin_ssh_password.arn,
]
}
]
@@ -39,14 +39,14 @@ locals {
resource "aws_iam_instance_profile" "dsf_node_instance_iam_profile" {
count = var.instance_profile_name == null ? 1 : 0
- name_prefix = "${var.friendly_name}-dra-admin-instance-iam-profile"
+ name_prefix = "${var.name}-dra-admin-instance-iam-profile"
role = local.role_name
tags = var.tags
}
resource "aws_iam_role" "dsf_node_role" {
count = var.instance_profile_name == null ? 1 : 0
- name = "${var.friendly_name}-role"
+ name = "${var.name}-role"
managed_policy_arns = null
assume_role_policy = local.role_assume_role_policy
inline_policy {
diff --git a/modules/aws/dra-admin/main.tf b/modules/aws/dra-admin/main.tf
index aa2ca49cf..08b292bd7 100644
--- a/modules/aws/dra-admin/main.tf
+++ b/modules/aws/dra-admin/main.tf
@@ -10,15 +10,18 @@ locals {
install_script = templatefile("${path.module}/setup.tftpl", {
admin_registration_password_secret_arn = aws_secretsmanager_secret.admin_analytics_registration_password.arn
- admin_password_secret_arn = aws_secretsmanager_secret.admin_password.arn
+ admin_ssh_password_secret_arn = aws_secretsmanager_secret.admin_ssh_password.arn
})
+ readiness_script = templatefile("${path.module}/readiness.tftpl", {
+ admin_server_public_ip = try(local.public_ip, local.private_ip)
+ })
}
resource "aws_eip" "dsf_instance_eip" {
count = var.attach_persistent_public_ip ? 1 : 0
domain = "vpc"
- tags = merge(var.tags, { Name = var.friendly_name })
+ tags = merge(var.tags, { Name = var.name })
}
resource "aws_eip_association" "eip_assoc" {
@@ -36,7 +39,7 @@ resource "aws_instance" "dsf_base_instance" {
volume_size = var.ebs.volume_size
volume_type = var.ebs.volume_type
delete_on_termination = true
- tags = merge(var.tags, { Name = var.friendly_name })
+ tags = merge(var.tags, { Name = var.name })
}
iam_instance_profile = local.instance_profile
network_interface {
@@ -49,7 +52,7 @@ resource "aws_instance" "dsf_base_instance" {
http_endpoint = "enabled"
http_tokens = "required"
}
- tags = merge(var.tags, { Name = var.friendly_name })
+ tags = merge(var.tags, { Name = var.name })
}
resource "aws_network_interface" "eni" {
@@ -62,8 +65,28 @@ module "statistics" {
source = "../../../modules/aws/statistics"
count = var.send_usage_statistics ? 1 : 0
- deployment_name = var.friendly_name
+ deployment_name = var.name
product = "DRA"
resource_type = "dra-admin"
artifact = "ami://${sha256(data.aws_ami.selected-ami.image_id)}@${var.dra_version}"
}
+
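+# Wait for the DRA Admin Server to become reachable, then mark the usage-statistics record as successful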
+resource "null_resource" "readiness" {
+ provisioner "local-exec" {
+ command = local.readiness_script
+ interpreter = ["/bin/bash", "-c"]
+ }
+ depends_on = [
+ aws_instance.dsf_base_instance,
+ module.statistics
+ ]
+}
+
+module "statistics_success" {
+ source = "../../../modules/aws/statistics"
+ count = var.send_usage_statistics ? 1 : 0
+
+ id = module.statistics[0].id
+ status = "success"
+ depends_on = [null_resource.readiness]
+}
diff --git a/modules/aws/dra-analytics/waiter.tpl b/modules/aws/dra-admin/readiness.tftpl
old mode 100755
new mode 100644
similarity index 100%
rename from modules/aws/dra-analytics/waiter.tpl
rename to modules/aws/dra-admin/readiness.tftpl
diff --git a/modules/aws/dra-admin/secrets.tf b/modules/aws/dra-admin/secrets.tf
index 4952912f6..71e725209 100644
--- a/modules/aws/dra-admin/secrets.tf
+++ b/modules/aws/dra-admin/secrets.tf
@@ -1,6 +1,6 @@
resource "aws_secretsmanager_secret" "admin_analytics_registration_password" {
- name_prefix = "${var.friendly_name}-admin-analytics-registration-password"
- description = "DRA admin_registration_password"
+ name_prefix = "${var.name}-admin-analytics-registration-password"
+ description = "DRA admin registration password"
tags = var.tags
}
@@ -9,13 +9,13 @@ resource "aws_secretsmanager_secret_version" "admin_analytics_registration_passw
secret_string = var.admin_registration_password
}
-resource "aws_secretsmanager_secret" "admin_password" {
- name_prefix = "${var.friendly_name}-admin-password"
- description = "DRA admin_registration_password"
+resource "aws_secretsmanager_secret" "admin_ssh_password" {
+ name_prefix = "${var.name}-admin-ssh-password"
+ description = "DRA Admin ssh password"
tags = var.tags
}
-resource "aws_secretsmanager_secret_version" "admin_password" {
- secret_id = aws_secretsmanager_secret.admin_password.id
- secret_string = var.admin_password
+resource "aws_secretsmanager_secret_version" "admin_ssh_password" {
+ secret_id = aws_secretsmanager_secret.admin_ssh_password.id
+ secret_string = var.admin_ssh_password
}
diff --git a/modules/aws/dra-admin/setup.tftpl b/modules/aws/dra-admin/setup.tftpl
index 779ada241..80f7e1afa 100644
--- a/modules/aws/dra-admin/setup.tftpl
+++ b/modules/aws/dra-admin/setup.tftpl
@@ -24,7 +24,7 @@ unzip awscliv2.zip
sudo ./aws/install
admin_registration_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${admin_registration_password_secret_arn} --query SecretString --output text)
-ssh_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${admin_password_secret_arn} --query SecretString --output text)
+ssh_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${admin_ssh_password_secret_arn} --query SecretString --output text)
/opt/itp_global_conf/auto_deploy.sh --hostname "$(hostname)" --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$admin_registration_password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Admin"
diff --git a/modules/aws/dra-admin/sg.tf b/modules/aws/dra-admin/sg.tf
index 782e72382..bb5529f1a 100644
--- a/modules/aws/dra-admin/sg.tf
+++ b/modules/aws/dra-admin/sg.tf
@@ -44,9 +44,9 @@ data "aws_subnet" "selected_subnet" {
resource "aws_security_group" "dsf_base_sg" {
for_each = { for idx, config in local._security_groups_config : idx => config }
- name = join("-", [var.friendly_name, join("-", each.value.name)])
+ name = join("-", [var.name, join("-", each.value.name)])
vpc_id = data.aws_subnet.selected_subnet.vpc_id
- description = format("%s - %s ingress access", var.friendly_name, join(" ", each.value.name))
+ description = format("%s - %s ingress access", var.name, join(" ", each.value.name))
dynamic "ingress" {
for_each = { for idx, port in each.value.tcp : idx => port }
@@ -77,5 +77,5 @@ resource "aws_security_group" "dsf_base_sg" {
ipv6_cidr_blocks = each.value.internet_access ? ["::/0"] : []
}
- tags = merge(var.tags, { Name = join("-", [var.friendly_name, join("-", each.value.name)]) })
+ tags = merge(var.tags, { Name = join("-", [var.name, join("-", each.value.name)]) })
}
diff --git a/modules/aws/dra-admin/variables.tf b/modules/aws/dra-admin/variables.tf
index 8fe66f979..0f23f2c4e 100644
--- a/modules/aws/dra-admin/variables.tf
+++ b/modules/aws/dra-admin/variables.tf
@@ -1,10 +1,14 @@
-variable "friendly_name" {
+variable "name" {
type = string
default = "imperva-dsf-dra-admin"
- description = "Friendly name, EC2 Instance Name"
+ description = "Name to identify all resources"
validation {
- condition = length(var.friendly_name) > 3
- error_message = "Deployment name must be at least 3 characters"
+ condition = length(var.name) >= 3
+ error_message = "Name must be at least 3 characters"
+ }
+ validation {
+ condition = can(regex("^\\p{L}.*", var.name))
+ error_message = "Must start with a letter"
}
}
@@ -66,32 +70,32 @@ variable "admin_registration_password" {
}
}
-variable "admin_password" {
+variable "admin_ssh_password" {
type = string
- description = "Password to be used to admin os user"
+ description = "Password to be used to ssh to the Admin Server"
validation {
- condition = length(var.admin_password) >= 7
+ condition = length(var.admin_ssh_password) >= 7
error_message = "Password must be at least 7 characters long"
}
validation {
- condition = can(regex("[A-Z]", var.admin_password))
+ condition = can(regex("[A-Z]", var.admin_ssh_password))
error_message = "Password must contain at least one uppercase letter"
}
validation {
- condition = can(regex("[a-z]", var.admin_password))
+ condition = can(regex("[a-z]", var.admin_ssh_password))
error_message = "Password must contain at least one lowercase letter"
}
validation {
- condition = can(regex("\\d", var.admin_password))
+ condition = can(regex("\\d", var.admin_ssh_password))
error_message = "Password must contain at least one digit"
}
validation {
- condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_password))
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_ssh_password))
error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
}
}
diff --git a/modules/aws/dra-admin/versions.tf b/modules/aws/dra-admin/versions.tf
index 66fd2a10c..869137a90 100644
--- a/modules/aws/dra-admin/versions.tf
+++ b/modules/aws/dra-admin/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/dra-analytics/README.md b/modules/aws/dra-analytics/README.md
index 8ce9a8c84..4993b14bc 100644
--- a/modules/aws/dra-analytics/README.md
+++ b/modules/aws/dra-analytics/README.md
@@ -68,7 +68,7 @@ See available released versions in the main repo README [here](https://github.co
Specify the module's version by adding the version parameter. For example:
```
-module "dsf_dra_admin" {
+module "dsf_dra_analytics" {
source = "imperva/dsf-dra-analytics/aws"
version = "x.y.z"
}
@@ -78,4 +78,4 @@ module "dsf_dra_admin" {
For more information about the DSF DRA Analytics and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/z-kb-articles-km/page/4e487f3c.html).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/dra-analytics/iam_role.tf b/modules/aws/dra-analytics/iam_role.tf
index b70283fcf..83dfe9ef8 100644
--- a/modules/aws/dra-analytics/iam_role.tf
+++ b/modules/aws/dra-analytics/iam_role.tf
@@ -29,7 +29,7 @@ locals {
"Resource" : [
aws_secretsmanager_secret.analytics_archiver_password.arn,
aws_secretsmanager_secret.admin_registration_password.arn,
- aws_secretsmanager_secret.admin_password.arn
+ aws_secretsmanager_secret.analytics_ssh_password.arn
]
}
]
@@ -39,14 +39,14 @@ locals {
resource "aws_iam_instance_profile" "dsf_node_instance_iam_profile" {
count = var.instance_profile_name == null ? 1 : 0
- name_prefix = "${var.friendly_name}-dra-analytics-instance-iam-profile"
+ name_prefix = "${var.name}-dra-analytics-instance-iam-profile"
role = local.role_name
tags = var.tags
}
resource "aws_iam_role" "dsf_node_role" {
count = var.instance_profile_name == null ? 1 : 0
- name = "${var.friendly_name}-role"
+ name = "${var.name}-role"
managed_policy_arns = null
assume_role_policy = local.role_assume_role_policy
inline_policy {
diff --git a/modules/aws/dra-analytics/main.tf b/modules/aws/dra-analytics/main.tf
index 3cede44a5..107a3eb69 100644
--- a/modules/aws/dra-analytics/main.tf
+++ b/modules/aws/dra-analytics/main.tf
@@ -13,14 +13,14 @@ locals {
install_script = templatefile("${path.module}/setup.tftpl", {
analytics_archiver_password_secret_arn = aws_secretsmanager_secret.analytics_archiver_password.arn
admin_analytics_registration_password_arn = aws_secretsmanager_secret.admin_registration_password.arn
- admin_password_secret_arn = aws_secretsmanager_secret.admin_password.arn
+ analytics_ssh_password_secret_arn = aws_secretsmanager_secret.analytics_ssh_password.arn
archiver_user = var.archiver_user
archiver_password = var.archiver_password
admin_server_private_ip = var.admin_server_private_ip
})
- readiness_script = templatefile("${path.module}/waiter.tpl", {
- admin_server_public_ip = var.admin_server_public_ip
+ readiness_script = templatefile("${path.module}/readiness.tftpl", {
+ admin_server_public_ip = try(var.admin_server_public_ip, var.admin_server_private_ip)
})
}
@@ -33,7 +33,7 @@ resource "aws_instance" "dsf_base_instance" {
volume_size = var.ebs.volume_size
volume_type = var.ebs.volume_type
delete_on_termination = true
- tags = merge(var.tags, { Name = var.friendly_name })
+ tags = merge(var.tags, { Name = var.name })
}
iam_instance_profile = local.instance_profile
network_interface {
@@ -46,7 +46,7 @@ resource "aws_instance" "dsf_base_instance" {
http_endpoint = "enabled"
http_tokens = "required"
}
- tags = merge(var.tags, { Name = var.friendly_name })
+ tags = merge(var.tags, { Name = var.name })
}
resource "aws_network_interface" "eni" {
@@ -59,7 +59,7 @@ module "statistics" {
source = "../../../modules/aws/statistics"
count = var.send_usage_statistics ? 1 : 0
- deployment_name = var.friendly_name
+ deployment_name = var.name
product = "DRA"
resource_type = "dra-analytics"
artifact = "ami://${sha256(data.aws_ami.selected-ami.image_id)}@${var.dra_version}"
diff --git a/modules/aws/dra-analytics/readiness.tftpl b/modules/aws/dra-analytics/readiness.tftpl
new file mode 100755
index 000000000..339221dc0
--- /dev/null
+++ b/modules/aws/dra-analytics/readiness.tftpl
@@ -0,0 +1,8 @@
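+ # Poll the DRA Admin web console until it returns HTTP 200, signalling that the server is ready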
+ while true; do
+ response=$(curl -k -s -o /dev/null -w "%%{http_code}" --request GET 'https://${admin_server_public_ip}:8443/mvc/login')
+ if [ $response -eq 200 ]; then
+ exit 0
+ else
+ sleep 60
+ fi
+ done
\ No newline at end of file
diff --git a/modules/aws/dra-analytics/secrets.tf b/modules/aws/dra-analytics/secrets.tf
index cb8d201cc..dda18f3f0 100644
--- a/modules/aws/dra-analytics/secrets.tf
+++ b/modules/aws/dra-analytics/secrets.tf
@@ -1,5 +1,5 @@
resource "aws_secretsmanager_secret" "analytics_archiver_password" {
- name_prefix = "${var.friendly_name}-analytics-archiver-password"
+ name_prefix = "${var.name}-analytics-archiver-password"
description = "analytics-archiver-password"
tags = var.tags
}
@@ -10,7 +10,7 @@ resource "aws_secretsmanager_secret_version" "analytics_archiver_password" {
}
resource "aws_secretsmanager_secret" "admin_registration_password" {
- name_prefix = "${var.friendly_name}-admin-analytics-registration-password"
+ name_prefix = "${var.name}-admin-analytics-registration-password"
description = "admin-analytics-registration-password"
tags = var.tags
}
@@ -20,13 +20,13 @@ resource "aws_secretsmanager_secret_version" "admin_registration_password" {
secret_string = var.admin_registration_password
}
-resource "aws_secretsmanager_secret" "admin_password" {
- name_prefix = "${var.friendly_name}-admin-password"
- description = "DRA admin_registration_password"
+resource "aws_secretsmanager_secret" "analytics_ssh_password" {
+ name_prefix = "${var.name}-analytics-ssh-password"
+ description = "DRA Analytics ssh password"
tags = var.tags
}
-resource "aws_secretsmanager_secret_version" "admin_password" {
- secret_id = aws_secretsmanager_secret.admin_password.id
- secret_string = var.admin_password
+resource "aws_secretsmanager_secret_version" "analytics_ssh_password" {
+ secret_id = aws_secretsmanager_secret.analytics_ssh_password.id
+ secret_string = var.analytics_ssh_password
}
diff --git a/modules/aws/dra-analytics/setup.tftpl b/modules/aws/dra-analytics/setup.tftpl
index c24b2ec88..4866b94ee 100644
--- a/modules/aws/dra-analytics/setup.tftpl
+++ b/modules/aws/dra-analytics/setup.tftpl
@@ -31,7 +31,7 @@ unzip awscliv2.zip
sudo ./aws/install
archiver_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${analytics_archiver_password_secret_arn} --query SecretString --output text)
admin_registration_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${admin_analytics_registration_password_arn} --query SecretString --output text)
-ssh_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${admin_password_secret_arn} --query SecretString --output text)
+ssh_password=$(/usr/local/bin/aws secretsmanager get-secret-value --secret-id ${analytics_ssh_password_secret_arn} --query SecretString --output text)
/opt/itp_global_conf/auto_deploy.sh --hostname "$(hostname)" --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$admin_registration_password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Analytics" --analytics-user "${archiver_user}" --analytics-password "$archiver_password" --admin-server-ip "${admin_server_private_ip}"
diff --git a/modules/aws/dra-analytics/sg.tf b/modules/aws/dra-analytics/sg.tf
index 26b7c5fa4..1407f5f91 100644
--- a/modules/aws/dra-analytics/sg.tf
+++ b/modules/aws/dra-analytics/sg.tf
@@ -44,9 +44,9 @@ data "aws_subnet" "selected_subnet" {
resource "aws_security_group" "dsf_base_sg" {
for_each = { for idx, config in local._security_groups_config : idx => config }
- name = join("-", [var.friendly_name, join("-", each.value.name)])
+ name = join("-", [var.name, join("-", each.value.name)])
vpc_id = data.aws_subnet.selected_subnet.vpc_id
- description = format("%s - %s ingress access", var.friendly_name, join(" ", each.value.name))
+ description = format("%s - %s ingress access", var.name, join(" ", each.value.name))
dynamic "ingress" {
for_each = { for idx, port in each.value.tcp : idx => port }
@@ -77,5 +77,5 @@ resource "aws_security_group" "dsf_base_sg" {
ipv6_cidr_blocks = each.value.internet_access ? ["::/0"] : []
}
- tags = merge(var.tags, { Name = join("-", [var.friendly_name, join("-", each.value.name)]) })
+ tags = merge(var.tags, { Name = join("-", [var.name, join("-", each.value.name)]) })
}
diff --git a/modules/aws/dra-analytics/variables.tf b/modules/aws/dra-analytics/variables.tf
index 98862334d..7ea9bb393 100644
--- a/modules/aws/dra-analytics/variables.tf
+++ b/modules/aws/dra-analytics/variables.tf
@@ -1,10 +1,14 @@
-variable "friendly_name" {
+variable "name" {
type = string
default = "imperva-dsf-dra-analytics"
- description = "Friendly name, EC2 Instance Name"
+ description = "Name to identify all resources"
validation {
- condition = length(var.friendly_name) > 3
- error_message = "Deployment name must be at least 3 characters"
+ condition = length(var.name) >= 3
+ error_message = "Name must be at least 3 characters"
+ }
+ validation {
+ condition = can(regex("^\\p{L}.*", var.name))
+ error_message = "Must start with a letter"
}
}
@@ -44,32 +48,32 @@ variable "admin_registration_password" {
}
}
-variable "admin_password" {
+variable "analytics_ssh_password" {
type = string
- description = "Password to be used to admin os user"
+ description = "Password to be used to ssh to the Analytics Server"
validation {
- condition = length(var.admin_password) >= 7
+ condition = length(var.analytics_ssh_password) >= 7
error_message = "Password must be at least 7 characters long"
}
validation {
- condition = can(regex("[A-Z]", var.admin_password))
+ condition = can(regex("[A-Z]", var.analytics_ssh_password))
error_message = "Password must contain at least one uppercase letter"
}
validation {
- condition = can(regex("[a-z]", var.admin_password))
+ condition = can(regex("[a-z]", var.analytics_ssh_password))
error_message = "Password must contain at least one lowercase letter"
}
validation {
- condition = can(regex("\\d", var.admin_password))
+ condition = can(regex("\\d", var.analytics_ssh_password))
error_message = "Password must contain at least one digit"
}
validation {
- condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_password))
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.analytics_ssh_password))
error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
}
}
@@ -77,7 +81,7 @@ variable "admin_password" {
variable "archiver_user" {
type = string
default = "archiver-user"
- description = "User to be used to upload archive files for the analytics server"
+ description = "User to be used to upload archive files for the Analytics server"
}
variable "dra_version" {
diff --git a/modules/aws/dra-analytics/versions.tf b/modules/aws/dra-analytics/versions.tf
index 66fd2a10c..869137a90 100644
--- a/modules/aws/dra-analytics/versions.tf
+++ b/modules/aws/dra-analytics/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/hub/README.md b/modules/aws/hub/README.md
index 953451a70..9ba4e49a9 100644
--- a/modules/aws/hub/README.md
+++ b/modules/aws/hub/README.md
@@ -147,4 +147,4 @@ module "dsf_hub" {
For more information about the DSF Hub and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v4.13-sonar-user-guide/page/80401.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
diff --git a/modules/aws/hub/dra_assocoation.tf b/modules/aws/hub/dra_assocoation.tf
index 6729dffb8..14ac69f67 100644
--- a/modules/aws/hub/dra_assocoation.tf
+++ b/modules/aws/hub/dra_assocoation.tf
@@ -1,11 +1,11 @@
locals {
# we are using one password for all services and we have one DRA only
admin_password = var.dra_details == null ? "" : urlencode(var.dra_details.password)
+ archiver_username = var.dra_details == null ? "" : var.dra_details.archiver_username
archiver_password = var.dra_details == null ? "" : urlencode(var.dra_details.archiver_password)
- admin_username = var.dra_details == null ? "" : var.dra_details.username
admin_address = var.dra_details == null ? "" : var.dra_details.address
dra_association_commands = var.dra_details == null ? "" : <<-EOF
- curl -k --max-time 10000 -X POST -G 'https://127.0.0.1:8443/register-to-dra' -d adminIpOrHostname=${local.admin_address} -d adminRegistrationPassword=${local.admin_password} -d adminReportingServer=true -d analyticsArchiveUsername=${local.admin_username} -d analyticsArchivePassword=${local.archiver_password} -d resumeDraJobs=true --header "Authorization: Bearer ${module.hub_instance.access_tokens["archiver"].token}"
+ curl -k --max-time 10000 -X POST -G 'https://127.0.0.1:8443/register-to-dra' -d adminIpOrHostname=${local.admin_address} -d adminRegistrationPassword=${local.admin_password} -d adminReportingServer=true -d analyticsArchiveUsername=${local.archiver_username} -d analyticsArchivePassword=${local.archiver_password} -d resumeDraJobs=true --header "Authorization: Bearer ${module.hub_instance.access_tokens["archiver"].token}"
EOF
}
diff --git a/modules/aws/hub/variables.tf b/modules/aws/hub/variables.tf
index 99305069e..8eee16e14 100644
--- a/modules/aws/hub/variables.tf
+++ b/modules/aws/hub/variables.tf
@@ -296,8 +296,8 @@ variable "dra_details" {
type = object({
name = string
address = string
- username = string
password = string
+ archiver_username = string
archiver_password = string
})
validation {
@@ -305,8 +305,8 @@ variable "dra_details" {
error_message = "Each DRA Admin must specify name and address"
}
validation {
- condition = (var.dra_details == null || (can(var.dra_details.username) && can(var.dra_details.password)))
- error_message = "Each DRA Admin must specify username and password"
+ condition = (var.dra_details == null || (can(var.dra_details.archiver_username) && can(var.dra_details.archiver_password) && can(var.dra_details.password)))
+ error_message = "Each DRA Admin must specify archiver_username, archiver_password and password"
}
default = null
}
diff --git a/modules/aws/hub/versions.tf b/modules/aws/hub/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/hub/versions.tf
+++ b/modules/aws/hub/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/mx/README.md b/modules/aws/mx/README.md
index 2f9b81dd9..2c1deb952 100644
--- a/modules/aws/mx/README.md
+++ b/modules/aws/mx/README.md
@@ -83,4 +83,4 @@ API access is required to provision this module. Please make sure to pass the re
For more information about the DSF MX and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-dam-management-server-manager-user-guide/page/10068.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/mx/variables.tf b/modules/aws/mx/variables.tf
index 8a4beb4d5..ea59a33f4 100644
--- a/modules/aws/mx/variables.tf
+++ b/modules/aws/mx/variables.tf
@@ -245,7 +245,7 @@ variable "create_server_group" {
}
variable "dra_details" {
- description = "Details of the DRA for sending audit logs"
+ description = "Details of the DRA for sending audit logs in the legacy format. More info in https://docs.imperva.com/bundle/v4.14-data-risk-analytics-installation-guide/page/60553.htm"
type = object({
address = string
port = number
diff --git a/modules/aws/mx/versions.tf b/modules/aws/mx/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/mx/versions.tf
+++ b/modules/aws/mx/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/poc-db-onboarder/main.tf b/modules/aws/poc-db-onboarder/main.tf
index 294616b60..de4458f72 100644
--- a/modules/aws/poc-db-onboarder/main.tf
+++ b/modules/aws/poc-db-onboarder/main.tf
@@ -35,82 +35,47 @@ data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
-locals {
- cloud_account_data = {
- data = {
- applianceId = 1,
- applianceType = "DSF_HUB",
- id = "arn:aws:iam::${data.aws_caller_identity.current.account_id}",
- serverType = "AWS",
- auditState = "NO",
- gatewayId = var.assignee_gw
- assetData = {
- admin_email = "admin@email.com",
- "Server Port" = 443,
- asset_display_name = "Auto Onboarded AWS Account",
- auth_mechanism = "default",
- arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}",
- region = data.aws_region.current.name,
- }
- }
- }
- database_asset_data = {
- data : {
- applianceType : "DSF_HUB",
- applianceId : 1,
- serverType : local.server_type_by_engine_map[var.database_details.db_engine],
- gatewayId : var.assignee_gw,
- parentAssetId : local.cloud_account_data.data.id,
- assetData : {
- "Server Port" : var.database_details.db_port,
- database_name : var.database_details.db_name,
- "Service Name" : var.database_details.db_name,
- db_engine : var.database_details.db_engine,
- auth_mechanism : "password",
- username : var.database_details.db_username,
- password : var.database_details.db_password,
- region : data.aws_region.current.name,
- asset_source : "AWS",
- "Server Host Name" : var.database_details.db_address,
- admin_email = "admin@email.com",
- arn : var.database_details.db_arn,
- asset_display_name : var.database_details.db_identifier,
- isMonitored : var.enable_audit
- }
- }
- }
-}
-resource "null_resource" "onboard_db_to_dsf" {
- connection {
- type = "ssh"
- user = var.hub_info.hub_ssh_user
- private_key = file(var.hub_info.hub_private_ssh_key_path)
- host = var.hub_info.hub_ip_address
+module "onboard_db_to_dsf" {
+ source = "../../../modules/null/poc-db-onboarder"
+
+ assignee_gw = var.assignee_gw
- bastion_host = local.bastion_host
- bastion_private_key = local.bastion_private_key
- bastion_user = local.bastion_user
+ usc_access_token = var.usc_access_token
+ enable_audit = var.enable_audit
- script_path = local.script_path
+ database_data = {
+ id = {
+ name = "arn"
+ value = var.database_details.db_arn
+ }
+ name = var.database_details.db_identifier
+ hostname = var.database_details.db_address
+ port = var.database_details.db_port
+ server_type = local.server_type_by_engine_map[var.database_details.db_engine]
}
- provisioner "remote-exec" {
- inline = [
- templatefile("${path.module}/onboard.tftpl", {
- cloud_account_data = jsonencode(local.cloud_account_data),
- database_asset_data = jsonencode(local.database_asset_data)
- db_arn = var.database_details.db_arn
- account_arn = local.cloud_account_data.data.id
- usc_access_token = var.usc_access_token
- enable_audit = var.enable_audit
- })
- ]
+ cloud_account_data = {
+ id = {
+ name = "arn"
+ value = "arn:aws:iam::${data.aws_caller_identity.current.account_id}"
+ }
+ name = data.aws_caller_identity.current.account_id
+ type = "AWS"
+ connections_data = []
}
- triggers = {
- db_arn = var.database_details.db_arn
+
+ cloud_account_additional_data = {
+ auth_mechanism = "default"
+ region = data.aws_region.current.name
}
- depends_on = [
- aws_iam_role_policy_attachment.policy_attach
- ]
+ database_additional_data = {
+ region = data.aws_region.current.name
+ }
+
+ hub_info = var.hub_info
+ hub_proxy_info = var.hub_proxy_info
+ terraform_script_path_folder = var.terraform_script_path_folder
+
+ depends_on = [aws_iam_role_policy_attachment.policy_attach]
}
diff --git a/modules/aws/poc-db-onboarder/variables.tf b/modules/aws/poc-db-onboarder/variables.tf
index 54b8c1278..b5f08d512 100644
--- a/modules/aws/poc-db-onboarder/variables.tf
+++ b/modules/aws/poc-db-onboarder/variables.tf
@@ -1,13 +1,3 @@
-variable "sonar_version" {
- type = string
- description = "The Sonar version to install. Supported versions are: 4.11 and up. Both long and short version formats are supported, for example, 4.12.0.10 or 4.12. The short format maps to the latest patch."
- nullable = false
- validation {
- condition = !startswith(var.sonar_version, "4.9.") && !startswith(var.sonar_version, "4.10.")
- error_message = "The sonar_version value must be 4.11 or higher"
- }
-}
-
variable "hub_info" {
type = object({
hub_ip_address = string
diff --git a/modules/aws/poc-db-onboarder/versions.tf b/modules/aws/poc-db-onboarder/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/poc-db-onboarder/versions.tf
+++ b/modules/aws/poc-db-onboarder/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/rds-mssql-db/README.md b/modules/aws/rds-mssql-db/README.md
index 5a88bf032..a55beed32 100644
--- a/modules/aws/rds-mssql-db/README.md
+++ b/modules/aws/rds-mssql-db/README.md
@@ -80,4 +80,4 @@ module "dsf_rds_mssql" {
## Additional Information
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/aws/rds-mssql-db/main.tf b/modules/aws/rds-mssql-db/main.tf
index e426a0645..6cd9f6ca9 100644
--- a/modules/aws/rds-mssql-db/main.tf
+++ b/modules/aws/rds-mssql-db/main.tf
@@ -3,8 +3,7 @@ resource "random_password" "db_password" {
special = false
}
-resource "random_pet" "db_id" {
-}
+resource "random_pet" "db_id" {}
resource "random_id" "salt" {
byte_length = 2
diff --git a/modules/aws/rds-mssql-db/versions.tf b/modules/aws/rds-mssql-db/versions.tf
index 2c034a2cb..dfaba2d42 100644
--- a/modules/aws/rds-mssql-db/versions.tf
+++ b/modules/aws/rds-mssql-db/versions.tf
@@ -1,9 +1,9 @@
terraform {
- required_version = ">= 0.13"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
- source = "hashicorp/aws"
+ source = "hashicorp/aws"
version = ">= 4.23.0"
configuration_aliases = [aws.poc_scripts_s3_region]
}
diff --git a/modules/aws/rds-mysql-db/versions.tf b/modules/aws/rds-mysql-db/versions.tf
index 07a5d3bb7..272db1955 100644
--- a/modules/aws/rds-mysql-db/versions.tf
+++ b/modules/aws/rds-mysql-db/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 0.13"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/rds-postgres-db/versions.tf b/modules/aws/rds-postgres-db/versions.tf
index 07a5d3bb7..272db1955 100644
--- a/modules/aws/rds-postgres-db/versions.tf
+++ b/modules/aws/rds-postgres-db/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 0.13"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/sonar-base-instance/versions.tf b/modules/aws/sonar-base-instance/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/sonar-base-instance/versions.tf
+++ b/modules/aws/sonar-base-instance/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/aws/sonar-upgrader/versions.tf b/modules/aws/sonar-upgrader/versions.tf
index 3ec2b2811..36219a637 100644
--- a/modules/aws/sonar-upgrader/versions.tf
+++ b/modules/aws/sonar-upgrader/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/modules/aws/statistics/versions.tf b/modules/aws/statistics/versions.tf
index f205f46c6..272db1955 100644
--- a/modules/aws/statistics/versions.tf
+++ b/modules/aws/statistics/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
aws = {
diff --git a/modules/azurerm/agent-gw/README.md b/modules/azurerm/agent-gw/README.md
index e97205d9e..482ac3671 100644
--- a/modules/azurerm/agent-gw/README.md
+++ b/modules/azurerm/agent-gw/README.md
@@ -84,4 +84,4 @@ API access to the DSF Management server is required to provision this module. Pl
For more information about the DSF Agent Gateway and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-database-activity-monitoring-user-guide/page/378.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/agent-gw/versions.tf b/modules/azurerm/agent-gw/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/agent-gw/versions.tf
+++ b/modules/azurerm/agent-gw/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/agentless-gw/README.md b/modules/azurerm/agentless-gw/README.md
index e6caaef33..40db3a070 100644
--- a/modules/azurerm/agentless-gw/README.md
+++ b/modules/azurerm/agentless-gw/README.md
@@ -100,4 +100,4 @@ SSH access is required to provision this module. To SSH into the Agentless Gatew
For more information about the Agentless Gateway and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v4.12-sonar-user-guide/page/80401.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/agentless-gw/main.tf b/modules/azurerm/agentless-gw/main.tf
index 5354fa86c..dade1a422 100644
--- a/modules/azurerm/agentless-gw/main.tf
+++ b/modules/azurerm/agentless-gw/main.tf
@@ -38,7 +38,7 @@ module "gw_instance" {
security_groups_config = local.security_groups_config
security_group_ids = var.security_group_ids
public_ssh_key = var.ssh_key.ssh_public_key
- instance_type = var.instance_type
+ instance_size = var.instance_size
storage_details = var.storage_details
vm_image = var.vm_image
vm_image_id = var.vm_image_id
diff --git a/modules/azurerm/agentless-gw/variables.tf b/modules/azurerm/agentless-gw/variables.tf
index 89b7f2351..70f851262 100644
--- a/modules/azurerm/agentless-gw/variables.tf
+++ b/modules/azurerm/agentless-gw/variables.tf
@@ -101,10 +101,10 @@ variable "use_public_ip" {
description = "Whether to use the DSF instance's public or private IP to connect to the instance"
}
-variable "instance_type" {
+variable "instance_size" {
type = string
default = "Standard_E4as_v5" # 4 cores & 32GB ram
- description = "Ec2 instance type for the Agentless Gateway"
+ description = "Instance size for the Agentless Gateway"
}
variable "storage_details" {
@@ -255,7 +255,7 @@ variable "base_directory" {
variable "cloud_init_timeout" {
type = number
- default = 900
+ default = 1200
description = "Max time to wait for the machine to start"
}
diff --git a/modules/azurerm/agentless-gw/versions.tf b/modules/azurerm/agentless-gw/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/agentless-gw/versions.tf
+++ b/modules/azurerm/agentless-gw/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/core/globals/versions.tf b/modules/azurerm/core/globals/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/core/globals/versions.tf
+++ b/modules/azurerm/core/globals/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/dam-base-instance/versions.tf b/modules/azurerm/dam-base-instance/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/dam-base-instance/versions.tf
+++ b/modules/azurerm/dam-base-instance/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/db-with-agent/README.md b/modules/azurerm/db-with-agent/README.md
index 6a7eb8d35..ad56803a8 100644
--- a/modules/azurerm/db-with-agent/README.md
+++ b/modules/azurerm/db-with-agent/README.md
@@ -81,4 +81,4 @@ module "db_with_agent" {
## Additional Information
For more information about the DAM Agent and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-database-activity-monitoring-user-guide/page/378.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
diff --git a/modules/azurerm/db-with-agent/versions.tf b/modules/azurerm/db-with-agent/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/db-with-agent/versions.tf
+++ b/modules/azurerm/db-with-agent/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/dra-admin/README.md b/modules/azurerm/dra-admin/README.md
new file mode 100644
index 000000000..b1b1e9248
--- /dev/null
+++ b/modules/azurerm/dra-admin/README.md
@@ -0,0 +1,86 @@
+# DSF DRA Admin
+[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
+
+This Terraform module provisions a DSF DRA Admin instance on Azure as a Virtual Machine.
+
+## Requirements
+* Terraform, refer to [versions.tf](versions.tf) for supported versions.
+* An Azure account.
+* DRA image or VHD located in an Azure Storage account. [Request access to vhd here](https://docs.google.com/forms/d/e/1FAIpQLSfCBUGHN04u2gK8IoxuHl4TLooBWUl7cK7ihS9Q5ZHwafNBHA/viewform).
+You can provide details for either the image or the VHD. When VHD details are supplied, Terraform uses them to create an image, which is then used to create the Virtual Machine.
+
+**NOTE:** In case you are not yet an Imperva customer, [please contact our team](https://www.imperva.com/contact-us/).
+
+## Resources Provisioned
+This Terraform module provisions several resources on Azure. These resources include:
+* A Virtual Machine instance for running the DSF Admin Server software.
+* Security group rules to allow the required network access to and from the DSF Admin Server instance.
+* An Azure Key Vault that holds the passwords.
+* An Azure network interface.
+
+The Virtual Machine instance provides the computing resources needed to run the DSF Admin Server software. The security group rules control the inbound and outbound traffic to the instance. The Key Vault is used for storing sensitive data (passwords).
+
+## Inputs
+
+The following input variables are **required**:
+
+* `resource_group`: Resource group to provision all the resources into
+* `subnet_id`: The ID of the subnet in which to launch the DSF Admin Server instance
+* `ssh_public_key`: SSH public key to access the DSF Admin Server instance
+* `image_vhd_details`: Image or VHD details to create the Virtual Machine from. Provide details for either the image or the VHD. When VHD details are supplied, Terraform uses them to create an image, which is then used to create the Virtual Machine
+* `admin_ssh_password`: Password to be used to SSH to the Admin Server instance
+* `admin_registration_password`: Password to be used to register the Analytics Server to the Admin Server
+
+Refer to [variables.tf](variables.tf) for additional variables with default values and additional info.
+
+## Outputs
+
+Refer to [outputs](outputs.tf) or https://registry.terraform.io/modules/imperva/dsf-dra-admin/azurerm/latest?tab=outputs.
+
+## Usage
+
+To use this module, add the following to your Terraform configuration:
+
+```
+provider "azurerm" {
+ features {}
+}
+
+module "dra_admin" {
+ source = "imperva/dsf-dra-admin/azurerm"
+
+ resource_group = azurerm_resource_group.example.name
+ subnet_id = azurerm_subnet.example.id
+ ssh_public_key = var.ssh_public_key
+
+ image_vhd_details = {
+ image = {
+ resource_group_name = var.image_details.resource_group_name
+ image_id = var.image_details.image_id
+ }
+ }
+ admin_ssh_password = var.admin_ssh_password
+ admin_registration_password = var.admin_registration_password
+ allowed_all_cidrs = var.allowed_all_cidrs
+}
+```
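+
+If you have only the VHD (and not a pre-built image), you can pass the VHD details instead; Terraform then creates the image from the blob and uses it as the Virtual Machine's source image. A minimal sketch of the `image_vhd_details` input, assuming placeholder storage account, container and blob names:
+
+```
+  image_vhd_details = {
+    vhd = {
+      storage_account_name = "examplestorageaccount" # placeholder
+      container_name       = "examplecontainer"      # placeholder
+      path_to_vhd          = "dra-admin.vhd"         # placeholder
+    }
+  }
+```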
+
+To see a complete example of how to use this module in a DSF deployment with other modules, check out the [examples](../../../examples/azure/) directory.
+
+We recommend using a specific version of the module (and not the latest).
+See available released versions in the main repo README [here](https://github.com/imperva/dsfkit#version-history).
+
+Specify the module's version by adding the version parameter. For example:
+
+```
+module "dsf_dra_admin" {
+ source = "imperva/dsf-dra-admin/azurerm"
+ version = "x.y.z"
+}
+```
+
+## Additional Information
+
+For more information about the DSF DRA Admin and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/z-kb-articles-km/page/4e487f3c.html).
+
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/dra-admin/image.tf b/modules/azurerm/dra-admin/image.tf
new file mode 100644
index 000000000..8d7357849
--- /dev/null
+++ b/modules/azurerm/dra-admin/image.tf
@@ -0,0 +1,28 @@
+locals {
+ # vm user
+ vm_default_user = "adminuser"
+ vm_user = var.vm_user != null ? var.vm_user : local.vm_default_user
+
+ create_image_from_vhd = var.image_vhd_details.vhd != null ? true : false
+ use_existing_image = var.image_vhd_details.image != null ? true : false
+
+ image_id = (local.use_existing_image ?
+ "${data.azurerm_subscription.subscription.id}/resourceGroups/${var.image_vhd_details.image.resource_group_name}/providers/Microsoft.Compute/images/${var.image_vhd_details.image.image_id}" :
+ "${azurerm_image.created_image[0].id}")
+}
+
+resource "azurerm_image" "created_image" {
+ count = local.create_image_from_vhd ? 1 : 0
+
+ name = join("-", [var.name, "image"])
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+
+ os_disk {
+ os_type = "Linux"
+ caching = "ReadWrite"
+ os_state = "Generalized"
+ blob_uri = "https://${var.image_vhd_details.vhd.storage_account_name}.blob.core.windows.net/${var.image_vhd_details.vhd.container_name}/${var.image_vhd_details.vhd.path_to_vhd}"
+ }
+ tags = var.tags
+}
diff --git a/modules/azurerm/dra-admin/main.tf b/modules/azurerm/dra-admin/main.tf
new file mode 100644
index 000000000..ca05f0c3e
--- /dev/null
+++ b/modules/azurerm/dra-admin/main.tf
@@ -0,0 +1,132 @@
+locals {
+ security_group_id = length(var.security_group_ids) == 0 ? azurerm_network_security_group.dsf_base_sg.id : var.security_group_ids[0]
+
+ public_ip = azurerm_linux_virtual_machine.vm.public_ip_address
+ private_ip = azurerm_linux_virtual_machine.vm.private_ip_address
+
+ install_script = templatefile("${path.module}/setup.tftpl", {
+ vault_name = azurerm_key_vault.vault.name
+ admin_registration_password_secret_name = azurerm_key_vault_secret.admin_analytics_registration_password.name
+ admin_ssh_password_secret_name = azurerm_key_vault_secret.ssh_password.name
+ })
+
+ readiness_script = templatefile("${path.module}/readiness.tftpl", {
+ admin_server_public_ip = try(local.public_ip, local.private_ip)
+ })
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = var.name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+
+ ip_configuration {
+ name = join("-", [var.name, "nic"])
+ subnet_id = var.subnet_id
+ private_ip_address_allocation = "Dynamic"
+ public_ip_address_id = try(azurerm_public_ip.vm_public_ip[0].id, null)
+ }
+ tags = var.tags
+}
+
+resource "azurerm_network_interface_security_group_association" "nic_sg_association" {
+ network_interface_id = azurerm_network_interface.nic.id
+ network_security_group_id = local.security_group_id
+}
+
+resource "azurerm_public_ip" "vm_public_ip" {
+ count = var.attach_persistent_public_ip ? 1 : 0
+ name = join("-", [var.name, "public", "ip"])
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+ sku = "Standard"
+ allocation_method = "Static"
+ tags = var.tags
+}
+
+resource "azurerm_linux_virtual_machine" "vm" {
+ name = var.name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+ size = var.instance_size
+ admin_username = local.vm_user
+
+ network_interface_ids = [
+ azurerm_network_interface.nic.id
+ ]
+
+ admin_ssh_key {
+ public_key = var.ssh_public_key
+ username = local.vm_user
+ }
+
+ os_disk {
+ disk_size_gb = var.storage_details.disk_size
+ caching = var.storage_details.volume_caching
+ storage_account_type = var.storage_details.storage_account_type
+ }
+
+ source_image_id = local.image_id
+
+ identity {
+ type = "UserAssigned"
+ identity_ids = [
+ azurerm_user_assigned_identity.user_assigned_identity.id
+ ]
+ }
+ custom_data = base64encode(local.install_script)
+
+ # Ignore changes to the custom_data attribute (Don't replace on userdata change)
+ lifecycle {
+ ignore_changes = [
+ custom_data
+ ]
+ }
+
+ tags = var.tags
+}
+
+resource "azurerm_user_assigned_identity" "user_assigned_identity" {
+ name = var.name
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+}
+
+data "azurerm_subscription" "subscription" {}
+
+resource "azurerm_role_assignment" "vm_identity_role_assignment" {
+ scope = data.azurerm_subscription.subscription.id
+ principal_id = azurerm_user_assigned_identity.user_assigned_identity.principal_id
+ role_definition_name = "Storage Blob Data Reader"
+}
+
+module "statistics" {
+ source = "../../../modules/azurerm/statistics"
+ count = var.send_usage_statistics ? 1 : 0
+
+ deployment_name = var.name
+ product = "DRA"
+ resource_type = "dra-admin"
+ artifact = local.image_id
+ location = var.resource_group.location
+}
+
+resource "null_resource" "readiness" {
+ provisioner "local-exec" {
+ command = local.readiness_script
+ interpreter = ["/bin/bash", "-c"]
+ }
+ depends_on = [
+ azurerm_linux_virtual_machine.vm,
+ module.statistics
+ ]
+}
+
+module "statistics_success" {
+ source = "../../../modules/azurerm/statistics"
+ count = var.send_usage_statistics ? 1 : 0
+
+ id = module.statistics[0].id
+ status = "success"
+ depends_on = [null_resource.readiness]
+}
diff --git a/modules/azurerm/dra-admin/outputs.tf b/modules/azurerm/dra-admin/outputs.tf
new file mode 100644
index 000000000..d9f509076
--- /dev/null
+++ b/modules/azurerm/dra-admin/outputs.tf
@@ -0,0 +1,28 @@
+output "public_ip" {
+ description = "Public elastic IP address of the DSF instance"
+ value = local.public_ip
+}
+
+output "private_ip" {
+ description = "Private IP address of the DSF instance"
+ value = local.private_ip
+}
+
+
+output "ssh_password" {
+ value = var.admin_ssh_password
+}
+
+
+output "admin_image_id" {
+ value = local.image_id
+}
+
+output "display_name" {
+ value = var.name
+}
+
+output "ssh_user" {
+ value = "cbadmin"
+}
+
diff --git a/modules/azurerm/dra-admin/readiness.tftpl b/modules/azurerm/dra-admin/readiness.tftpl
new file mode 100644
index 000000000..339221dc0
--- /dev/null
+++ b/modules/azurerm/dra-admin/readiness.tftpl
@@ -0,0 +1,8 @@
+ while true; do
+ response=$(curl -k -s -o /dev/null -w "%%{http_code}" --request GET 'https://${admin_server_public_ip}:8443/mvc/login')
+ if [ $response -eq 200 ]; then
+ exit 0
+ else
+ sleep 60
+ fi
+ done
\ No newline at end of file
diff --git a/modules/azurerm/dra-admin/secrets.tf b/modules/azurerm/dra-admin/secrets.tf
new file mode 100644
index 000000000..4fdb9ad0c
--- /dev/null
+++ b/modules/azurerm/dra-admin/secrets.tf
@@ -0,0 +1,57 @@
+data "azurerm_client_config" "current" {}
+
+resource "azurerm_key_vault" "vault" {
+ name = trim(substr(var.name, -24, -1), "-")
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+ sku_name = "standard"
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ enabled_for_deployment = true
+ soft_delete_retention_days = 7
+ purge_protection_enabled = false
+ tags = var.tags
+}
+
+resource "azurerm_key_vault_access_policy" "vault_owner_access_policy" {
+ key_vault_id = azurerm_key_vault.vault.id
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = data.azurerm_client_config.current.object_id
+ secret_permissions = [
+ "Delete",
+ "Get",
+ "Purge",
+ "Set",
+ ]
+}
+
+resource "azurerm_key_vault_access_policy" "vault_vm_access_policy" {
+ key_vault_id = azurerm_key_vault.vault.id
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = azurerm_user_assigned_identity.user_assigned_identity.principal_id
+
+ secret_permissions = [
+ "Get",
+ ]
+}
+
+resource "azurerm_key_vault_secret" "admin_analytics_registration_password" {
+ name = join("-", [var.name, "admin", "analytics", "registration", "password"])
+ value = var.admin_registration_password
+ key_vault_id = azurerm_key_vault.vault.id
+ content_type = "DRA admin registration password"
+ tags = var.tags
+ depends_on = [
+ azurerm_key_vault_access_policy.vault_owner_access_policy
+ ]
+}
+
+resource "azurerm_key_vault_secret" "ssh_password" {
+ name = join("-", [var.name, "admin", "ssh", "password"])
+ value = var.admin_ssh_password
+ key_vault_id = azurerm_key_vault.vault.id
+ content_type = "DRA Admin ssh password"
+ tags = var.tags
+ depends_on = [
+ azurerm_key_vault_access_policy.vault_owner_access_policy
+ ]
+}
\ No newline at end of file
diff --git a/modules/azurerm/dra-admin/setup.tftpl b/modules/azurerm/dra-admin/setup.tftpl
new file mode 100644
index 000000000..349a001a5
--- /dev/null
+++ b/modules/azurerm/dra-admin/setup.tftpl
@@ -0,0 +1,45 @@
+#!/bin/bash
+set -x
+exec > >(tee /var/log/user-data.log|logger -t user-data ) 2>&1
+echo BEGIN
+
+function install-azure-cli(){
+ rpm --import https://packages.microsoft.com/keys/microsoft.asc
+ dnf install -y https://packages.microsoft.com/config/rhel/9.0/packages-microsoft-prod.rpm
+ dnf install -y https://packages.microsoft.com/config/rhel/8/packages-microsoft-prod.rpm
+ dnf install azure-cli -y
+ az login --identity
+}
+
+date '+%Y-%m-%d %H:%M:%S'
+my_nameserver=$(ifconfig eth0 | grep "inet " | awk '{print $2}')
+my_ip=$(ifconfig eth0 | grep "inet " | awk '{print $2}')
+my_default_gw=$(ip route show | grep default | awk '{print $3}')
+my_cidr=$(awk -F. '{
+ split($0, octets)
+ for (i in octets) {
+ mask += 8 - log(2**8 - octets[i])/log(2);
+ }
+ print mask
+}' <<< $(ifconfig eth0 | grep "inet " | awk '{print $4}'))
+
+sudo su
+export ITP_HOME=/opt/itp
+export CATALINA_HOME=/opt/apache-tomcat
+chmod +x /opt/itp_global_conf/auto_deploy.sh
+sed -i 's/^hosts:.*/hosts: files dns/' /etc/nsswitch.conf
+
+install-azure-cli
+
+admin_registration_password=$(az keyvault secret show --name ${admin_registration_password_secret_name} --vault-name ${vault_name} --query "value" --output tsv)
+ssh_password=$(az keyvault secret show --name ${admin_ssh_password_secret_name} --vault-name ${vault_name} --query "value" --output tsv)
+
+/opt/itp_global_conf/auto_deploy.sh --hostname "$(hostname)" --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$admin_registration_password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Admin"
+
+echo $ssh_password | passwd --stdin cbadmin
+
+# Allow using this host as a jump box
+sed -i 's/.*AllowTcpForwarding.*/AllowTcpForwarding yes/g' /etc/ssh/sshd_config
+systemctl restart sshd
+
+#/opt/itp_global_conf/auto_deploy_cloud.sh --hostname "$(hostname)" --dhcp 1 --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Admin"
diff --git a/modules/azurerm/dra-admin/sg.tf b/modules/azurerm/dra-admin/sg.tf
new file mode 100644
index 000000000..88667cf1f
--- /dev/null
+++ b/modules/azurerm/dra-admin/sg.tf
@@ -0,0 +1,74 @@
+locals {
+ security_groups_config = [ # https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/63052.htm
+ {
+ name = ["web", "console"]
+ internet_access = false
+ udp = []
+ tcp = [8443]
+ cidrs = concat(var.allowed_web_console_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["other"]
+ internet_access = true
+ udp = []
+ tcp = [22]
+ cidrs = concat(var.allowed_ssh_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["dra", "analytics"]
+ internet_access = false
+ udp = []
+ tcp = [61617, 8443, 8501]
+ cidrs = concat(var.allowed_analytics_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["hub"]
+ internet_access = false
+ udp = []
+ tcp = [8443, 61617, 8501]
+ cidrs = concat(var.allowed_hub_cidrs, var.allowed_all_cidrs)
+ }
+ ]
+
+ # Skip sg creation if external sg list is given
+ _security_groups_config = length(var.security_group_ids) == 0 ? local.security_groups_config : []
+}
+
+##############################################################################
+### Ingress security group
+##############################################################################
+
+resource "azurerm_network_security_group" "dsf_base_sg" {
+ name = var.name
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+
+ dynamic "security_rule" {
+ for_each = { for idx, config in local._security_groups_config : idx => config if length(config.cidrs) > 0 }
+ content {
+ name = join("-", [var.name, "tcp", join("-", security_rule.value.name)])
+ priority = 100 + security_rule.key
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_ranges = security_rule.value.tcp
+      # Azure doesn't allow overlapping CIDR blocks in a single rule; the expression below drops CIDRs that are fully contained in another CIDR in the list
+ source_address_prefixes = [for k, v in { for v in security_rule.value.cidrs : v => {
+ cidr = v,
+ min_ip_int = (tonumber(split(".", cidrhost(v, 0))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, 0))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, 0))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, 0))[3])
+ max_ip_int = (tonumber(split(".", cidrhost(v, -1))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, -1))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, -1))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, -1))[3])
+ } } : v.cidr if !anytrue([for i in { for v in security_rule.value.cidrs : v => {
+ cidr = v,
+ min_ip_int = (tonumber(split(".", cidrhost(v, 0))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, 0))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, 0))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, 0))[3])
+ max_ip_int = (tonumber(split(".", cidrhost(v, -1))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, -1))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, -1))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, -1))[3])
+ } } : v.max_ip_int <= i.max_ip_int && v.min_ip_int >= i.min_ip_int if v.cidr != i.cidr])]
+ destination_address_prefix = "*"
+ # The below setup is a workaround for "Provider produced inconsistent final plan" error
+ description = ""
+ destination_port_range = ""
+ source_address_prefix = ""
+ }
+ }
+ tags = var.tags
+}
\ No newline at end of file
diff --git a/modules/azurerm/dra-admin/variables.tf b/modules/azurerm/dra-admin/variables.tf
new file mode 100644
index 000000000..62b30fc26
--- /dev/null
+++ b/modules/azurerm/dra-admin/variables.tf
@@ -0,0 +1,235 @@
+variable "name" {
+ type = string
+ default = "imperva-dsf-dra-admin"
+ description = "Friendly name to identify all resources"
+ validation {
+ condition = length(var.name) >= 3
+ error_message = "Name must be at least 3 characters"
+ }
+ validation {
+ condition = can(regex("^\\p{L}.*", var.name))
+ error_message = "Name must start with a letter"
+ }
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
+
+variable "resource_group" {
+ type = object({
+ name = string
+ location = string
+ })
+ description = "Resource group details"
+}
+
+
+variable "instance_size" {
+ type = string
+ default = "Standard_E4as_v5" # 4 cores & 32GB ram
+ description = "VM instance size for the Admin Server"
+}
+
+variable "ssh_public_key" {
+ type = string
+ description = "SSH public key to access machine"
+ nullable = false
+}
+
+variable "image_vhd_details" {
+ type = object({
+ image = optional(object({
+ resource_group_name = string
+ image_id = string
+ }))
+ vhd = optional(object({
+ path_to_vhd = string
+ storage_account_name = string
+ container_name = string
+ }))
+ })
+ description = "Image or VHD details for the Admin Server"
+ default = null
+
+ validation {
+ condition = try((var.image_vhd_details.image != null && var.image_vhd_details.vhd == null || (var.image_vhd_details.image == null && var.image_vhd_details.vhd != null)), false)
+ error_message = "Either one of 'image' or 'vhd' should be specified but not both."
+ }
+ validation {
+ condition = var.image_vhd_details.image == null || try(var.image_vhd_details.image.resource_group_name != null && var.image_vhd_details.image.image_id != null, false)
+ error_message = "Image value must either be null or specified for all"
+ }
+ validation {
+ condition = var.image_vhd_details.vhd == null || try(var.image_vhd_details.vhd.path_to_vhd != null && var.image_vhd_details.vhd.storage_account_name != null && var.image_vhd_details.vhd.container_name != null, false)
+ error_message = "VHD value must either be null or specified for all"
+ }
+}
+
+variable "vm_user" {
+ type = string
+ default = "cbadmin"
+ description = "VM user to use for SSH. Keep empty to use the default user."
+}
+
+variable "admin_registration_password" {
+ type = string
+ description = "Password to be used to register Analytics Server to Admin Server"
+
+ validation {
+ condition = length(var.admin_registration_password) >= 7
+ error_message = "Password must be at least 7 characters long"
+ }
+
+ validation {
+ condition = can(regex("[A-Z]", var.admin_registration_password))
+ error_message = "Password must contain at least one uppercase letter"
+ }
+
+ validation {
+ condition = can(regex("[a-z]", var.admin_registration_password))
+ error_message = "Password must contain at least one lowercase letter"
+ }
+
+ validation {
+ condition = can(regex("\\d", var.admin_registration_password))
+ error_message = "Password must contain at least one digit"
+ }
+
+ validation {
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_registration_password))
+ error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
+ }
+}
+
+variable "admin_ssh_password" {
+ type = string
+ description = "Password to be used to ssh to the Admin Server"
+
+ validation {
+ condition = length(var.admin_ssh_password) >= 7
+ error_message = "Password must be at least 7 characters long"
+ }
+
+ validation {
+ condition = can(regex("[A-Z]", var.admin_ssh_password))
+ error_message = "Password must contain at least one uppercase letter"
+ }
+
+ validation {
+ condition = can(regex("[a-z]", var.admin_ssh_password))
+ error_message = "Password must contain at least one lowercase letter"
+ }
+
+ validation {
+ condition = can(regex("\\d", var.admin_ssh_password))
+ error_message = "Password must contain at least one digit"
+ }
+
+ validation {
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_ssh_password))
+ error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
+ }
+}
+
+variable "subnet_id" {
+ type = string
+ description = "Subnet id for the Admin Server"
+ validation {
+ condition = can(regex(".*Microsoft.Network/virtualNetworks/.*/subnets/.*", var.subnet_id))
+    error_message = "The variable must match the pattern '.*Microsoft.Network/virtualNetworks/.*/subnets/.*'"
+ }
+}
+
+variable "security_group_ids" {
+ type = list(string)
+ description = "Security group Ids to attach to the instance. If provided, no security groups are created and all allowed_*_cidrs variables are ignored."
+ validation {
+    # At most one security group can be provided
+ condition = length(var.security_group_ids) == 0 || length(var.security_group_ids) == 1
+ error_message = "Can't contain more than a single element"
+ }
+ validation {
+ condition = alltrue([for item in var.security_group_ids : can(regex(".*Microsoft.Network/networkSecurityGroups/.*", item))])
+    error_message = "One or more of the security group IDs is invalid. Each item should match the pattern '.*Microsoft.Network/networkSecurityGroups/.*'"
+ }
+ default = []
+}
+
+variable "allowed_analytics_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing the Analytics Server to access the DSF Admin Server instance"
+ validation {
+ condition = alltrue([for item in var.allowed_analytics_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_ssh_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing ssh access"
+ validation {
+ condition = alltrue([for item in var.allowed_ssh_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_web_console_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing web console access"
+ validation {
+ condition = alltrue([for item in var.allowed_web_console_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_hub_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing hub access"
+ validation {
+ condition = alltrue([for item in var.allowed_hub_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_all_cidrs" {
+ type = list(string)
+  description = "List of ingress CIDR patterns allowing access to all relevant protocols (e.g., the VNet CIDR range)"
+ validation {
+ condition = alltrue([for item in var.allowed_all_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "attach_persistent_public_ip" {
+ type = bool
+ default = true
+  description = "Create and attach a static public IP for the instance. If false, a dynamic public IP is used. Relevant only if the DRA Admin is deployed in a public subnet (ignored in a private subnet). Currently, due to a DRA limitation, this must be true."
+}
+
+variable "storage_details" {
+ type = object({
+ disk_size = number
+ volume_caching = string
+ storage_account_type = string
+ })
+ description = "Compute instance volume attributes for the Admin Server"
+ default = {
+ disk_size = 260
+ volume_caching = "ReadWrite"
+ storage_account_type = "Standard_LRS"
+ }
+}
+
+variable "send_usage_statistics" {
+ type = bool
+ default = true
+ description = "Set to true to send usage statistics."
+}
diff --git a/modules/azurerm/dra-admin/versions.tf b/modules/azurerm/dra-admin/versions.tf
new file mode 100644
index 000000000..b13422df7
--- /dev/null
+++ b/modules/azurerm/dra-admin/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.1, < 1.8.0"
+
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = ">=3.0.0"
+ }
+ }
+}
diff --git a/modules/azurerm/dra-analytics/README.md b/modules/azurerm/dra-analytics/README.md
new file mode 100644
index 000000000..83b53c835
--- /dev/null
+++ b/modules/azurerm/dra-analytics/README.md
@@ -0,0 +1,94 @@
+# DSF DRA Analytics
+[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
+
+This Terraform module provisions a DSF DRA Analytics instance on Azure as a Virtual Machine.
+
+## Requirements
+* Terraform, refer to [versions.tf](versions.tf) for supported versions.
+* An Azure account.
+* DRA image or VHD located in an Azure Storage account. [Request access to the VHD here](https://docs.google.com/forms/d/e/1FAIpQLSfCBUGHN04u2gK8IoxuHl4TLooBWUl7cK7ihS9Q5ZHwafNBHA/viewform).
+  Provide details for either the image or the VHD. When the VHD details are supplied, Terraform creates an image from the VHD and uses that image to create the Virtual Machine (see the VHD example in the Usage section below).
+
+**NOTE:** If you are not yet an Imperva customer, [please contact our team](https://www.imperva.com/contact-us/).
+
+## Resources Provisioned
+This Terraform module provisions several resources on Azure. These resources include:
+* A Virtual Machine instance for running the DSF Analytics Server software.
+* Security group rules to allow the required network access to and from the DSF Analytics Server instance.
+* An Azure Key Vault that holds the passwords.
+* An Azure network interface.
+
+The Virtual Machine instance provides the compute resources needed to run the DSF Analytics Server software. The security group rules control the inbound and outbound traffic to the instance. The Key Vault is used to store sensitive data (passwords).
+
+## Inputs
+
+The following input variables are **required**:
+
+* `resource_group`: Resource group to provision all the resources into
+* `subnet_id`: The ID of the subnet in which to launch the DSF Analytics Server instance
+* `ssh_public_key`: SSH public key to access the DSF Analytics Server instance
+* `image_vhd_details`: Image or VHD details used to create the Virtual Machine. Provide details for either the image or the VHD; when the VHD details are supplied, Terraform first creates an image from the VHD and then uses it to create the Virtual Machine
+* `admin_registration_password`: Password to be used to register Analytics Server to Admin Server
+* `analytics_ssh_password`: Password to be used to SSH to the Analytics Server instance
+* `archiver_password`: Password to be used to upload archive files for the Analytics Server
+* `admin_server_private_ip`: Private IP of the Admin Server (used for registration)
+* `admin_server_public_ip`: Public IP of the Admin Server (used to verify that the Analytics Server launched successfully)
+
+Refer to [variables.tf](variables.tf) for additional variables with default values and additional info.
+
+## Outputs
+
+Refer to [outputs](outputs.tf) or https://registry.terraform.io/modules/imperva/dsf-dra-analytics/aws/latest?tab=outputs.
+
+## Usage
+
+To use this module, add the following to your Terraform configuration:
+
+```
+provider "azurerm" {
+ features {}
+}
+
+module "dra_analytics" {
+ source = "imperva/dsf-dra-analytics/azurerm"
+
+  resource_group = azurerm_resource_group.example
+ subnet_id = azurerm_subnet.example.id
+ ssh_public_key = var.ssh_public_key
+
+ image_vhd_details = {
+ image = {
+ resource_group_name = var.image_details.resource_group_name
+ image_id = var.image_details.image_id
+ }
+ }
+ admin_registration_password = var.admin_registration_password
+ analytics_ssh_password = var.analytics_ssh_password
+ archiver_password = var.archiver_password
+
+ admin_server_public_ip = module.dra_admin.public_ip
+ admin_server_private_ip = module.dra_admin.private_ip
+
+ allowed_all_cidrs = var.allowed_all_cidrs
+}
+```
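+
+If only the VHD is available, below is a minimal sketch of the `vhd` variant of `image_vhd_details` (the field names come from [variables.tf](variables.tf); all values are placeholders):
+
+```
+  image_vhd_details = {
+    vhd = {
+      # Storage account and container that hold the uploaded DRA VHD
+      storage_account_name = "examplestorageaccount"
+      container_name       = "vhds"
+      # Path to the VHD blob inside the container
+      path_to_vhd          = "dra-analytics.vhd"
+    }
+  }
+```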
+
+To see a complete example of how to use this module in a DSF deployment with other modules, check out the [examples](../../../examples/azure/) directory.
+
+We recommend using a specific version of the module (and not the latest).
+See available released versions in the main repo README [here](https://github.com/imperva/dsfkit#version-history).
+
+Specify the module's version by adding the version parameter. For example:
+
+```
+module "dsf_dra_analytics" {
+  source  = "imperva/dsf-dra-analytics/azurerm"
+ version = "x.y.z"
+}
+```
+
+## Additional Information
+
+For more information about the DSF DRA Analytics and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/z-kb-articles-km/page/4e487f3c.html).
+
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/dra-analytics/image.tf b/modules/azurerm/dra-analytics/image.tf
new file mode 100644
index 000000000..8d7357849
--- /dev/null
+++ b/modules/azurerm/dra-analytics/image.tf
@@ -0,0 +1,28 @@
+locals {
+ # vm user
+ vm_default_user = "adminuser"
+ vm_user = var.vm_user != null ? var.vm_user : local.vm_default_user
+
+ create_image_from_vhd = var.image_vhd_details.vhd != null ? true : false
+ use_existing_image = var.image_vhd_details.image != null ? true : false
+
+ image_id = (local.use_existing_image ?
+ "${data.azurerm_subscription.subscription.id}/resourceGroups/${var.image_vhd_details.image.resource_group_name}/providers/Microsoft.Compute/images/${var.image_vhd_details.image.image_id}" :
+ "${azurerm_image.created_image[0].id}")
+}
+
+resource "azurerm_image" "created_image" {
+ count = local.create_image_from_vhd ? 1 : 0
+
+ name = join("-", [var.name, "image"])
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+
+ os_disk {
+ os_type = "Linux"
+ caching = "ReadWrite"
+ os_state = "Generalized"
+ blob_uri = "https://${var.image_vhd_details.vhd.storage_account_name}.blob.core.windows.net/${var.image_vhd_details.vhd.container_name}/${var.image_vhd_details.vhd.path_to_vhd}"
+ }
+ tags = var.tags
+}
diff --git a/modules/azurerm/dra-analytics/main.tf b/modules/azurerm/dra-analytics/main.tf
new file mode 100644
index 000000000..5e6ef0cd5
--- /dev/null
+++ b/modules/azurerm/dra-analytics/main.tf
@@ -0,0 +1,127 @@
+locals {
+ security_group_id = length(var.security_group_ids) == 0 ? azurerm_network_security_group.dsf_base_sg.id : var.security_group_ids[0]
+
+ incoming_folder_path = "/opt/itpba/incoming"
+
+ public_ip = azurerm_linux_virtual_machine.vm.public_ip_address
+ private_ip = azurerm_linux_virtual_machine.vm.private_ip_address
+
+ install_script = templatefile("${path.module}/setup.tftpl", {
+ vault_name = azurerm_key_vault.vault.name
+ analytics_archiver_password_name = azurerm_key_vault_secret.analytics_archiver_password.name
+ admin_analytics_registration_password_name = azurerm_key_vault_secret.admin_analytics_registration_password.name
+ analytics_ssh_password_secret_name = azurerm_key_vault_secret.ssh_password.name
+ archiver_user = var.archiver_user
+ archiver_password = var.archiver_password
+ admin_server_private_ip = var.admin_server_private_ip
+ })
+
+ readiness_script = templatefile("${path.module}/readiness.tftpl", {
+ admin_server_public_ip = try(var.admin_server_public_ip, var.admin_server_private_ip)
+ })
+}
+
+resource "azurerm_network_interface" "nic" {
+ name = var.name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+
+ ip_configuration {
+ name = join("-", [var.name, "nic"])
+ subnet_id = var.subnet_id
+ private_ip_address_allocation = "Dynamic"
+ }
+ tags = var.tags
+}
+
+resource "azurerm_network_interface_security_group_association" "nic_sg_association" {
+ network_interface_id = azurerm_network_interface.nic.id
+ network_security_group_id = local.security_group_id
+}
+
+resource "azurerm_linux_virtual_machine" "vm" {
+ name = var.name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+ size = var.instance_size
+ admin_username = local.vm_user
+
+ network_interface_ids = [
+ azurerm_network_interface.nic.id
+ ]
+
+ admin_ssh_key {
+ public_key = var.ssh_public_key
+ username = local.vm_user
+ }
+
+ os_disk {
+ disk_size_gb = var.storage_details.disk_size
+ caching = var.storage_details.volume_caching
+ storage_account_type = var.storage_details.storage_account_type
+ }
+
+ source_image_id = local.image_id
+
+ identity {
+ type = "UserAssigned"
+ identity_ids = [
+ azurerm_user_assigned_identity.user_assigned_identity.id
+ ]
+ }
+ custom_data = base64encode(local.install_script)
+
+ # Ignore changes to the custom_data attribute (Don't replace on userdata change)
+ lifecycle {
+ ignore_changes = [
+ custom_data
+ ]
+ }
+
+ tags = var.tags
+}
+
+resource "azurerm_user_assigned_identity" "user_assigned_identity" {
+ name = var.name
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+}
+
+data "azurerm_subscription" "subscription" {}
+
+resource "azurerm_role_assignment" "vm_identity_role_assignment" {
+ scope = data.azurerm_subscription.subscription.id
+ principal_id = azurerm_user_assigned_identity.user_assigned_identity.principal_id
+ role_definition_name = "Storage Blob Data Reader"
+}
+
+module "statistics" {
+ source = "../../../modules/azurerm/statistics"
+ count = var.send_usage_statistics ? 1 : 0
+
+ deployment_name = var.name
+ product = "DRA"
+ resource_type = "dra-analytics"
+ artifact = local.image_id
+ location = var.resource_group.location
+}
+
+resource "null_resource" "readiness" {
+ provisioner "local-exec" {
+ command = local.readiness_script
+ interpreter = ["/bin/bash", "-c"]
+ }
+ depends_on = [
+ azurerm_linux_virtual_machine.vm,
+ module.statistics
+ ]
+}
+
+module "statistics_success" {
+ source = "../../../modules/azurerm/statistics"
+ count = var.send_usage_statistics ? 1 : 0
+
+ id = module.statistics[0].id
+ status = "success"
+ depends_on = [null_resource.readiness]
+}
diff --git a/modules/azurerm/dra-analytics/outputs.tf b/modules/azurerm/dra-analytics/outputs.tf
new file mode 100644
index 000000000..1445681a8
--- /dev/null
+++ b/modules/azurerm/dra-analytics/outputs.tf
@@ -0,0 +1,33 @@
+output "public_ip" {
+  description = "Public IP address of the DRA Analytics instance"
+ value = local.public_ip
+ depends_on = [
+ azurerm_network_interface_security_group_association.nic_sg_association,
+ azurerm_role_assignment.vm_identity_role_assignment
+ ]
+}
+
+output "private_ip" {
+  description = "Private IP address of the DRA Analytics instance"
+ value = local.private_ip
+ depends_on = [
+ azurerm_network_interface_security_group_association.nic_sg_association,
+ azurerm_role_assignment.vm_identity_role_assignment
+ ]
+}
+
+output "archiver_user" {
+ value = var.archiver_user
+}
+
+output "archiver_password" {
+ value = var.archiver_password
+}
+
+output "incoming_folder_path" {
+ value = local.incoming_folder_path
+}
+
+output "ssh_user" {
+ value = "cbadmin"
+}
diff --git a/modules/azurerm/dra-analytics/readiness.tftpl b/modules/azurerm/dra-analytics/readiness.tftpl
new file mode 100644
index 000000000..339221dc0
--- /dev/null
+++ b/modules/azurerm/dra-analytics/readiness.tftpl
@@ -0,0 +1,8 @@
+ while true; do
+ response=$(curl -k -s -o /dev/null -w "%%{http_code}" --request GET 'https://${admin_server_public_ip}:8443/mvc/login')
+ if [ $response -eq 200 ]; then
+ exit 0
+ else
+ sleep 60
+ fi
+ done
\ No newline at end of file
diff --git a/modules/azurerm/dra-analytics/secrets.tf b/modules/azurerm/dra-analytics/secrets.tf
new file mode 100644
index 000000000..5b61f128c
--- /dev/null
+++ b/modules/azurerm/dra-analytics/secrets.tf
@@ -0,0 +1,68 @@
+data "azurerm_client_config" "current" {}
+
+resource "azurerm_key_vault" "vault" {
+ name = trim(substr(var.name, -24, -1), "-")
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+ sku_name = "standard"
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ enabled_for_deployment = true
+ soft_delete_retention_days = 7
+ purge_protection_enabled = false
+ tags = var.tags
+}
+
+resource "azurerm_key_vault_access_policy" "vault_vm_access_policy" {
+ key_vault_id = azurerm_key_vault.vault.id
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = azurerm_user_assigned_identity.user_assigned_identity.principal_id
+
+ secret_permissions = [
+ "Get",
+ ]
+}
+
+resource "azurerm_key_vault_access_policy" "vault_owner_access_policy" {
+ key_vault_id = azurerm_key_vault.vault.id
+ tenant_id = data.azurerm_client_config.current.tenant_id
+ object_id = data.azurerm_client_config.current.object_id
+ secret_permissions = [
+ "Delete",
+ "Get",
+ "Purge",
+ "Set",
+ ]
+}
+
+resource "azurerm_key_vault_secret" "analytics_archiver_password" {
+ name = join("-", [var.name, "analytics", "archiver", "password"])
+ value = var.archiver_password
+ key_vault_id = azurerm_key_vault.vault.id
+ content_type = "DRA Analytics archiver password"
+ tags = var.tags
+ depends_on = [
+ azurerm_key_vault_access_policy.vault_owner_access_policy
+ ]
+}
+
+resource "azurerm_key_vault_secret" "admin_analytics_registration_password" {
+ name = join("-", [var.name, "admin", "analytics", "registration", "password"])
+ value = var.admin_registration_password
+ key_vault_id = azurerm_key_vault.vault.id
+ content_type = "DRA admin registration password"
+ tags = var.tags
+ depends_on = [
+ azurerm_key_vault_access_policy.vault_owner_access_policy
+ ]
+}
+
+resource "azurerm_key_vault_secret" "ssh_password" {
+ name = join("-", [var.name, "analytics", "ssh", "password"])
+ value = var.analytics_ssh_password
+ key_vault_id = azurerm_key_vault.vault.id
+ content_type = "DRA Analytics ssh password"
+ tags = var.tags
+ depends_on = [
+ azurerm_key_vault_access_policy.vault_owner_access_policy
+ ]
+}
\ No newline at end of file
diff --git a/modules/azurerm/dra-analytics/setup.tftpl b/modules/azurerm/dra-analytics/setup.tftpl
new file mode 100644
index 000000000..4613c909b
--- /dev/null
+++ b/modules/azurerm/dra-analytics/setup.tftpl
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -x
+exec > >(tee /var/log/user-data.log|logger -t user-data ) 2>&1
+echo BEGIN
+
+function wait-for-admin(){
+ while ! nc -z ${admin_server_private_ip} 8443; do
+ sleep 0.1
+ done
+}
+
+function install-azure-cli(){
+ rpm --import https://packages.microsoft.com/keys/microsoft.asc
+ dnf install -y https://packages.microsoft.com/config/rhel/9.0/packages-microsoft-prod.rpm
+ dnf install -y https://packages.microsoft.com/config/rhel/8/packages-microsoft-prod.rpm
+ dnf install azure-cli -y
+ az login --identity
+}
+
+date '+%Y-%m-%d %H:%M:%S'
+my_nameserver=$(ifconfig eth0 | grep "inet " | awk '{print $2}')
+my_ip=$(ifconfig eth0 | grep "inet " | awk '{print $2}')
+my_default_gw=$(ip route show | grep default | awk '{print $3}')
+my_cidr=$(awk -F. '{
+ split($0, octets)
+ for (i in octets) {
+ mask += 8 - log(2**8 - octets[i])/log(2);
+ }
+ print mask
+}' <<< $(ifconfig eth0 | grep "inet " | awk '{print $4}'))
+sudo su
+export ITP_HOME=/opt/itp
+export ITPBA_HOME=/opt/itpba
+export CATALINA_HOME=/opt/apache-tomcat
+chmod +x /opt/itp_global_conf/auto_deploy.sh
+wait-for-admin
+sed -i 's/^hosts:.*/hosts: files dns/' /etc/nsswitch.conf
+
+install-azure-cli
+
+archiver_password=$(az keyvault secret show --name ${analytics_archiver_password_name} --vault-name ${vault_name} --query "value" --output tsv)
+admin_registration_password=$(az keyvault secret show --name ${admin_analytics_registration_password_name} --vault-name ${vault_name} --query "value" --output tsv)
+ssh_password=$(az keyvault secret show --name ${analytics_ssh_password_secret_name} --vault-name ${vault_name} --query "value" --output tsv)
+
+/opt/itp_global_conf/auto_deploy.sh --hostname "$(hostname)" --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$admin_registration_password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Analytics" --analytics-user "${archiver_user}" --analytics-password "$archiver_password" --admin-server-ip "${admin_server_private_ip}"
+
+# Workaround: allow SSH to the Analytics server with username and password, because there is currently an issue with exchanging keys from the hub to the Analytics server
+sed -i 's/.*PasswordAuthentication.*/PasswordAuthentication yes/g' /etc/ssh/sshd_config
+systemctl restart sshd
+
+echo $ssh_password | passwd --stdin cbadmin
+
+#/opt/itp_global_conf/auto_deploy_cloud.sh --hostname "$(hostname)" --dhcp 1 --ip-address "$my_ip" --dns-servers "$my_nameserver" --registration-password "$admin_registration_password" --cidr "$my_cidr" --default-gateway "$my_default_gw" --machine-type "Analytics" --analytics-user "${archiver_user}" --analytics-password "$archiver_password" --admin-server-ip "${admin_server_private_ip}"
diff --git a/modules/azurerm/dra-analytics/sg.tf b/modules/azurerm/dra-analytics/sg.tf
new file mode 100644
index 000000000..d8b228b10
--- /dev/null
+++ b/modules/azurerm/dra-analytics/sg.tf
@@ -0,0 +1,74 @@
+locals {
+ security_groups_config = [ # https://docs.imperva.com/bundle/v4.11-data-risk-analytics-installation-guide/page/63052.htm
+ {
+ name = ["dra", "admin"]
+ internet_access = false
+ udp = []
+ tcp = [8443]
+ cidrs = concat(var.allowed_admin_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["other"]
+ internet_access = true
+ udp = []
+ tcp = [22]
+ cidrs = concat(var.allowed_ssh_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["agent", "gateway"]
+ internet_access = false
+ udp = []
+ tcp = [22]
+ cidrs = concat(var.allowed_agent_gateways_cidrs, var.allowed_all_cidrs)
+ },
+ {
+ name = ["hub"]
+ internet_access = false
+ udp = []
+ tcp = [22]
+ cidrs = concat(var.allowed_hub_cidrs, var.allowed_all_cidrs)
+ }
+ ]
+
+ # Skip sg creation if external sg list is given
+ _security_groups_config = length(var.security_group_ids) == 0 ? local.security_groups_config : []
+}
+
+##############################################################################
+### Ingress security group
+##############################################################################
+
+resource "azurerm_network_security_group" "dsf_base_sg" {
+ name = var.name
+ location = var.resource_group.location
+ resource_group_name = var.resource_group.name
+
+ dynamic "security_rule" {
+ for_each = { for idx, config in local._security_groups_config : idx => config if length(config.cidrs) > 0 }
+ content {
+ name = join("-", [var.name, "tcp", join("-", security_rule.value.name)])
+ priority = 100 + security_rule.key
+ direction = "Inbound"
+ access = "Allow"
+ protocol = "Tcp"
+ source_port_range = "*"
+ destination_port_ranges = security_rule.value.tcp
+      # Azure doesn't allow overlapping CIDR blocks in a single rule; the expression below drops CIDRs that are fully contained in another CIDR in the list
+ source_address_prefixes = [for k, v in { for v in security_rule.value.cidrs : v => {
+ cidr = v,
+ min_ip_int = (tonumber(split(".", cidrhost(v, 0))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, 0))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, 0))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, 0))[3])
+ max_ip_int = (tonumber(split(".", cidrhost(v, -1))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, -1))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, -1))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, -1))[3])
+ } } : v.cidr if !anytrue([for i in { for v in security_rule.value.cidrs : v => {
+ cidr = v,
+ min_ip_int = (tonumber(split(".", cidrhost(v, 0))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, 0))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, 0))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, 0))[3])
+ max_ip_int = (tonumber(split(".", cidrhost(v, -1))[0]) * pow(256, 3)) + (tonumber(split(".", cidrhost(v, -1))[1]) * pow(256, 2)) + (tonumber(split(".", cidrhost(v, -1))[2]) * pow(256, 1)) + tonumber(split(".", cidrhost(v, -1))[3])
+ } } : v.max_ip_int <= i.max_ip_int && v.min_ip_int >= i.min_ip_int if v.cidr != i.cidr])]
+ destination_address_prefix = "*"
+ # The below setup is a workaround for "Provider produced inconsistent final plan" error
+ description = ""
+ destination_port_range = ""
+ source_address_prefix = ""
+ }
+ }
+ tags = var.tags
+}
\ No newline at end of file
diff --git a/modules/azurerm/dra-analytics/variables.tf b/modules/azurerm/dra-analytics/variables.tf
new file mode 100644
index 000000000..b69f4f5bf
--- /dev/null
+++ b/modules/azurerm/dra-analytics/variables.tf
@@ -0,0 +1,248 @@
+variable "name" {
+ type = string
+ default = "imperva-dsf-dra-analytics"
+ description = "Friendly name to identify all resources"
+ validation {
+ condition = length(var.name) >= 3
+ error_message = "Name must be at least 3 characters"
+ }
+ validation {
+ condition = can(regex("^\\p{L}.*", var.name))
+ error_message = "Name must start with a letter"
+ }
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
+
+variable "resource_group" {
+ type = object({
+ name = string
+ location = string
+ })
+ description = "Resource group details"
+}
+
+variable "subnet_id" {
+ type = string
+ description = "Subnet id for the Analytics Server"
+ validation {
+ condition = can(regex(".*Microsoft.Network/virtualNetworks/.*/subnets/.*", var.subnet_id))
+    error_message = "The variable must match the pattern '.*Microsoft.Network/virtualNetworks/.*/subnets/.*'"
+ }
+}
+
+variable "instance_size" {
+ type = string
+ default = "Standard_E4as_v5" # 4 cores & 32GB ram
+ description = "VM instance size for the Analytics Server"
+}
+
+variable "storage_details" {
+ type = object({
+ disk_size = number
+ volume_caching = string
+ storage_account_type = string
+ })
+ description = "Compute instance volume attributes for the Analytics Server"
+ default = {
+ disk_size = 1010
+ volume_caching = "ReadWrite"
+ storage_account_type = "Standard_LRS"
+ }
+}
+
+variable "ssh_public_key" {
+ type = string
+ description = "SSH public key to access machine"
+ nullable = false
+}
+
+variable "image_vhd_details" {
+ type = object({
+ image = optional(object({
+ resource_group_name = string
+ image_id = string
+ }))
+ vhd = optional(object({
+ path_to_vhd = string
+ storage_account_name = string
+ container_name = string
+ }))
+ })
+  description = "Image or VHD details for the Analytics Server"
+ default = null
+
+ validation {
+ condition = try((var.image_vhd_details.image != null && var.image_vhd_details.vhd == null || (var.image_vhd_details.image == null && var.image_vhd_details.vhd != null)), false)
+ error_message = "Either one of 'image' or 'vhd' should be specified but not both."
+ }
+ validation {
+ condition = var.image_vhd_details.image == null || try(var.image_vhd_details.image.resource_group_name != null && var.image_vhd_details.image.image_id != null, false)
+ error_message = "Image value must either be null or specified for all"
+ }
+ validation {
+ condition = var.image_vhd_details.vhd == null || try(var.image_vhd_details.vhd.path_to_vhd != null && var.image_vhd_details.vhd.storage_account_name != null && var.image_vhd_details.vhd.container_name != null, false)
+ error_message = "VHD value must either be null or specified for all"
+ }
+}
+
+variable "vm_user" {
+ type = string
+ default = "cbadmin"
+ description = "VM user to use for SSH. Keep empty to use the default user."
+}
+
+variable "admin_registration_password" {
+ type = string
+ description = "Password to be used to register Analytics Server to Admin Server"
+
+ validation {
+ condition = length(var.admin_registration_password) >= 7
+ error_message = "Password must be at least 7 characters long"
+ }
+
+ validation {
+ condition = can(regex("[A-Z]", var.admin_registration_password))
+ error_message = "Password must contain at least one uppercase letter"
+ }
+
+ validation {
+ condition = can(regex("[a-z]", var.admin_registration_password))
+ error_message = "Password must contain at least one lowercase letter"
+ }
+
+ validation {
+ condition = can(regex("\\d", var.admin_registration_password))
+ error_message = "Password must contain at least one digit"
+ }
+
+ validation {
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.admin_registration_password))
+ error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
+ }
+}
+
+variable "analytics_ssh_password" {
+ type = string
+ description = "Password to be used to ssh to the Analytics server"
+
+ validation {
+ condition = length(var.analytics_ssh_password) >= 7
+ error_message = "Password must be at least 7 characters long"
+ }
+
+ validation {
+ condition = can(regex("[A-Z]", var.analytics_ssh_password))
+ error_message = "Password must contain at least one uppercase letter"
+ }
+
+ validation {
+ condition = can(regex("[a-z]", var.analytics_ssh_password))
+ error_message = "Password must contain at least one lowercase letter"
+ }
+
+ validation {
+ condition = can(regex("\\d", var.analytics_ssh_password))
+ error_message = "Password must contain at least one digit"
+ }
+
+ validation {
+ condition = can(regex("[*+=#%^:/~.,\\[\\]_]", var.analytics_ssh_password))
+ error_message = "Password must contain at least one of the following special characters: *+=#%^:/~.,[]_"
+ }
+}
+
+variable "archiver_user" {
+ type = string
+ default = "archiver-user"
+ description = "User to be used to upload archive files for the Analytics server"
+}
+
+variable "archiver_password" {
+ type = string
+ description = "Password to be used to upload archive files for analysis"
+}
+
+variable "admin_server_private_ip" {
+ type = string
+ description = "Private IP of the Admin Server"
+}
+
+variable "admin_server_public_ip" {
+ type = string
+ description = "Public IP of the Admin Server"
+}
+
+variable "security_group_ids" {
+ type = list(string)
+ description = "Security group Ids to attach to the instance. If provided, no security groups are created and all allowed_*_cidrs variables are ignored."
+ validation {
+ condition = length(var.security_group_ids) == 0 || length(var.security_group_ids) == 1
+ error_message = "Can't contain more than a single element"
+ }
+ validation {
+ condition = alltrue([for item in var.security_group_ids : can(regex(".*Microsoft.Network/networkSecurityGroups/.*", item))])
+    error_message = "One or more of the security group IDs is invalid. Each item should match the pattern '.*Microsoft.Network/networkSecurityGroups/.*'"
+ }
+ default = []
+}
+
+variable "allowed_admin_cidrs" {
+ type = list(string)
+  description = "List of ingress CIDR patterns allowing the Admin Server to access the DSF Analytics Server instance"
+ validation {
+ condition = alltrue([for item in var.allowed_admin_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_hub_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing hub access"
+ validation {
+ condition = alltrue([for item in var.allowed_hub_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_ssh_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing ssh access"
+ validation {
+ condition = alltrue([for item in var.allowed_ssh_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_agent_gateways_cidrs" {
+ type = list(string)
+ description = "List of ingress CIDR patterns allowing agent gateway access for legacy deployment"
+ validation {
+ condition = alltrue([for item in var.allowed_agent_gateways_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "allowed_all_cidrs" {
+ type = list(string)
+  description = "List of ingress CIDR patterns allowing access to all relevant protocols (e.g., the VNet CIDR range)"
+ validation {
+ condition = alltrue([for item in var.allowed_all_cidrs : can(cidrnetmask(item))])
+ error_message = "Each item of this list must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+ default = []
+}
+
+variable "send_usage_statistics" {
+ type = bool
+ default = true
+ description = "Set to true to send usage statistics."
+}
diff --git a/modules/azurerm/dra-analytics/versions.tf b/modules/azurerm/dra-analytics/versions.tf
new file mode 100644
index 000000000..b13422df7
--- /dev/null
+++ b/modules/azurerm/dra-analytics/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.1, < 1.8.0"
+
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = ">=3.0.0"
+ }
+ }
+}
diff --git a/modules/azurerm/hub/README.md b/modules/azurerm/hub/README.md
index e87327f99..b90bb05c7 100644
--- a/modules/azurerm/hub/README.md
+++ b/modules/azurerm/hub/README.md
@@ -98,4 +98,4 @@ SSH access is required to provision this module. To SSH into the DSF Hub instanc
For more information about the DSF Hub and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v4.12-sonar-user-guide/page/80401.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/hub/dra_association.tf b/modules/azurerm/hub/dra_association.tf
new file mode 100644
index 000000000..756f1f062
--- /dev/null
+++ b/modules/azurerm/hub/dra_association.tf
@@ -0,0 +1,37 @@
+locals {
+  # We use a single password for all services and support only one DRA
+ admin_password = var.dra_details == null ? "" : urlencode(var.dra_details.password)
+ archiver_username = var.dra_details == null ? "" : var.dra_details.archiver_username
+ archiver_password = var.dra_details == null ? "" : urlencode(var.dra_details.archiver_password)
+ admin_address = var.dra_details == null ? "" : var.dra_details.address
+ dra_association_commands = var.dra_details == null ? "" : <<-EOF
+ curl -k --max-time 10000 -X POST -G 'https://127.0.0.1:8443/register-to-dra' -d adminIpOrHostname=${local.admin_address} -d adminRegistrationPassword=${local.admin_password} -d adminReportingServer=true -d analyticsArchiveUsername=${local.archiver_username} -d analyticsArchivePassword=${local.archiver_password} -d resumeDraJobs=true --header "Authorization: Bearer ${module.hub_instance.access_tokens["archiver"].token}"
+ EOF
+}
+
+resource "null_resource" "dra_association" {
+ count = var.dra_details != null ? 1 : 0
+
+ connection {
+ type = "ssh"
+ user = module.hub_instance.ssh_user
+ private_key = file(var.ssh_key.ssh_private_key_file_path)
+ host = var.use_public_ip ? module.hub_instance.public_ip : module.hub_instance.private_ip
+
+ bastion_host = local.bastion_host
+ bastion_private_key = local.bastion_private_key
+ bastion_user = local.bastion_user
+
+ script_path = local.script_path
+ }
+
+ provisioner "remote-exec" {
+ inline = concat([local.dra_association_commands])
+ }
+ depends_on = [
+ module.hub_instance.ready
+ ]
+ triggers = {
+ key = local.dra_association_commands
+ }
+}
diff --git a/modules/azurerm/hub/main.tf b/modules/azurerm/hub/main.tf
index adc61a687..57eb98284 100644
--- a/modules/azurerm/hub/main.tf
+++ b/modules/azurerm/hub/main.tf
@@ -45,7 +45,7 @@ module "hub_instance" {
name = var.friendly_name
subnet_id = var.subnet_id
public_ssh_key = var.ssh_key.ssh_public_key
- instance_type = var.instance_type
+ instance_size = var.instance_size
storage_details = var.storage_details
vm_image = var.vm_image
vm_image_id = var.vm_image_id
diff --git a/modules/azurerm/hub/variables.tf b/modules/azurerm/hub/variables.tf
index 12fac5209..9b8201b45 100644
--- a/modules/azurerm/hub/variables.tf
+++ b/modules/azurerm/hub/variables.tf
@@ -109,10 +109,10 @@ variable "allowed_all_cidrs" {
default = []
}
-variable "instance_type" {
+variable "instance_size" {
type = string
default = "Standard_E8_v5" # 8 cores & 64GB ram
- description = "instance type for the DSF hub"
+ description = "instance size for the DSF hub"
}
variable "storage_details" {
@@ -296,9 +296,29 @@ variable "base_directory" {
description = "The base directory where all Sonar related directories will be installed"
}
+variable "dra_details" {
+  description = "Details of the DSF DRA Admin Server to onboard to the Sonar Hub"
+ type = object({
+ name = string
+ address = string
+ password = string
+ archiver_username = string
+ archiver_password = string
+ })
+ validation {
+ condition = (var.dra_details == null || (can(var.dra_details.name) && can(var.dra_details.address)))
+ error_message = "Each DRA Admin must specify name and address"
+ }
+ validation {
+    condition = (var.dra_details == null || (can(var.dra_details.password) && can(var.dra_details.archiver_username) && can(var.dra_details.archiver_password)))
+ error_message = "Each DRA Admin must specify admin password, archiver username and archiver password"
+ }
+ default = null
+}
+
variable "cloud_init_timeout" {
type = number
- default = 900
+ default = 1200
description = "Max time to wait for the machine to start"
}
diff --git a/modules/azurerm/hub/versions.tf b/modules/azurerm/hub/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/hub/versions.tf
+++ b/modules/azurerm/hub/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/mssql-db/README.md b/modules/azurerm/mssql-db/README.md
new file mode 100644
index 000000000..57526784f
--- /dev/null
+++ b/modules/azurerm/mssql-db/README.md
@@ -0,0 +1,57 @@
+# DSF MsSQL
+[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
+
+This Terraform module provisions an MsSQL instance and configures auditing on it.
+It is intended for PoC, PoV and lab purposes only.
+
+## Requirements
+* Terraform v0.13 and up
+* An Azure account
+* Permissions to create an MsSQL server, an Event Hub and a Storage Account (for configuring the audit). Required permissions can be found [here](/permissions_samples/azure/OnboardMssqlRdsWithDataPermissions.txt).
+
+## Resources Provisioned
+This Terraform module provisions several resources on Azure to create and onboard the MsSQL database with synthetic data on it. These resources include:
+* An MsSQL server and a database populated with sample data
+* Firewall rules to allow the required network access to the MsSQL instance
+* An Event Hub namespace, an Event Hub, a Storage Account and diagnostic settings used to collect the audit logs
+
+## Inputs
+
+Refer to [variables.tf](variables.tf) for additional variables with default values and additional info.
+
+## Outputs
+
+Refer to [output.tf](output.tf) for the module outputs.
+
+## Usage
+
+To use this module, add the following to your Terraform configuration:
+
+```
+provider "azurerm" {
+ features {
+ }
+}
+
+module "mssql" {
+ source = "imperva/dsf-poc-db-onboarder/azurerm//modules/mssql-db"
+ resource_group = var.resource_group
+}
+```
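+
+The module's outputs (see [output.tf](output.tf)) can then be referenced from the calling configuration. For example, a small sketch that surfaces the ready-made `sqlcmd` command generated by the module (the command embeds the demo password, so use it for lab purposes only):
+
+```
+output "mssql_sql_cmd" {
+  value = module.mssql.sql_cmd
+}
+```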
+
+To see a complete example of how to use this module in a DSF deployment with other modules, check out the [examples](../../../examples/) directory.
+
+We recommend using a specific version of the module (and not the latest).
+See available released versions in the main repo README [here](https://github.com/imperva/dsfkit#version-history).
+
+Specify the module's version by adding the version parameter. For example:
+
+```
+module "dsf_mssql" {
+ source = "imperva/dsf-poc-db-onboarder/azurerm//modules/mssql-db"
+ version = "x.y.z"
+}
+```
+
+## Additional Information
+
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/mssql-db/main.tf b/modules/azurerm/mssql-db/main.tf
new file mode 100644
index 000000000..a3a48c1e7
--- /dev/null
+++ b/modules/azurerm/mssql-db/main.tf
@@ -0,0 +1,113 @@
+resource "random_password" "db_password" {
+ length = 15
+ special = true
+}
+
+resource "random_pet" "db_id" {}
+
+locals {
+ db_username = var.username
+ db_password = length(var.password) > 0 ? var.password : random_password.db_password.result
+ db_identifier = length(var.identifier) > 0 ? var.identifier : "edsf-db-demo-${random_pet.db_id.id}"
+ db_address = "${local.db_identifier}.database.windows.net"
+ server_name = local.db_identifier
+ database_name = local.db_identifier
+ eventhub_ns_name = local.db_identifier
+ eventhub_name = local.db_identifier
+}
+
+resource "azurerm_mssql_server" "server" {
+ name = local.server_name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+ version = "12.0"
+ administrator_login = local.db_username
+ administrator_login_password = local.db_password
+ minimum_tls_version = "1.2"
+
+ tags = var.tags
+}
+
+resource "azurerm_mssql_firewall_rule" "allow_inbound" {
+ count = length(var.security_group_ingress_cidrs)
+
+ name = join("-", [local.server_name, count.index])
+ server_id = azurerm_mssql_server.server.id
+ start_ip_address = cidrhost(var.security_group_ingress_cidrs[count.index], 0)
+ end_ip_address = cidrhost(var.security_group_ingress_cidrs[count.index], -1)
+}
+
+resource "azurerm_mssql_database" "db" {
+ name = local.database_name
+ server_id = azurerm_mssql_server.server.id
+ sample_name = "AdventureWorksLT"
+ tags = var.tags
+}
+
+data "azurerm_subscription" "current" {}
+
+resource "azurerm_mssql_server_extended_auditing_policy" "policy" {
+ server_id = azurerm_mssql_server.server.id
+ storage_endpoint = azurerm_storage_account.sa.primary_blob_endpoint
+ storage_account_access_key = azurerm_storage_account.sa.primary_access_key
+ storage_account_access_key_is_secondary = false
+ retention_in_days = 0
+
+ enabled = true
+ log_monitoring_enabled = true
+
+ storage_account_subscription_id = data.azurerm_subscription.current.subscription_id
+}
+
+resource "azurerm_eventhub_namespace" "ns" {
+ name = local.eventhub_ns_name
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+ sku = "Standard"
+ tags = var.tags
+}
+
+resource "azurerm_eventhub" "eventhub" {
+ name = local.eventhub_name
+ namespace_name = azurerm_eventhub_namespace.ns.name
+ resource_group_name = var.resource_group.name
+
+ partition_count = 2
+ message_retention = 1
+}
+
+data "azurerm_eventhub_namespace_authorization_rule" "auth_rule" {
+ name = "RootManageSharedAccessKey"
+ namespace_name = azurerm_eventhub_namespace.ns.name
+ resource_group_name = var.resource_group.name
+}
+
+resource "azurerm_monitor_diagnostic_setting" "settings" {
+ name = "sonar_diagnostic_settings"
+  target_resource_id = "${azurerm_mssql_database.db.server_id}/databases/master" # creates an explicit dependency on the database
+
+ eventhub_authorization_rule_id = data.azurerm_eventhub_namespace_authorization_rule.auth_rule.id
+ eventhub_name = azurerm_eventhub.eventhub.name
+
+ enabled_log {
+ category = "SQLSecurityAuditEvents"
+ }
+}
+
+resource "azurerm_storage_account" "sa" {
+ name = "sonar${replace(random_pet.db_id.id, "-", "")}"
+ resource_group_name = var.resource_group.name
+ location = var.resource_group.location
+
+ account_tier = "Standard"
+ account_replication_type = "LRS"
+ account_kind = "StorageV2"
+
+ allow_nested_items_to_be_public = false
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ tags = var.tags
+}
diff --git a/modules/azurerm/mssql-db/output.tf b/modules/azurerm/mssql-db/output.tf
new file mode 100644
index 000000000..a51414789
--- /dev/null
+++ b/modules/azurerm/mssql-db/output.tf
@@ -0,0 +1,39 @@
+output "db_username" {
+ value = "${local.db_username}@${local.db_identifier}"
+}
+
+output "db_password" {
+ value = nonsensitive(local.db_password)
+}
+
+output "db_name" {
+ value = local.db_identifier
+}
+
+output "db_identifier" {
+ value = local.db_identifier
+}
+
+output "db_address" {
+ value = local.db_address
+}
+
+output "db_id" {
+ value = azurerm_mssql_database.db.id
+}
+
+output "db_server_id" {
+ value = azurerm_mssql_server.server.id
+}
+
+output "db_engine" {
+ value = "mssql"
+}
+
+output "db_port" {
+ value = 1433
+}
+
+output "sql_cmd" {
+ value = "sqlcmd -S ${local.db_address} --database-name ${local.db_identifier} -U ${local.db_username}@${local.db_identifier} -P'${nonsensitive(local.db_password)}' -Q 'SELECT AddressID, AddressLine1, AddressLine2, City, StateProvince, CountryRegion, PostalCode, rowguid, ModifiedDate FROM SalesLT.Address;'"
+}
diff --git a/modules/azurerm/mssql-db/variables.tf b/modules/azurerm/mssql-db/variables.tf
new file mode 100644
index 000000000..4a094be16
--- /dev/null
+++ b/modules/azurerm/mssql-db/variables.tf
@@ -0,0 +1,54 @@
+variable "resource_group" {
+ type = object({
+ name = string
+ location = string
+ })
+ description = "Resource group details"
+}
+
+variable "username" {
+ type = string
+ description = "Master username must contain 1–16 alphanumeric characters, the first character must be a letter, and name cannot be a word reserved by the database engine."
+ default = "edsf-admin"
+ validation {
+ condition = length(var.username) > 1
+    error_message = "Master username must be at least 2 characters"
+ }
+}
+
+variable "password" {
+ type = string
+ description = "Master password must contain 8–41 printable ASCII characters, and cannot contain /, \", @, or a space."
+ default = ""
+ validation {
+ condition = length(var.password) == 0 || length(var.password) > 7
+    error_message = "Master password must be at least 8 characters"
+ }
+}
+
+variable "identifier" {
+ type = string
+ description = "Name of your MsSQL DB from 3 to 63 alphanumeric characters or hyphens, first character must be a letter, must not end with a hyphen or contain two consecutive hyphens."
+ default = ""
+ validation {
+    condition = length(var.identifier) == 0 || length(var.identifier) >= 3
+    error_message = "Identifier must be at least 3 characters"
+ }
+}
+
+variable "security_group_ingress_cidrs" {
+ type = list(string)
+ description = "List of allowed ingress cidr ranges for access to the database"
+ validation {
+ condition = alltrue([
+ for address in var.security_group_ingress_cidrs : can(cidrnetmask(address))
+ ]) && (length(var.security_group_ingress_cidrs) > 0)
+ error_message = "Each item of the 'security_group_ingress_cidrs' must be in a valid CIDR block format. For example: [\"10.106.108.0/25\"]"
+ }
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
diff --git a/modules/azurerm/mssql-db/versions.tf b/modules/azurerm/mssql-db/versions.tf
new file mode 100644
index 000000000..b13422df7
--- /dev/null
+++ b/modules/azurerm/mssql-db/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.1, < 1.8.0"
+
+ required_providers {
+ azurerm = {
+ source = "hashicorp/azurerm"
+ version = ">=3.0.0"
+ }
+ }
+}
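
For orientation, a minimal caller sketch for the new mssql-db module; this is a hedged example, not taken from the repository: the resource group, CIDR, and tag values are placeholders and the relative source path is illustrative.

module "mssql_db" {
  source = "../../modules/azurerm/mssql-db" # illustrative relative path

  resource_group = {
    name     = "example-rg" # placeholder
    location = "East US"    # placeholder
  }

  # username, password and identifier are omitted to fall back to the module defaults
  security_group_ingress_cidrs = ["10.0.0.0/16"] # placeholder CIDR

  tags = {
    environment = "poc"
  }
}

output "mssql_address" {
  value = module.mssql_db.db_address
}
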
diff --git a/modules/azurerm/mx/README.md b/modules/azurerm/mx/README.md
index f228a2ec2..dca01fa1d 100644
--- a/modules/azurerm/mx/README.md
+++ b/modules/azurerm/mx/README.md
@@ -84,4 +84,4 @@ API access is required to provision this module. Please make sure to pass the re
For more information about the DSF MX and its features, refer to the official documentation [here](https://docs.imperva.com/bundle/v14.11-dam-management-server-manager-user-guide/page/10068.htm).
-For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.5).
\ No newline at end of file
+For additional information about DSF deployment using terraform, refer to the main repo README [here](https://github.com/imperva/dsfkit/tree/1.7.8).
\ No newline at end of file
diff --git a/modules/azurerm/mx/configuration.tf b/modules/azurerm/mx/configuration.tf
index fb88285b3..85cf275d8 100644
--- a/modules/azurerm/mx/configuration.tf
+++ b/modules/azurerm/mx/configuration.tf
@@ -3,6 +3,7 @@ locals {
configuration_elements = concat(
local.service_group_configuration,
+ local.dra_configuration,
local.hub_configuration
)
commands = <<-EOF
diff --git a/modules/azurerm/mx/dra.tf b/modules/azurerm/mx/dra.tf
new file mode 100644
index 000000000..1ef21d652
--- /dev/null
+++ b/modules/azurerm/mx/dra.tf
@@ -0,0 +1,76 @@
+locals {
+ dra_action_set = "Send to DRA Behavior Analytics"
+  # TODO: this currently does not work due to a bug in the MX
+ dra_all_events_audit_policy = "CounterBreach for Database - All Events"
+ dra_all_logins_audit_policy = "CounterBreach for Database - Logins Logouts"
+
+ dra_configuration = var.dra_details == null ? [] : [
+ {
+ name = "send_to_dra_action_set"
+ method = "POST"
+ url_path = "SecureSphere/api/v1/conf/actionSets/${local.dra_action_set}"
+ payload = jsonencode({ "type" : "archiving" })
+ },
+ {
+ name = "send_to_dra_action_set_action"
+ method = "POST"
+ url_path = "SecureSphere/api/v1/conf/actionSets/${local.dra_action_set}/scp"
+ payload = jsonencode({
+ "type" : "ScpArchive",
+ "host" : try(var.dra_details.address, null),
+ "port" : try(var.dra_details.port, null),
+ "password" : try(var.dra_details.password, null),
+ "username" : try(var.dra_details.username, null),
+ "remoteDirectory" : try(var.dra_details.remoteDirectory, null),
+ "useAuthenticationKey" : false,
+ "authenticationKeyPath" : " ",
+ "authenticationKeyPassphrase" : " "
+ }
+ )
+ },
+ {
+ name = "dra_all_events_audit_policy"
+ method = "PUT"
+ url_path = "SecureSphere/api/v1/conf/auditPolicies/${local.dra_all_events_audit_policy}"
+ payload = jsonencode({
+ "counterbreach-policy-enabled" : true,
+ "archiving-action-set" : local.dra_action_set,
+ "archiving-settings" : "Default Archiving Settings"
+ "archive-scheduling" : {
+ "occurs" : "recurring",
+ "recurring" : {
+ "frequency" : "daily",
+ "daily" : {
+ "every-number-of-days" : 1
+ },
+ "starting-from" : formatdate("YYYY-MM-DD", timestamp()),
+ "at-time" : "03:00:00"
+ }
+ }
+ }
+ )
+ },
+ {
+ name = "dra_all_logins_audit_policy"
+ method = "PUT"
+      url_path = "SecureSphere/api/v1/conf/auditPolicies/${local.dra_all_logins_audit_policy}"
+ payload = jsonencode({
+ "counterbreach-policy-enabled" : true,
+ "archiving-action-set" : local.dra_action_set,
+ "archiving-settings" : "Default Archiving Settings"
+ "archive-scheduling" : {
+ "occurs" : "recurring",
+ "recurring" : {
+ "frequency" : "daily",
+ "daily" : {
+ "every-number-of-days" : 1
+ },
+ "starting-from" : formatdate("YYYY-MM-DD", timestamp()),
+ "at-time" : "02:30:00"
+ }
+ }
+ }
+ )
+ }
+ ]
+}
\ No newline at end of file
diff --git a/modules/azurerm/mx/variables.tf b/modules/azurerm/mx/variables.tf
index 48ffcca32..d6f9a7d35 100644
--- a/modules/azurerm/mx/variables.tf
+++ b/modules/azurerm/mx/variables.tf
@@ -211,6 +211,18 @@ variable "create_server_group" {
default = false
}
+variable "dra_details" {
+ description = "Details of the DRA for sending audit logs in the legacy format. More info in https://docs.imperva.com/bundle/v4.14-data-risk-analytics-installation-guide/page/60553.htm"
+ type = object({
+ address = string
+ port = number
+ username = string
+ password = string
+ remoteDirectory = string
+ })
+ default = null
+}
+
variable "hub_details" {
description = "Details of the DSF hub for sending audit logs"
type = object({
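
A hedged sketch of passing the new dra_details input to the mx module; all values below are placeholders, and in a real deployment they would typically come from a DRA Analytics module's outputs and a secret.

module "mx" {
  source = "../../modules/azurerm/mx" # illustrative relative path

  # ... the other required mx inputs are omitted for brevity ...

  dra_details = {
    address         = "10.0.1.10"                  # placeholder DRA Analytics address
    port            = 22
    username        = "cbadmin"                    # placeholder archiver user
    password        = var.dra_archiver_password    # hypothetical variable
    remoteDirectory = "/opt/itp/incoming"          # placeholder remote directory
  }
}
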
diff --git a/modules/azurerm/mx/versions.tf b/modules/azurerm/mx/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/mx/versions.tf
+++ b/modules/azurerm/mx/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/poc-db-onboarder/README.md b/modules/azurerm/poc-db-onboarder/README.md
new file mode 100644
index 000000000..a32fa2b45
--- /dev/null
+++ b/modules/azurerm/poc-db-onboarder/README.md
@@ -0,0 +1,8 @@
+# DSF POC DB Onboarder
+[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
+
+## Sonar versions
+4.11 and up
+
+## Requirements
+* [Terraform version](versions.tf)
diff --git a/modules/azurerm/poc-db-onboarder/main.tf b/modules/azurerm/poc-db-onboarder/main.tf
new file mode 100644
index 000000000..271001535
--- /dev/null
+++ b/modules/azurerm/poc-db-onboarder/main.tf
@@ -0,0 +1,66 @@
+locals {
+ bastion_host = var.hub_proxy_info.proxy_address
+ bastion_private_key = try(file(var.hub_proxy_info.proxy_private_ssh_key_path), "")
+ bastion_user = var.hub_proxy_info.proxy_ssh_user
+ script_path = var.terraform_script_path_folder == null ? null : (join("/", [var.terraform_script_path_folder, "terraform_%RAND%.sh"]))
+
+ server_type_by_engine_map = {
+ "mssql" : "AZURE MS SQL SERVER"
+ }
+}
+
+data "azurerm_subscription" "current" {}
+
+resource "azurerm_role_assignment" "dsf_base_owner_role_assignment" {
+ scope = data.azurerm_subscription.current.id
+ role_definition_name = "Owner"
+ principal_id = var.assignee_role
+}
+
+module "onboard_db_to_dsf" {
+ source = "../../../modules/null/poc-db-onboarder"
+
+ assignee_gw = var.assignee_gw
+
+ usc_access_token = var.usc_access_token
+ enable_audit = var.enable_audit
+
+ database_data = {
+ id = {
+ name = "asset_id"
+ value = var.database_details.db_server_id
+ }
+ name = var.database_details.db_identifier
+ location = var.resource_group.location
+ hostname = var.database_details.db_address
+ port = var.database_details.db_port
+ server_type = local.server_type_by_engine_map[var.database_details.db_engine]
+ }
+
+ cloud_account_data = {
+ id = {
+ name = "asset_id"
+ value = data.azurerm_subscription.current.id
+ }
+ name = data.azurerm_subscription.current.display_name
+ type = "AZURE"
+ connections_data = [
+ {
+ reason = "default"
+ connectionData = {
+ auth_mechanism = "managed_identity"
+ subscription_id = data.azurerm_subscription.current.subscription_id,
+ }
+ }
+ ]
+ }
+
+ database_additional_data = {
+ location = var.resource_group.location
+ }
+
+ hub_info = var.hub_info
+ hub_proxy_info = var.hub_proxy_info
+ terraform_script_path_folder = var.terraform_script_path_folder
+ depends_on = [azurerm_role_assignment.dsf_base_owner_role_assignment]
+}
diff --git a/modules/azurerm/poc-db-onboarder/variables.tf b/modules/azurerm/poc-db-onboarder/variables.tf
new file mode 100644
index 000000000..30422ea63
--- /dev/null
+++ b/modules/azurerm/poc-db-onboarder/variables.tf
@@ -0,0 +1,89 @@
+variable "resource_group" {
+ type = object({
+ name = string
+ location = string
+ })
+ description = "Resource group details"
+}
+
+variable "hub_info" {
+ type = object({
+ hub_ip_address = string
+ hub_private_ssh_key_path = string
+ hub_ssh_user = string
+ })
+
+ description = "Hub info"
+}
+
+variable "hub_proxy_info" {
+ type = object({
+ proxy_address = string
+ proxy_private_ssh_key_path = string
+ proxy_ssh_user = string
+ })
+
+ description = "Proxy address, private key file path and user used for ssh to a private DSF Hub. Keep empty if a proxy is not used."
+ default = {
+ proxy_address = null
+ proxy_private_ssh_key_path = null
+ proxy_ssh_user = null
+ }
+}
+
+variable "assignee_gw" {
+ type = string
+ description = "jsonar uid of the assignee DSF Agentless Gateway"
+ validation {
+ condition = length(var.assignee_gw) >= 35
+    error_message = "Must be a UUID in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ }
+}
+
+variable "assignee_role" {
+ type = string
+ description = "Principal ID of the asset assignee"
+}
+
+variable "usc_access_token" {
+ type = string
+ description = "DSF Hub access token with USC scope"
+}
+
+variable "database_details" {
+ type = object({
+ db_server_id = string
+ db_port = number
+ db_engine = string
+ db_identifier = string
+ db_address = string
+ })
+ description = "database details"
+
+ validation {
+ condition = contains(["mssql"], var.database_details.db_engine)
+ error_message = "Allowed values for db engine: 'mssql'"
+ }
+}
+
+variable "terraform_script_path_folder" {
+ type = string
+ description = "Terraform script path folder to create terraform temporary script files on a private DSF node. Use '.' to represent the instance home directory"
+ default = null
+ validation {
+ condition = var.terraform_script_path_folder != ""
+ error_message = "Terraform script path folder cannot be an empty string"
+ }
+}
+
+variable "enable_audit" {
+ type = bool
+ description = "Enable audit for asset"
+ default = true
+}
+
+variable "tags" {
+ description = "A map of tags to add to all resources"
+ type = map(string)
+ default = {}
+}
diff --git a/modules/azurerm/poc-db-onboarder/versions.tf b/modules/azurerm/poc-db-onboarder/versions.tf
new file mode 100644
index 000000000..272db1955
--- /dev/null
+++ b/modules/azurerm/poc-db-onboarder/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.1, < 1.8.0"
+
+ required_providers {
+    azurerm = {
+      source  = "hashicorp/azurerm"
+      version = ">=3.0.0"
+ }
+ }
+}
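
A hedged sketch of wiring the mssql-db outputs into the new onboarder; the hub and gateway references below are hypothetical module outputs and variables, not names defined in this change, while the database_details fields map one-to-one onto the mssql-db module outputs.

module "mssql_db_onboarder" {
  source = "../../modules/azurerm/poc-db-onboarder" # illustrative relative path

  resource_group = {
    name     = "example-rg" # placeholder
    location = "East US"    # placeholder
  }

  usc_access_token = var.usc_access_token              # hypothetical variable
  assignee_gw      = module.agentless_gw.jsonar_uid    # hypothetical gateway output
  assignee_role    = module.agentless_gw.principal_id  # hypothetical gateway output

  database_details = {
    db_server_id  = module.mssql_db.db_server_id
    db_identifier = module.mssql_db.db_identifier
    db_address    = module.mssql_db.db_address
    db_port       = module.mssql_db.db_port
    db_engine     = module.mssql_db.db_engine
  }

  hub_info = {
    hub_ip_address           = module.hub.private_ip # hypothetical hub output
    hub_private_ssh_key_path = var.hub_ssh_key_path  # hypothetical variable
    hub_ssh_user             = "adminuser"           # placeholder
  }
}
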
diff --git a/modules/azurerm/sonar-base-instance/main.tf b/modules/azurerm/sonar-base-instance/main.tf
index 73430b995..6ff26c78f 100644
--- a/modules/azurerm/sonar-base-instance/main.tf
+++ b/modules/azurerm/sonar-base-instance/main.tf
@@ -13,7 +13,7 @@ locals {
disk_data_iops = var.storage_details.disk_iops_read_write
disk_data_cache = "ReadWrite"
- security_group_id = length(var.security_group_ids) == 0 ? azurerm_network_security_group.dsf_base_sg.id : var.security_group_ids[0]
+ security_group_id = length(var.security_group_ids) == 0 ? azurerm_network_security_group.dsf_base_sg[0].id : var.security_group_ids[0]
}
resource "azurerm_public_ip" "vm_public_ip" {
@@ -44,7 +44,7 @@ resource "azurerm_linux_virtual_machine" "dsf_base_instance" {
name = var.name
resource_group_name = var.resource_group.name
location = var.resource_group.location
- size = var.instance_type
+ size = var.instance_size
admin_username = local.vm_user
custom_data = base64encode(local.install_script)
@@ -94,16 +94,16 @@ resource "azurerm_linux_virtual_machine" "dsf_base_instance" {
}
resource "azurerm_user_assigned_identity" "dsf_base" {
- name = var.name
+ # dots are somewhat common in server names, but aren't allowed in identities
+ name = replace(var.name, ".", "-")
resource_group_name = var.resource_group.name
location = var.resource_group.location
}
-data "azurerm_subscription" "subscription" {
-}
+data "azurerm_subscription" "current" {}
resource "azurerm_role_assignment" "dsf_base_storage_role_assignment" {
- scope = "${data.azurerm_subscription.subscription.id}/resourceGroups/${var.binaries_location.az_resource_group}/providers/Microsoft.Storage/storageAccounts/${var.binaries_location.az_storage_account}/blobServices/default/containers/${var.binaries_location.az_container}"
+ scope = "${data.azurerm_subscription.current.id}/resourceGroups/${var.binaries_location.az_resource_group}/providers/Microsoft.Storage/storageAccounts/${var.binaries_location.az_storage_account}/blobServices/default/containers/${var.binaries_location.az_container}"
role_definition_name = "Storage Blob Data Reader"
principal_id = azurerm_user_assigned_identity.dsf_base.principal_id
}
diff --git a/modules/azurerm/sonar-base-instance/outputs.tf b/modules/azurerm/sonar-base-instance/outputs.tf
index cc5e1291b..b77359ee3 100644
--- a/modules/azurerm/sonar-base-instance/outputs.tf
+++ b/modules/azurerm/sonar-base-instance/outputs.tf
@@ -20,7 +20,7 @@ output "private_ip" {
output "principal_id" {
description = "Principal ID of the DSF node"
- value = azurerm_linux_virtual_machine.dsf_base_instance.identity[0].principal_id
+ value = azurerm_user_assigned_identity.dsf_base.principal_id
}
output "main_node_sonarw_public_key" {
diff --git a/modules/azurerm/sonar-base-instance/secret.tf b/modules/azurerm/sonar-base-instance/secret.tf
index 4b506f68d..9677630e9 100644
--- a/modules/azurerm/sonar-base-instance/secret.tf
+++ b/modules/azurerm/sonar-base-instance/secret.tf
@@ -34,7 +34,13 @@ locals {
data "azurerm_client_config" "current" {}
resource "azurerm_key_vault" "vault" {
- name = trim(substr(var.name, -24, -1), "-")
+ # the vault name has quite a few restrictions:
+ # - alphanumeric only
+ # - 3 to 24 characters only
+ # - must start with a letter and end with a letter or number
+  # for these reasons, and to keep it from losing uniqueness between different names,
+ # we use a base64 hash of the server name to get the maximum entropy possible in 24 characters
+ name = format("a%s", substr(replace(replace(replace(base64sha256(var.name), "+", ""), "/", ""), "=", ""), 0, 23))
location = var.resource_group.location
resource_group_name = var.resource_group.name
enabled_for_deployment = true
@@ -69,7 +75,8 @@ resource "azurerm_key_vault_access_policy" "vault_vm_access_policy" {
}
resource "azurerm_key_vault_secret" "sonarw_private_key_secret" {
- name = join("-", [var.name, "sonarw", "private", "key"])
+ # dots are somewhat common in server names, but aren't allowed in vault secrets
+ name = join("-", [replace(var.name, ".", "-"), "sonarw", "private", "key"])
value = chomp(local.main_node_sonarw_private_key)
key_vault_id = azurerm_key_vault.vault.id
content_type = "sonarw ssh private key"
@@ -80,7 +87,8 @@ resource "azurerm_key_vault_secret" "sonarw_private_key_secret" {
}
resource "azurerm_key_vault_secret" "password_key_secret" {
- name = join("-", [var.name, "password"])
+ # dots are somewhat common in server names, but aren't allowed in vault secrets
+ name = join("-", [replace(var.name, ".", "-"), "password"])
value = chomp(var.password)
key_vault_id = azurerm_key_vault.vault.id
content_type = "password"
@@ -91,8 +99,9 @@ resource "azurerm_key_vault_secret" "password_key_secret" {
}
resource "azurerm_key_vault_secret" "access_tokens" {
- count = length(local.access_tokens)
- name = join("-", [var.name, local.access_tokens[count.index].name, "access", "token"])
+ count = length(local.access_tokens)
+ # dots are somewhat common in server names, but aren't allowed in vault secrets
+ name = join("-", [replace(var.name, ".", "-"), local.access_tokens[count.index].name, "access", "token"])
value = random_uuid.access_tokens[count.index].result
key_vault_id = azurerm_key_vault.vault.id
content_type = "access token"
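
To illustrate the vault naming scheme described above, a standalone sketch (the server name is a placeholder) that derives a compliant, deterministic name:

locals {
  server_name = "imperva-dsf-hub.poc" # placeholder server name containing a dot

  # base64sha256 yields 44 characters; stripping the base64 symbols (+, /, =),
  # keeping 23 characters and prefixing a letter guarantees an alphanumeric name
  # that starts with a letter and never exceeds 24 characters
  vault_name = format("a%s", substr(replace(replace(replace(base64sha256(local.server_name), "+", ""), "/", ""), "=", ""), 0, 23))
}

output "vault_name" {
  value = local.vault_name
}
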
diff --git a/modules/azurerm/sonar-base-instance/sg.tf b/modules/azurerm/sonar-base-instance/sg.tf
index f1634ea22..647ba1a2f 100644
--- a/modules/azurerm/sonar-base-instance/sg.tf
+++ b/modules/azurerm/sonar-base-instance/sg.tf
@@ -1,19 +1,16 @@
-locals {
- # Skip sg creation if external sg list is given
- _security_groups_config = length(var.security_group_ids) == 0 ? var.security_groups_config : []
-}
-
##############################################################################
### Ingress security group
##############################################################################
resource "azurerm_network_security_group" "dsf_base_sg" {
+ count = length(var.security_group_ids) == 0 ? 1 : 0
+
name = var.name
location = var.resource_group.location
resource_group_name = var.resource_group.name
dynamic "security_rule" {
- for_each = { for idx, config in local._security_groups_config : idx => config if length(config.cidrs) > 0 && length(config.tcp) > 0 }
+ for_each = { for idx, config in var.security_groups_config : idx => config if length(config.cidrs) > 0 && length(config.tcp) > 0 }
content {
name = join("-", [var.name, "tcp", join("-", security_rule.value.name)])
priority = 100 + 2 * security_rule.key
@@ -41,7 +38,7 @@ resource "azurerm_network_security_group" "dsf_base_sg" {
}
dynamic "security_rule" {
- for_each = { for idx, config in local._security_groups_config : idx => config if length(config.cidrs) > 0 && length(config.udp) > 0 }
+ for_each = { for idx, config in var.security_groups_config : idx => config if length(config.cidrs) > 0 && length(config.udp) > 0 }
content {
name = join("-", [var.name, "udp", join("-", security_rule.value.name)])
priority = 100 + 2 * security_rule.key + 1
diff --git a/modules/azurerm/sonar-base-instance/variables.tf b/modules/azurerm/sonar-base-instance/variables.tf
index d9d307bab..862d55b1d 100644
--- a/modules/azurerm/sonar-base-instance/variables.tf
+++ b/modules/azurerm/sonar-base-instance/variables.tf
@@ -105,9 +105,9 @@ variable "vm_user" {
description = "VM user to use for SSH. Keep empty to use the default user."
}
-variable "instance_type" {
+variable "instance_size" {
type = string
- description = "vm type for the DSF base instance"
+ description = "VM size for the DSF base instance"
}
variable "password" {
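
A short, hedged caller fragment reflecting the rename from instance_type to instance_size; the VM size below is a placeholder, and callers normally consume this base module through the hub and agentless-gw wrapper modules rather than directly.

module "hub_main" {
  source = "../../modules/azurerm/sonar-base-instance" # illustrative; normally consumed via the wrapper modules

  # ... the remaining required inputs are omitted for brevity ...
  instance_size = "Standard_E4s_v5" # placeholder Azure VM size
}
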
diff --git a/modules/azurerm/sonar-base-instance/versions.tf b/modules/azurerm/sonar-base-instance/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/sonar-base-instance/versions.tf
+++ b/modules/azurerm/sonar-base-instance/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/azurerm/statistics/versions.tf b/modules/azurerm/statistics/versions.tf
index 2236d91ec..b13422df7 100644
--- a/modules/azurerm/statistics/versions.tf
+++ b/modules/azurerm/statistics/versions.tf
@@ -1,5 +1,5 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
required_providers {
azurerm = {
diff --git a/modules/null/agent-gw-cluster-setup/versions.tf b/modules/null/agent-gw-cluster-setup/versions.tf
index 3ec2b2811..36219a637 100644
--- a/modules/null/agent-gw-cluster-setup/versions.tf
+++ b/modules/null/agent-gw-cluster-setup/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/modules/null/federation/versions.tf b/modules/null/federation/versions.tf
index 3ec2b2811..36219a637 100644
--- a/modules/null/federation/versions.tf
+++ b/modules/null/federation/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/modules/null/hadr/versions.tf b/modules/null/hadr/versions.tf
index 3ec2b2811..36219a637 100644
--- a/modules/null/hadr/versions.tf
+++ b/modules/null/hadr/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/modules/null/poc-db-onboarder/README.md b/modules/null/poc-db-onboarder/README.md
new file mode 100644
index 000000000..a32fa2b45
--- /dev/null
+++ b/modules/null/poc-db-onboarder/README.md
@@ -0,0 +1,8 @@
+# DSF POC DB Onboarder
+[![GitHub tag](https://img.shields.io/github/v/tag/imperva/dsfkit.svg)](https://github.com/imperva/dsfkit/tags)
+
+## Sonar versions
+4.11 and up
+
+## Requirements
+* [Terraform version](versions.tf)
diff --git a/modules/null/poc-db-onboarder/main.tf b/modules/null/poc-db-onboarder/main.tf
new file mode 100644
index 000000000..b71930572
--- /dev/null
+++ b/modules/null/poc-db-onboarder/main.tf
@@ -0,0 +1,84 @@
+locals {
+ bastion_host = var.hub_proxy_info.proxy_address
+ bastion_private_key = try(file(var.hub_proxy_info.proxy_private_ssh_key_path), "")
+ bastion_user = var.hub_proxy_info.proxy_ssh_user
+ script_path = var.terraform_script_path_folder == null ? null : (join("/", [var.terraform_script_path_folder, "terraform_%RAND%.sh"]))
+}
+
+locals {
+ applianceType = "DSF_HUB"
+ admin_email = "admin@email.com"
+
+ cloud_account_data = {
+ data = {
+ applianceId = 1,
+ applianceType = local.applianceType,
+ serverType = var.cloud_account_data.type,
+ gatewayId = var.assignee_gw
+ id = var.cloud_account_data.id.value,
+ assetData = merge({
+ admin_email = local.admin_email,
+ asset_display_name = "Auto Onboarded Account: (${var.cloud_account_data.name})",
+ (var.cloud_account_data.id.name) = var.cloud_account_data.id.value,
+ "Server Host Name" = "${var.cloud_account_data.type}.com",
+ connections = var.cloud_account_data.connections_data
+ },
+ var.cloud_account_additional_data)
+ }
+ }
+
+ database_data = {
+ data : {
+ applianceId : 1,
+ applianceType : local.applianceType,
+ gatewayId : var.assignee_gw,
+ parentAssetId : local.cloud_account_data.data.id,
+ serverType : var.database_data.server_type,
+ id = var.database_data.id.value,
+ assetData : merge({
+ admin_email = local.admin_email,
+ asset_display_name : var.database_data.name,
+ (var.database_data.id.name) = var.database_data.id.value,
+ "Server Host Name" : var.database_data.hostname,
+ "Server Port" : var.database_data.port,
+ "Server IP" : var.database_data.hostname,
+ isMonitored : var.enable_audit
+ },
+ var.database_additional_data
+ )
+ }
+ }
+}
+
+resource "null_resource" "onboard_db_to_dsf" {
+ connection {
+ type = "ssh"
+ user = var.hub_info.hub_ssh_user
+ private_key = file(var.hub_info.hub_private_ssh_key_path)
+ host = var.hub_info.hub_ip_address
+
+ bastion_host = local.bastion_host
+ bastion_private_key = local.bastion_private_key
+ bastion_user = local.bastion_user
+
+ script_path = local.script_path
+ }
+
+ provisioner "remote-exec" {
+ inline = [
+ templatefile("${path.module}/onboard.tftpl", {
+ cloud_account_data = jsonencode(local.cloud_account_data),
+ account_id = urlencode(local.cloud_account_data.data.id)
+ database_asset_data = jsonencode(local.database_data)
+ database_id = urlencode(local.database_data.data.id)
+ usc_access_token = var.usc_access_token
+ enable_audit = var.enable_audit
+ })
+ ]
+ }
+
+ triggers = {
+ db_id = var.database_data.id.name
+# always_run = "${timestamp()}"
+ }
+}
diff --git a/modules/null/poc-db-onboarder/onboard.tftpl b/modules/null/poc-db-onboarder/onboard.tftpl
new file mode 100644
index 000000000..fda62871f
--- /dev/null
+++ b/modules/null/poc-db-onboarder/onboard.tftpl
@@ -0,0 +1,81 @@
+#!/bin/bash
+set -e
+set -x
+
+client_id="terraform-automation"
+reason="Token autogenerated by terraform"
+
+function curl_fail_on_error() {
+ OUTPUT_FILE=$(mktemp)
+ HTTP_CODE=$(curl --silent --output $OUTPUT_FILE --write-out "%%{http_code}" "$@")
+ if [[ $HTTP_CODE -lt 200 || $HTTP_CODE -gt 299 ]] ; then
+ >&2 cat $OUTPUT_FILE; >&2 echo
+ return 22
+ fi
+ cat $OUTPUT_FILE; echo
+ rm $OUTPUT_FILE
+}
+
+usc_access_token=${usc_access_token}
+
+# Add cloud account
+if ! curl --fail -k 'https://127.0.0.1:8443/dsf/api/v1/cloud-accounts/${account_id}' --header "Authorization: Bearer $usc_access_token" &>/dev/null; then
+ echo ********Adding new cloud account********
+ if curl_fail_on_error -k --location --request POST 'https://127.0.0.1:8443/dsf/api/v1/cloud-accounts' \
+ --header "Authorization: Bearer $usc_access_token" \
+ --header 'Content-Type: application/json' \
+ --data '${cloud_account_data}'; then
+ echo ********Cloud account was added successfully********
+ else
+ curl_fail_on_error -k 'https://127.0.0.1:8443/dsf/api/v1/cloud-accounts/${account_id}' --header "Authorization: Bearer $usc_access_token" >/dev/null
+ echo ********Cloud account already exists********
+ fi
+fi
+
+# Add database asset
+if ! curl --fail -k 'https://127.0.0.1:8443/dsf/api/v1/data-sources/${database_id}' --header "Authorization: Bearer $usc_access_token" &>/dev/null; then
+ echo ********Adding new database asset********
+ curl_fail_on_error -k --location --request POST 'https://127.0.0.1:8443/dsf/api/v1/data-sources' \
+ --header "Authorization: Bearer $usc_access_token" \
+ --header 'Content-Type: application/json' \
+ --data '${database_asset_data}'
+%{ if enable_audit ~}
+ echo ********Sleeping 1 minute before syncing gateway and enabling audit logs********
+ sleep 60
+%{ endif ~}
+fi
+
+%{ if enable_audit ~}
+# Syncing cloud account and db with the gateway
+echo ********Syncing cloud account asset with gateway********
+curl_fail_on_error -k --location --request POST 'https://127.0.0.1:8443/dsf/api/v1/cloud-accounts/${account_id}/operations/sync-with-gateway' \
+ --header "Authorization: Bearer $usc_access_token" \
+ --header 'Content-Type: application/json'
+
+echo ********Syncing DB asset with gateway********
+curl_fail_on_error -k --location --request POST 'https://127.0.0.1:8443/dsf/api/v1/data-sources/${database_id}/operations/sync-with-gateway' \
+ --header "Authorization: Bearer $usc_access_token" \
+ --header 'Content-Type: application/json'
+
+# Enable audit
+echo ********Enabling audit on new asset********
+curl_fail_on_error -k --location --request POST 'https://127.0.0.1:8443/dsf/api/v1/data-sources/${database_id}/operations/enable-audit-collection' \
+ --header "Authorization: Bearer $usc_access_token" \
+ --header 'Content-Type: application/json'
+
+# Verify log aggregator is active
+max_sleep=600
+while true; do
+ if [ "$(curl_fail_on_error -k 'https://127.0.0.1:8443/dsf/api/v1/log-aggregators/${database_id}' --header "Authorization: Bearer $usc_access_token" | jq -r .data.auditState)" == "YES" ]; then
+ echo ********Log aggregator is found********
+ break
+ fi
+ sleep 20
+ max_sleep=$(($max_sleep - 20))
+ if [ "$max_sleep" -le 0 ]; then
+ echo ********Log aggregator is NOT found********
+ exit 1
+ fi
+done
+%{ endif ~}
+echo DONE
diff --git a/modules/null/poc-db-onboarder/variables.tf b/modules/null/poc-db-onboarder/variables.tf
new file mode 100644
index 000000000..b9cd509c7
--- /dev/null
+++ b/modules/null/poc-db-onboarder/variables.tf
@@ -0,0 +1,94 @@
+variable "hub_info" {
+ type = object({
+ hub_ip_address = string
+ hub_private_ssh_key_path = string
+ hub_ssh_user = string
+ })
+
+ nullable = false
+ description = "Hub info"
+}
+
+variable "hub_proxy_info" {
+ type = object({
+ proxy_address = string
+ proxy_private_ssh_key_path = string
+ proxy_ssh_user = string
+ })
+
+ description = "Proxy address, private key file path and user used for ssh to a private DSF Hub. Keep empty if a proxy is not used."
+ default = {
+ proxy_address = null
+ proxy_private_ssh_key_path = null
+ proxy_ssh_user = null
+ }
+}
+
+variable "assignee_gw" {
+ type = string
+ description = "jsonar uid of the assignee DSF Agentless Gateway"
+ nullable = false
+ validation {
+ condition = length(var.assignee_gw) >= 35
+    error_message = "Must be a UUID in the form of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
+ }
+}
+
+variable "usc_access_token" {
+ type = string
+ description = "DSF Hub access token with USC scope"
+}
+
+variable "cloud_account_data" {
+ type = object({
+ id = object({
+ name = string
+ value = string
+ })
+ name = string
+ type = string
+ connections_data = list(any)
+ })
+ description = "Cloud account data"
+}
+
+variable "cloud_account_additional_data" {
+ type = any
+  description = "Cloud account additional data"
+ default = {}
+}
+
+variable "database_data" {
+ type = object({
+ server_type = string
+ id = object({
+ name = string
+ value = string
+ })
+ name = string
+ hostname = string
+ port = number
+ })
+}
+
+variable "database_additional_data" {
+ type = any
+  description = "Database additional data"
+ default = {}
+}
+
+variable "terraform_script_path_folder" {
+ type = string
+ description = "Terraform script path folder to create terraform temporary script files on a private DSF node. Use '.' to represent the instance home directory"
+ default = null
+ validation {
+ condition = var.terraform_script_path_folder != ""
+ error_message = "Terraform script path folder cannot be an empty string"
+ }
+}
+
+variable "enable_audit" {
+ type = bool
+ description = "Enable audit for asset"
+ default = true
+}
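
Because the null onboarder is provider-agnostic, a hedged sketch of calling it directly; every identifier below is a placeholder, and the server_type string mirrors the map used by the azurerm wrapper above.

module "onboard_db" {
  source = "../../modules/null/poc-db-onboarder" # illustrative relative path

  assignee_gw      = "12345678-90ab-cdef-1234-567890abcdef" # placeholder gateway jsonar uid
  usc_access_token = var.usc_access_token                   # hypothetical variable

  cloud_account_data = {
    id               = { name = "asset_id", value = "/subscriptions/00000000-0000-0000-0000-000000000000" } # placeholder
    name             = "Example Subscription"
    type             = "AZURE"
    connections_data = []
  }

  database_data = {
    id          = { name = "asset_id", value = "example-mssql-server-id" } # placeholder
    name        = "example-mssql"
    hostname    = "example.database.windows.net" # placeholder
    port        = 1433
    server_type = "AZURE MS SQL SERVER"
  }

  hub_info = {
    hub_ip_address           = "10.0.2.5"         # placeholder
    hub_private_ssh_key_path = "ssh_keys/hub_key" # placeholder
    hub_ssh_user             = "adminuser"        # placeholder
  }
}
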
diff --git a/modules/null/poc-db-onboarder/versions.tf b/modules/null/poc-db-onboarder/versions.tf
new file mode 100644
index 000000000..36219a637
--- /dev/null
+++ b/modules/null/poc-db-onboarder/versions.tf
@@ -0,0 +1,3 @@
+terraform {
+ required_version = ">= 1.3.1, < 1.8.0"
+}
diff --git a/modules/null/statistics/versions.tf b/modules/null/statistics/versions.tf
index 3ec2b2811..36219a637 100644
--- a/modules/null/statistics/versions.tf
+++ b/modules/null/statistics/versions.tf
@@ -1,3 +1,3 @@
terraform {
- required_version = ">= 1.3.1, < 1.7.0"
+ required_version = ">= 1.3.1, < 1.8.0"
}
diff --git a/modules/sonar_python_upgrader_1_7_5.zip b/modules/sonar_python_upgrader_1_7_8.zip
similarity index 75%
rename from modules/sonar_python_upgrader_1_7_5.zip
rename to modules/sonar_python_upgrader_1_7_8.zip
index 50dbfb968..ec768ba26 100644
Binary files a/modules/sonar_python_upgrader_1_7_5.zip and b/modules/sonar_python_upgrader_1_7_8.zip differ
diff --git a/permissions_samples/azure/GeneralRequiredPermissions.txt b/permissions_samples/azure/GeneralRequiredPermissions.txt
index 3e5ff1292..08f63481c 100644
--- a/permissions_samples/azure/GeneralRequiredPermissions.txt
+++ b/permissions_samples/azure/GeneralRequiredPermissions.txt
@@ -52,7 +52,10 @@
"Microsoft.Authorization/roleAssignments/delete",
"Microsoft.Compute/virtualMachines/extensions/read",
"Microsoft.Compute/virtualMachines/extensions/write",
- "Microsoft.Compute/virtualMachines/extensions/delete"
+ "Microsoft.Compute/virtualMachines/extensions/delete",
+ "Microsoft.Compute/images/read",
+ "Microsoft.Compute/images/write",
+ "Microsoft.Compute/images/delete"
],
"notActions": [],
"dataActions": [],
diff --git a/sed.expr b/sed.expr
index f831cf50c..ffa1f1e08 100644
--- a/sed.expr
+++ b/sed.expr
@@ -11,6 +11,8 @@ s;imperva/dsf-poc-db-onboarder/aws//modules/rds-mysql-db;../../../../modules/aws
s;imperva/dsf-poc-db-onboarder/aws//modules/rds-postgres-db;../../../../modules/aws/rds-postgres-db;
s;imperva/dsf-poc-db-onboarder/aws//modules/rds-mssql-db;../../../../modules/aws/rds-mssql-db;
s;imperva/dsf-poc-db-onboarder/aws;../../../../modules/aws/poc-db-onboarder;
+s;imperva/dsf-poc-db-onboarder/azurerm//modules/mssql-db;../../../../modules/azurerm/mssql-db;
+s;imperva/dsf-poc-db-onboarder/azurerm;../../../../modules/azurerm/poc-db-onboarder;
s;imperva/dsf-mx/aws;../../../../modules/aws/mx;
s;imperva/dsf-agent-gw/aws;../../../../modules/aws/agent-gw;
s;imperva/dsf-db-with-agent/aws;../../../../modules/aws/db-with-agent;
@@ -18,4 +20,9 @@ s;imperva/dsf-dra-admin/aws;../../../../modules/aws/dra-admin;
s;imperva/dsf-dra-analytics/aws;../../../../modules/aws/dra-analytics;
s;imperva/dsf-agent-gw-cluster-setup/null;../../../../modules/null/agent-gw-cluster-setup;
s;imperva/dsf-sonar-upgrader/aws;../../../modules/aws/sonar-upgrader;
+s;imperva/dsf-mx/azurerm;../../../../modules/azurerm/mx;
+s;imperva/dsf-agent-gw/azurerm;../../../../modules/azurerm/agent-gw;
+s;imperva/dsf-db-with-agent/azurerm;../../../../modules/azurerm/db-with-agent;
+s;imperva/dsf-dra-admin/azurerm;../../../../modules/azurerm/dra-admin;
+s;imperva/dsf-dra-analytics/azurerm;../../../../modules/azurerm/dra-analytics;
/latest release tag/c\
\ No newline at end of file