run-name: (Distributed) Test installation assistant - ${{ github.run_id }} - ${{ inputs.SYSTEMS }} - Launched by @${{ github.actor }}
name: (Distributed) Test installation assistant
on:
pull_request:
paths:
- 'cert_tool/**'
- 'common_functions/**'
- 'config/**'
- 'install_functions/**'
- 'passwords_tool/**'
- 'tests/**'
workflow_dispatch:
inputs:
REPOSITORY:
description: 'Repository environment'
required: true
default: 'pre-release'
type: choice
options:
- staging
- pre-release
WAZUH_INSTALLATION_ASSISTANT_REFERENCE:
description: 'Branch or tag of the wazuh-installation-assistant repository'
required: true
default: '4.11.0'
AUTOMATION_REFERENCE:
description: 'Branch or tag of the wazuh-automation repository'
required: true
default: '4.11.0'
SYSTEMS:
description: 'Operating Systems (list of comma-separated quoted strings enclosed in square brackets)'
required: true
default: '["CentOS_8", "AmazonLinux_2","Ubuntu_22", "RHEL8"]'
type: string
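# Note: each SYSTEMS entry must match one of the cases handled in the
# "Set COMPOSITE_NAME variable" step below (CentOS_7/8, AmazonLinux_2,
# Ubuntu_16/18/20/22, RHEL7/8); any other value makes that matrix job fail.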
VERBOSITY:
description: 'Verbosity level for playbook execution'
required: true
default: '-v'
type: choice
options:
- -v
- -vv
- -vvv
- -vvvv
DESTROY:
description: 'Destroy instances after run'
required: true
default: true
type: boolean
env:
COMPOSITE_NAME: "linux-SUBNAME-amd64"
SESSION_NAME: "Installation-Assistant-Test"
REGION: "us-east-1"
TMP_PATH: "/tmp/test"
ANSIBLE_CALLBACK: "yaml"
RESOURCES_PATH: "${{ github.workspace }}"
PKG_REPOSITORY: "${{ inputs.REPOSITORY }}"
TEST_NAME: "test_installation_assistant"
REPOSITORY_URL: "${{ github.server_url }}/${{ github.repository }}.git"
ALLOCATOR_PATH: "/tmp/allocator_instance"
INSTANCE_NAMES: "instance_1 instance_2 instance_3"
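# COMPOSITE_NAME is a template: the SUBNAME placeholder is replaced with the
# per-OS value in the "Set COMPOSITE_NAME variable" step. INSTANCE_NAMES lists
# the three VMs allocated per matrix entry; they are mapped to the
# indexer/manager/dashboard roles in the "Allocate instances" step.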
permissions:
id-token: write # This is required for requesting the JWT
contents: read # This is required for actions/checkout
jobs:
run-test:
runs-on: ubuntu-latest
strategy:
fail-fast: false # If one matrix job fails, the remaining jobs are not canceled
matrix:
system: ${{ fromJson(github.event_name == 'pull_request' && '["Ubuntu_22"]' || inputs.SYSTEMS) }} # If the workflow is triggered by a pull request, test only Ubuntu_22; otherwise use the SYSTEMS input
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{ inputs.WAZUH_INSTALLATION_ASSISTANT_REFERENCE }}
- name: View parameters
run: echo "${{ toJson(inputs) }}"
- name: Set COMPOSITE_NAME variable
run: |
case "${{ matrix.system }}" in
"CentOS_7")
SUBNAME="centos-7"
;;
"CentOS_8")
SUBNAME="centos-8"
;;
"AmazonLinux_2")
SUBNAME="amazon-2"
;;
"Ubuntu_16")
SUBNAME="ubuntu-16.04"
;;
"Ubuntu_18")
SUBNAME="ubuntu-18.04"
;;
"Ubuntu_20")
SUBNAME="ubuntu-20.04"
;;
"Ubuntu_22")
SUBNAME="ubuntu-22.04"
;;
"RHEL7")
SUBNAME="redhat-7"
;;
"RHEL8")
SUBNAME="redhat-8"
;;
*)
echo "Invalid SYSTEM selection" >&2
exit 1
;;
esac
COMPOSITE_NAME="${COMPOSITE_NAME/SUBNAME/$SUBNAME}"
echo "COMPOSITE_NAME=$COMPOSITE_NAME" >> $GITHUB_ENV
- name: Install python and create virtual environment
run: |
sudo apt-get update
sudo apt-get install -y python3 python3-venv
python3 -m venv testing_venv
source testing_venv/bin/activate
python3 -m pip install --upgrade pip
echo PATH=$PATH >> $GITHUB_ENV
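# Persisting PATH through GITHUB_ENV keeps the virtual environment's bin
# directory first on PATH in the following steps, so the pip and ansible
# commands below install into and run from testing_venv.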
- name: Install Ansible
run: |
pip install ansible-core==2.16
pip install pyyaml
ansible-galaxy collection install community.general
- name: Set up AWS credentials
uses: aws-actions/configure-aws-credentials@v4
with:
role-to-assume: ${{ secrets.AWS_IAM_ROLE }}
role-session-name: ${{ env.SESSION_NAME }}
aws-region: ${{ env.REGION }}
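# The role is assumed through GitHub's OIDC provider, which is why the
# workflow requests the id-token: write permission above.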
- name: Checkout wazuh/wazuh-automation repository
uses: actions/checkout@v4
with:
repository: wazuh/wazuh-automation
ref: ${{ inputs.AUTOMATION_REFERENCE }}
token: ${{ secrets.GH_CLONE_TOKEN }}
path: wazuh-automation
- name: Install and set allocator requirements
run: pip3 install -r wazuh-automation/deployability/deps/requirements.txt
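# The allocation module from wazuh-automation (used in the next step)
# provisions the AWS instances and writes, per instance, a track file with
# instance metadata and an Ansible inventory file that are merged into a
# single inventory below.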
- name: Allocate instances and create inventory
id: allocator_instance
run: |
instance_names=($INSTANCE_NAMES)
inventory_file="$ALLOCATOR_PATH/inventory"
inventory_indexers="$ALLOCATOR_PATH/inventory_indexers"
inventory_managers="$ALLOCATOR_PATH/inventory_managers"
inventory_dashboards="$ALLOCATOR_PATH/inventory_dashboards"
inventory_common="$ALLOCATOR_PATH/inventory_common"
inventory_file="$ALLOCATOR_PATH/inventory"
mkdir -p $ALLOCATOR_PATH
echo "[indexers]" > $inventory_indexers
echo "[managers]" > $inventory_managers
echo "[dashboards]" > $inventory_dashboards
echo "[all:vars]" > $inventory_common
for i in ${!instance_names[@]}; do
instance_name=${instance_names[$i]}
# Provision instance in parallel
(
python3 wazuh-automation/deployability/modules/allocation/main.py \
--action create --provider aws --size large \
--composite-name ${{ env.COMPOSITE_NAME }} \
--working-dir $ALLOCATOR_PATH --track-output $ALLOCATOR_PATH/track_${instance_name}.yml \
--inventory-output $ALLOCATOR_PATH/inventory_${instance_name}.yml \
--instance-name gha_${{ github.run_id }}_${{ env.TEST_NAME }}_${instance_name} --label-team devops --label-termination-date 1d
instance_id=$(grep '^identifier' $ALLOCATOR_PATH/track_${instance_name}.yml | awk '{print $2}')
private_ip=$(aws ec2 describe-instances \
--instance-ids $instance_id \
--query 'Reservations[*].Instances[*].PrivateIpAddress' \
--output text)
sed 's/: */=/g' $ALLOCATOR_PATH/inventory_${instance_name}.yml > $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
sed -i 's/-o StrictHostKeyChecking=no/\"-o StrictHostKeyChecking=no\"/g' $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
source $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
# Add instance to corresponding group
if [[ $i -eq 0 ]]; then
echo "indexer1 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
echo "master ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=master instance_type=indexer_manager" >> $inventory_managers
echo "ansible_user=$ansible_user" >> $inventory_common
echo "ansible_port=$ansible_port" >> $inventory_common
echo "ansible_ssh_common_args='$ansible_ssh_common_args'" >> $inventory_common
elif [[ $i -eq 1 ]]; then
echo "indexer2 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
echo "worker1 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=worker instance_type=indexer_manager" >> $inventory_managers
else
echo "indexer3 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
echo "worker2 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=worker instance_type=indexer_manager_dashboard" >> $inventory_managers
echo "dashboard ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_dashboards
fi
) &
done
# Wait for all provisioning tasks to complete
wait
# Combine the temporary inventories into one
cat $inventory_indexers > $inventory_file
cat $inventory_managers >> $inventory_file
cat $inventory_dashboards >> $inventory_file
cat $inventory_common >> $inventory_file
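# Illustrative shape of the combined inventory produced above (host aliases and
# group names are the real ones; addresses and key paths are placeholders):
#   [indexers]
#   indexer1 ansible_host=203.0.113.10 private_ip=10.0.0.10 ansible_ssh_private_key_file=/tmp/allocator_instance/key_instance_1
#   indexer2 ...
#   indexer3 ...
#   [managers]
#   master  ansible_host=... manager_type=master instance_type=indexer_manager
#   worker1 ansible_host=... manager_type=worker instance_type=indexer_manager
#   worker2 ansible_host=... manager_type=worker instance_type=indexer_manager_dashboard
#   [dashboards]
#   dashboard ansible_host=...
#   [all:vars]
#   ansible_user=...
#   ansible_port=...
#   ansible_ssh_common_args='-o StrictHostKeyChecking=no'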
- name: Execute provision playbook
run: |
INSTALL_DEPS=true
INSTALL_PYTHON=true
INSTALL_PIP_DEPS=true
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/provision.yml \
-i $ALLOCATOR_PATH/inventory \
-l indexers \
-e "repository=$REPOSITORY_URL" \
-e "reference=${{ github.ref_name }}" \
-e "tmp_path=$TMP_PATH" \
-e "install_deps=$INSTALL_DEPS" \
-e "install_python=$INSTALL_PYTHON" \
-e "install_pip_deps=$INSTALL_PIP_DEPS" \
"${{ inputs.VERBOSITY }}"
- name: Execute certificates generation playbook
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_generate_certificates.yml \
-i $ALLOCATOR_PATH/inventory \
-e "resources_path=$RESOURCES_PATH" \
"${{ inputs.VERBOSITY }}"
- name: Copy certificates to nodes
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_copy_certificates.yml \
-i $ALLOCATOR_PATH/inventory \
-l indexers \
-e "tmp_path=$TMP_PATH" \
-e "resources_path=$RESOURCES_PATH" \
"${{ inputs.VERBOSITY }}"
- name: Execute indexer installation playbook
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_indexer.yml \
-i $ALLOCATOR_PATH/inventory \
-l indexers \
-e "tmp_path=$TMP_PATH" \
-e "pkg_repository=$PKG_REPOSITORY" \
"${{ inputs.VERBOSITY }}"
- name: Execute indexer cluster start playbook
run: |
INDEXER_ADMIN_PASSWORD="admin"
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_start_indexer_cluster.yml \
-i $ALLOCATOR_PATH/inventory \
-l indexers \
-e "tmp_path=$TMP_PATH" \
-e "pkg_repository=$PKG_REPOSITORY" \
"${{ inputs.VERBOSITY }}"
- name: Execute server installation playbook
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_wazuh.yml \
-i $ALLOCATOR_PATH/inventory \
-l managers \
-e "tmp_path=$TMP_PATH" \
-e "pkg_repository=$PKG_REPOSITORY" \
"${{ inputs.VERBOSITY }}"
- name: Execute dashboard installation playbook
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_dashboard.yml \
-i $ALLOCATOR_PATH/inventory \
-l dashboards \
-e "tmp_path=$TMP_PATH" \
-e "pkg_repository=$PKG_REPOSITORY" \
"${{ inputs.VERBOSITY }}"
- name: Execute Python test playbook
run: |
ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_tests.yml \
-i $ALLOCATOR_PATH/inventory \
-l managers \
-e "tmp_path=$TMP_PATH" \
-e "test_name=$TEST_NAME" \
"${{ inputs.VERBOSITY }}"
- name: Compress Allocator VM directory
id: compress_allocator_files
if: always() && steps.allocator_instance.outcome == 'success' && inputs.DESTROY == false
run: |
zip -P "${{ secrets.ZIP_ARTIFACTS_PASSWORD }}" -r $ALLOCATOR_PATH.zip $ALLOCATOR_PATH
- name: Upload Allocator VM directory as artifact
if: always() && steps.compress_allocator_files.outcome == 'success' && inputs.DESTROY == false
uses: actions/upload-artifact@v4
with:
name: allocator-instance-${{ matrix.system }}
path: ${{ env.ALLOCATOR_PATH }}.zip
- name: Delete allocated VMs
if: always() && steps.allocator_instance.outcome == 'success' && inputs.DESTROY == true
run: |
instance_names=($INSTANCE_NAMES)
for i in ${!instance_names[@]}; do
instance_name=${instance_names[$i]}
track_file="$ALLOCATOR_PATH/track_${instance_name}.yml"
echo "Deleting instance: $instance_name using track file $track_file"
(
# Delete instance
python3 wazuh-automation/deployability/modules/allocation/main.py \
--action delete --provider aws --track-output $track_file
) &
done
# Wait for all deletion tasks to complete
wait