run-name: (Distributed) Test installation assistant - ${{ github.run_id }} - ${{ inputs.SYSTEMS }} - Launched by @${{ github.actor }}
name: (Distributed) Test installation assistant

on:
  pull_request:
    paths:
      - 'cert_tool/**'
      - 'common_functions/**'
      - 'config/**'
      - 'install_functions/**'
      - 'passwords_tool/**'
      - 'tests/**'
  workflow_dispatch:
    inputs:
      REPOSITORY:
        description: 'Repository environment'
        required: true
        default: 'pre-release'
        type: choice
        options:
          - staging
          - pre-release
      WAZUH_INSTALLATION_ASSISTANT_REFERENCE:
        description: 'Branch or tag of the wazuh-installation-assistant repository'
        required: true
        default: 'main'
      AUTOMATION_REFERENCE:
        description: 'Branch or tag of the wazuh-automation repository'
        required: true
        default: 'master'
      SYSTEMS:
        description: 'Operating Systems (list of comma-separated quoted strings enclosed in square brackets)'
        required: true
        default: '["CentOS_8", "AmazonLinux_2", "Ubuntu_22", "RHEL8"]'
        type: string
      VERBOSITY:
        description: 'Verbosity level for the playbooks execution'
        required: true
        default: '-v'
        type: choice
        options:
          - -v
          - -vv
          - -vvv
          - -vvvv
      DESTROY:
        description: 'Destroy instances after run'
        required: true
        default: true
        type: boolean

env:
  COMPOSITE_NAME: "linux-SUBNAME-amd64"
  SESSION_NAME: "Installation-Assistant-Test"
  REGION: "us-east-1"
  TMP_PATH: "/tmp/test"
  ANSIBLE_CALLBACK: "yaml"
  RESOURCES_PATH: "${{ github.workspace }}"
  PKG_REPOSITORY: "${{ inputs.REPOSITORY }}"
  TEST_NAME: "test_installation_assistant"
  REPOSITORY_URL: "${{ github.server_url }}/${{ github.repository }}.git"
  ALLOCATOR_PATH: "/tmp/allocator_instance"
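  # Note: the three instance names below are mapped by the "Allocate instances and create inventory"
  # step to the indexer1/master, indexer2/worker1 and indexer3/worker2/dashboard roles, respectively.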
  INSTANCE_NAMES: "instance_1 instance_2 instance_3"

permissions:
  id-token: write # This is required for requesting the JWT
  contents: read # This is required for actions/checkout

jobs:
  run-test:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false # If a job fails, the rest of the jobs are not canceled
      matrix:
        system: ${{ github.event_name == 'pull_request' && fromJson('["Ubuntu_22"]') || fromJson(inputs.SYSTEMS) }} # If the workflow is triggered by a pull request, only Ubuntu_22 is tested
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ inputs.WAZUH_INSTALLATION_ASSISTANT_REFERENCE }}
      - name: View parameters
        run: echo "${{ toJson(inputs) }}"
      - name: Set COMPOSITE_NAME variable
        run: |
          case "${{ matrix.system }}" in
            "CentOS_7")
              SUBNAME="centos-7"
              ;;
            "CentOS_8")
              SUBNAME="centos-8"
              ;;
            "AmazonLinux_2")
              SUBNAME="amazon-2"
              ;;
            "Ubuntu_16")
              SUBNAME="ubuntu-16.04"
              ;;
            "Ubuntu_18")
              SUBNAME="ubuntu-18.04"
              ;;
            "Ubuntu_20")
              SUBNAME="ubuntu-20.04"
              ;;
            "Ubuntu_22")
              SUBNAME="ubuntu-22.04"
              ;;
            "RHEL7")
              SUBNAME="redhat-7"
              ;;
            "RHEL8")
              SUBNAME="redhat-8"
              ;;
            *)
              echo "Invalid SYSTEM selection" >&2
              exit 1
              ;;
          esac
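          # Replace the SUBNAME placeholder (e.g. "linux-SUBNAME-amd64" -> "linux-centos-8-amd64") using
          # bash pattern substitution, and export the result so the following steps can use it.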
          COMPOSITE_NAME="${COMPOSITE_NAME/SUBNAME/$SUBNAME}"
          echo "COMPOSITE_NAME=$COMPOSITE_NAME" >> $GITHUB_ENV
      - name: Install Ansible
        run: sudo apt-get update && sudo apt install -y python3 && python3 -m pip install --user ansible-core==2.16 && pip install pyyaml && ansible-galaxy collection install community.general
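      # AWS credentials are obtained through OIDC role assumption, which is why the workflow declares
      # "id-token: write" in its permissions block above.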
      - name: Set up AWS credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ secrets.AWS_IAM_ROLE }}
          role-session-name: ${{ env.SESSION_NAME }}
          aws-region: ${{ env.REGION }}
      - name: Checkout wazuh/wazuh-automation repository
        uses: actions/checkout@v4
        with:
          repository: wazuh/wazuh-automation
          ref: ${{ inputs.AUTOMATION_REFERENCE }}
          token: ${{ secrets.GH_CLONE_TOKEN }}
          path: wazuh-automation
      - name: Install and set allocator requirements
        run: pip3 install -r wazuh-automation/deployability/deps/requirements.txt
      - name: Allocate instances and create inventory
        id: allocator_instance
        run: |
          instance_names=($INSTANCE_NAMES)
          inventory_file="$ALLOCATOR_PATH/inventory"
          inventory_indexers="$ALLOCATOR_PATH/inventory_indexers"
          inventory_managers="$ALLOCATOR_PATH/inventory_managers"
          inventory_dashboards="$ALLOCATOR_PATH/inventory_dashboards"
          inventory_common="$ALLOCATOR_PATH/inventory_common"
          mkdir -p $ALLOCATOR_PATH
          echo "[indexers]" > $inventory_indexers
          echo "[managers]" > $inventory_managers
          echo "[dashboards]" > $inventory_dashboards
          echo "[all:vars]" > $inventory_common
          for i in ${!instance_names[@]}; do
            instance_name=${instance_names[$i]}
            # Provision instance in parallel
            (
              python3 wazuh-automation/deployability/modules/allocation/main.py \
                --action create --provider aws --size large \
                --composite-name ${{ env.COMPOSITE_NAME }} \
                --working-dir $ALLOCATOR_PATH --track-output $ALLOCATOR_PATH/track_${instance_name}.yml \
                --inventory-output $ALLOCATOR_PATH/inventory_${instance_name}.yml \
                --instance-name gha_${{ github.run_id }}_${{ env.TEST_NAME }}_${instance_name} --label-team devops --label-termination-date 1d
              instance_id=$(grep '^identifier' $ALLOCATOR_PATH/track_${instance_name}.yml | awk '{print $2}')
              private_ip=$(aws ec2 describe-instances \
                --instance-ids $instance_id \
                --query 'Reservations[*].Instances[*].PrivateIpAddress' \
                --output text)
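              # Turn the allocator's YAML inventory into shell-sourceable key=value pairs and quote the
              # SSH options so they survive sourcing; the resulting ansible_* variables are used below.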
              sed 's/: */=/g' $ALLOCATOR_PATH/inventory_${instance_name}.yml > $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
              sed -i 's/-o StrictHostKeyChecking=no/\"-o StrictHostKeyChecking=no\"/g' $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
              source $ALLOCATOR_PATH/inventory_mod_${instance_name}.yml
              # Add instance to corresponding group
              if [[ $i -eq 0 ]]; then
                echo "indexer1 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
                echo "master ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=master instance_type=indexer_manager" >> $inventory_managers
                echo "ansible_user=$ansible_user" >> $inventory_common
                echo "ansible_port=$ansible_port" >> $inventory_common
                echo "ansible_ssh_common_args='$ansible_ssh_common_args'" >> $inventory_common
              elif [[ $i -eq 1 ]]; then
                echo "indexer2 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
                echo "worker1 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=worker instance_type=indexer_manager" >> $inventory_managers
              else
                echo "indexer3 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_indexers
                echo "worker2 ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file manager_type=worker instance_type=indexer_manager_dashboard" >> $inventory_managers
                echo "dashboard ansible_host=$ansible_host private_ip=$private_ip ansible_ssh_private_key_file=$ansible_ssh_private_key_file" >> $inventory_dashboards
              fi
            ) &
          done
          # Wait for all provisioning tasks to complete
          wait
          # Combine the temporary inventories into one
          cat $inventory_indexers > $inventory_file
          cat $inventory_managers >> $inventory_file
          cat $inventory_dashboards >> $inventory_file
          cat $inventory_common >> $inventory_file
      - name: Execute provision playbook
        id: provision_instance
        if: success() && steps.allocator_instance.outcome == 'success'
        run: |
          set +e
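          # "set +e" prevents an ansible-playbook failure from aborting the script before its exit code
          # is captured; the code is exported through GITHUB_OUTPUT for the "Set final workflow status"
          # step, and the step then fails explicitly if it is non-zero. The same pattern is repeated in
          # the playbook steps below.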
          INSTALL_DEPS=true
          INSTALL_PYTHON=true
          INSTALL_PIP_DEPS=true
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/provision.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l indexers \
            -e "repository=$REPOSITORY_URL" \
            -e "reference=${{ github.ref_name }}" \
            -e "tmp_path=$TMP_PATH" \
            -e "install_deps=$INSTALL_DEPS" \
            -e "install_python=$INSTALL_PYTHON" \
            -e "install_pip_deps=$INSTALL_PIP_DEPS" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "provision_instance=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute certificates generation playbook
        id: generate_certificates
        if: success() && steps.provision_instance.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_generate_certificates.yml \
            -i $ALLOCATOR_PATH/inventory \
            -e "resources_path=$RESOURCES_PATH" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "generate_certificates=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Copy certificates to nodes
        id: copy_certificates
        if: success() && steps.generate_certificates.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_copy_certificates.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l indexers \
            -e "tmp_path=$TMP_PATH" \
            -e "resources_path=$RESOURCES_PATH" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "copy_certificates=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute indexer installation playbook
        id: install_indexer
        if: success() && steps.copy_certificates.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_indexer.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l indexers \
            -e "tmp_path=$TMP_PATH" \
            -e "pkg_repository=$PKG_REPOSITORY" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "install_indexer=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute indexer cluster start playbook
        id: start_indexer
        if: success() && steps.install_indexer.outcome == 'success'
        run: |
          set +e
          INDEXER_ADMIN_PASSWORD="admin"
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_start_indexer_cluster.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l indexers \
            -e "tmp_path=$TMP_PATH" \
            -e "pkg_repository=$PKG_REPOSITORY" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "start_indexer=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute server installation playbook
        id: install_server
        if: success() && steps.start_indexer.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_wazuh.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l managers \
            -e "tmp_path=$TMP_PATH" \
            -e "pkg_repository=$PKG_REPOSITORY" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "install_server=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute dashboard installation playbook
        id: install_dashboard
        if: success() && steps.install_server.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_install_dashboard.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l dashboards \
            -e "tmp_path=$TMP_PATH" \
            -e "pkg_repository=$PKG_REPOSITORY" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "install_dashboard=$EXIT_CODE" >> $GITHUB_OUTPUT
          if [ $EXIT_CODE != 0 ]; then
            exit 1
          fi
      - name: Execute Python test playbook
        id: execute_test
        if: success() && steps.install_dashboard.outcome == 'success'
        run: |
          set +e
          ANSIBLE_STDOUT_CALLBACK=$ANSIBLE_CALLBACK ansible-playbook .github/workflows/ansible-playbooks/distributed_tests.yml \
            -i $ALLOCATOR_PATH/inventory \
            -l managers \
            -e "tmp_path=$TMP_PATH" \
            -e "test_name=$TEST_NAME" \
            "${{ inputs.VERBOSITY }}"
          EXIT_CODE=$?
          echo "execute_test=$EXIT_CODE" >> $GITHUB_OUTPUT
      - name: Compress Allocator VM directory
        id: compress_allocator_files
        if: always() && steps.allocator_instance.outcome == 'success' && inputs.DESTROY == false
        run: |
          zip -P "${{ secrets.ZIP_ARTIFACTS_PASSWORD }}" -r $ALLOCATOR_PATH.zip $ALLOCATOR_PATH
      - name: Upload Allocator VM directory as artifact
        if: always() && steps.compress_allocator_files.outcome == 'success' && inputs.DESTROY == false
        uses: actions/upload-artifact@v4
        with:
          name: allocator-instance-${{ matrix.system }}
          path: ${{ env.ALLOCATOR_PATH }}.zip
      - name: Delete allocated VMs
        if: always() && steps.allocator_instance.outcome == 'success' && inputs.DESTROY == true
        run: |
          instance_names=($INSTANCE_NAMES)
          for i in ${!instance_names[@]}; do
            instance_name=${instance_names[$i]}
            track_file="$ALLOCATOR_PATH/track_${instance_name}.yml"
            echo "Deleting instance: $instance_name using track file $track_file"
            (
              # Delete instance
              python3 wazuh-automation/deployability/modules/allocation/main.py \
                --action delete --provider aws --track-output $track_file
            ) &
          done
          # Wait for all deletion tasks to complete
          wait
      - name: Set final workflow status
        if: always()
        shell: bash
        run: |
          PROVISION_INSTANCE="${{ steps.provision_instance.outputs.provision_instance }}"
          GENERATE_CERTIFICATES="${{ steps.generate_certificates.outputs.generate_certificates }}"
          COPY_CERTIFICATES="${{ steps.copy_certificates.outputs.copy_certificates }}"
          INSTALL_INDEXER="${{ steps.install_indexer.outputs.install_indexer }}"
          START_INDEXER="${{ steps.start_indexer.outputs.start_indexer }}"
          INSTALL_SERVER="${{ steps.install_server.outputs.install_server }}"
          INSTALL_DASHBOARD="${{ steps.install_dashboard.outputs.install_dashboard }}"
          EXECUTE_TEST="${{ steps.execute_test.outputs.execute_test }}"
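          # Each variable above holds the exit code exported by the corresponding step; an empty or
          # non-zero value means that step failed or was skipped. The installation checks below fail
          # the job with a descriptive error, while a failing test run only raises a warning.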
if [ "$PROVISION_INSTANCE" != "0" ] ; then | |
echo "::error :: Failed provisioning instances" | |
exit 1 | |
fi | |
if [ "$GENERATE_CERTIFICATES" != "0" ] ; then | |
echo "::error :: Failed generating certificates" | |
exit 1 | |
fi | |
if [ "$COPY_CERTIFICATES" != "0" ] ; then | |
echo "::error :: Failed copy certificates" | |
exit 1 | |
fi | |
if [ "$INSTALL_INDEXER" != "0" ] ; then | |
echo "::error :: Failed installing Wazuh indexer" | |
exit 1 | |
fi | |
if [ "$START_INDEXER" != "0" ] ; then | |
echo "::error :: Failed starting Wazuh indexer" | |
exit 1 | |
fi | |
if [ "$INSTALL_SERVER" != "0" ] ; then | |
echo "::error :: Failed installing Wazuh server" | |
exit 1 | |
fi | |
if [ "$INSTALL_DASHBOARD" != "0" ] ; then | |
echo "::error :: Failed installing Wazuh dashboard" | |
exit 1 | |
fi | |
if [ "$EXECUTE_TEST" != "0" ]; then | |
echo "::warning ::Installation had issues but tests passed" | |
exit 0 | |
fi | |
echo "All steps completed successfully" | |
exit 0 |