diff --git a/changelogs/fragments/20231219-deploy_flask_app-update-arguments-spec.yml b/changelogs/fragments/20231219-deploy_flask_app-update-arguments-spec.yml
new file mode 100644
index 00000000..a629aa91
--- /dev/null
+++ b/changelogs/fragments/20231219-deploy_flask_app-update-arguments-spec.yml
@@ -0,0 +1,10 @@
+---
+breaking_changes:
+  - >-
+    roles/deploy_flask_app - Add parameter ``deploy_flask_app_bastion_ssh_private_key_path`` to define
+    the path to the SSH private key file used to connect to the bastion host (https://github.com/redhat-cop/cloud.aws_ops/issues/109).
+  - >-
+    roles/deploy_flask_app - The following parameters are no longer required and have been removed:
+    ``deploy_flask_app_bastion_host_required_packages``, ``deploy_flask_app_local_registry_port``,
+    ``deploy_flask_app_local_registry_pwd``, ``deploy_flask_app_local_registry_user``,
+    ``deploy_flask_app_git_repository`` (https://github.com/redhat-cop/cloud.aws_ops/issues/103).
diff --git a/playbooks/webapp/README.md b/playbooks/webapp/README.md
index bf7bdef3..ffd54b22 100644
--- a/playbooks/webapp/README.md
+++ b/playbooks/webapp/README.md
@@ -101,16 +101,6 @@ To delete the webapp:
 * **deploy_flask_app_bastion_host_name** (str): Name for the EC2 instance. Default: `"{{ resource_prefix }}-bastion"`
 * **bastion_host_type** (str): Instance type for the EC2 instance. Default: `t2.xlarge`
 * **deploy_flask_app_bastion_host_username** (str): Username for the bastion host SSH user. Default: `fedora`
-* **deploy_flask_app_bastion_host_required_packages** (list, elements str): Packages to be installed on the bastion host. Default:
-  ```yaml
-  - python3
-  - python-virtualenv
-  - sshpass
-  - git
-  - podman
-  - httpd-tools
-  - ansible
-  ```
 
 ### Networking
 
@@ -142,12 +132,8 @@ To delete the webapp:
 
 ### Webapp
 
-* **deploy_flask_app_git_repository** (str): Git repository for the webapp. Default: `https://github.com/abikouo/webapp_pyflask_demo.git`
 * **deploy_flask_app_number_of_workers** (int): Number of worker instances to create. Default: `2`
 * **deploy_flask_app_workers_instance_type** (str): EC2 instance type for workers. Default: `t2.xlarge`
-* **deploy_flask_app_local_registry_user** (str): Username for local Podman registry. Default: `ansible`
-* **deploy_flask_app_local_registry_pwd** (str): Password for local Podman registry. Default: `testing123`
-* **deploy_flask_app_local_registry_port** (int): Port for the local Podman registery. Default: `"{{ app_listening_port }}"`
 * **deploy_flask_app_config** (dict, elements dict): Configuration values for the webapp, passed as corresponding env variables FLASK_APP, FLASK_ENV, ADMIN_USER, and ADMIN_PASSWORD when the app is deployed.
Default: ```yaml app_dir: /app/pyapp diff --git a/playbooks/webapp/files/ec2-trust-policy.json b/playbooks/webapp/files/ec2-trust-policy.json new file mode 100644 index 00000000..63d22eae --- /dev/null +++ b/playbooks/webapp/files/ec2-trust-policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } diff --git a/playbooks/webapp/files/run_app.yaml b/playbooks/webapp/files/run_app.yaml deleted file mode 100644 index 62033421..00000000 --- a/playbooks/webapp/files/run_app.yaml +++ /dev/null @@ -1,70 +0,0 @@ ---- -- name: Run app - hosts: all - gather_facts: false - strategy: free - become: true - - vars: - container_name: webapp-container01 - - tasks: - - name: Update ssh_config - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regex: "{{ item.regex }}" - line: "{{ item.line }}" - loop: - - regex: ^(# *)?ClientAliveInterval - line: ClientAliveInterval 1200 - - regex: ^(# *)?ClientAliveCountMax - line: ClientAliveCountMax 3 - - - name: Install Podman - ansible.builtin.yum: - name: - - podman - sslverify: false - validate_certs: false - update_cache: true - state: present - - - name: Pull image from private registry - ansible.builtin.shell: - cmd: > - podman login {{ registry_host_port }} - -u {{ registry_login.user }} - -p {{ registry_login.password }} - --tls-verify=false && - podman pull {{ registry_host_port }}/ansible-webapp - --tls-verify=false - changed_when: false - - - name: Check running container - ansible.builtin.shell: - cmd: > - podman container ps -a - -f name={{ container_name }} - --format=.Names - register: container - changed_when: false - - - name: Run application instance - ansible.builtin.shell: - cmd: > - podman run --rm - -e FLASK_APP="{{ application_dir }}" - -e FLASK_ENV="{{ application_env }}" - -e DATABASE_HOST="{{ application_db.host }}" - -e DATABASE_INSTANCE="{{ application_db.instance }}" - -e DATABASE_USER="{{ application_db.dbuser_name }}" - -e DATABASE_PASSWORD="{{ application_db.dbuser_password }}" - -e ADMIN_USER="{{ application_db.admin_user }}" - -e ADMIN_PASSWORD="{{ application_db.admin_password }}" - -e WORKER_HOSTNAME="{{ inventory_hostname }}" - -e WORKERS_HOSTS="{{ workers_hosts }}" - -p 5000:5000 --name {{ container_name }} - -d {{ registry_host_port }}/ansible-webapp - when: - - container.stdout == "" - changed_when: true diff --git a/playbooks/webapp/migrate_webapp.yaml b/playbooks/webapp/migrate_webapp.yaml index 83886ad1..461dd6a7 100644 --- a/playbooks/webapp/migrate_webapp.yaml +++ b/playbooks/webapp/migrate_webapp.yaml @@ -53,16 +53,23 @@ rds_snapshot_arn: "{{ result.db_snapshot_arn }}" region: "{{ dest_region }}" - - name: Deploy app + - name: Create workers and deploy application when: operation == "create" - ansible.builtin.import_role: - name: cloud.aws_ops.deploy_flask_app - vars: - deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" - deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" - deploy_flask_app_vm_info: "{{ vm_result }}" - deploy_flask_app_rds_info: "{{ rds_result }}" - deploy_flask_app_region: "{{ dest_region }}" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ dest_region }}" + block: + - name: Deploy app + ansible.builtin.import_role: + name: cloud.aws_ops.deploy_flask_app + vars: + 
deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" + deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" + deploy_flask_app_vm_info: "{{ vm_result }}" + deploy_flask_app_rds_info: "{{ rds_result }}" - name: Delete RDS snapshots from different regions amazon.aws.rds_instance_snapshot: diff --git a/playbooks/webapp/tasks/add_route53_records.yaml b/playbooks/webapp/tasks/add_route53_records.yaml index 70935d5f..3cd2d2bc 100644 --- a/playbooks/webapp/tasks/add_route53_records.yaml +++ b/playbooks/webapp/tasks/add_route53_records.yaml @@ -43,6 +43,7 @@ failover: "PRIMARY" health_check: "{{ healthchk_primary_result.health_check.id }}" alias_hosted_zone_id: "{{ primary_lb.elb.hosted_zone_id }}" + overwrite: true register: alias_record_primary_result - name: Add an alias record that points to an aws ELB in the replica region @@ -57,6 +58,7 @@ failover: "SECONDARY" health_check: "{{ healthchk_replica_result.health_check.id }}" alias_hosted_zone_id: "{{ replica_lb.elb.hosted_zone_id }}" + overwrite: true register: alias_record_replica_result - name: Pause for 30 secs for the alias records to be active diff --git a/playbooks/webapp/tasks/create.yaml b/playbooks/webapp/tasks/create.yaml index c885e592..68f459bb 100644 --- a/playbooks/webapp/tasks/create.yaml +++ b/playbooks/webapp/tasks/create.yaml @@ -202,45 +202,50 @@ db_instance_identifier: "{{ rds_identifier }}" register: rds_result - - name: Set 'sshkey_file' variable + - name: Set variable for SSH private key file path ansible.builtin.set_fact: - sshkey_file: ~/private-key-{{ deploy_flask_app_sshkey_pair_name }}-{{ region | default(aws_region) }} + deploy_flask_app_bastion_ssh_private_key_path: "~/.{{ resource_prefix }}_id_rsa" + when: deploy_flask_app_bastion_ssh_private_key_path is undefined - - name: Create key pair to connect to the VM + - name: Create key pair to connect to the workers amazon.aws.ec2_key: name: "{{ deploy_flask_app_sshkey_pair_name }}" - register: rsa_key + register: keypair_result - - name: Save private key into file + - name: Save private key into a file ansible.builtin.copy: - content: "{{ rsa_key.key.private_key }}" - dest: "{{ sshkey_file }}" - mode: 0400 - when: rsa_key is changed - - - name: Check if the vm exists - amazon.aws.ec2_instance_info: - filters: - instance-type: "{{ bastion_host_type }}" - key-name: "{{ deploy_flask_app_sshkey_pair_name }}" - vpc-id: "{{ vpc.vpc.id }}" - instance-state-name: running - register: vm_result + content: "{{ keypair_result.key.private_key }}" + dest: "{{ deploy_flask_app_bastion_ssh_private_key_path }}" + mode: 0600 + when: keypair_result is changed + + - name: Ensure IAM instance role exists + amazon.aws.iam_role: + name: "{{ ec2_iam_role_name }}" + assume_role_policy_document: "{{ lookup('file', 'ec2-trust-policy.json') }}" + state: present + create_instance_profile: true + wait: true + register: role_output - name: Create a virtual machine - when: vm_result.instances | length == 0 amazon.aws.ec2_instance: name: "{{ deploy_flask_app_bastion_host_name }}" instance_type: "{{ bastion_host_type }}" image_id: "{{ images.images.0.image_id }}" key_name: "{{ deploy_flask_app_sshkey_pair_name }}" subnet_id: "{{ subnet.subnet.id }}" + ebs_optimized: true + instance_role: "{{ role_output.iam_role.role_name }}" network: assign_public_ip: true groups: - "{{ secgroup.group_id }}" security_groups: - "{{ secgroup.group_id }}" + user_data: | + #!/bin/bash + yum install -y python3 python-virtualenv sshpass netcat ansible wait: true state: started register: vm_result diff --git 
a/playbooks/webapp/tasks/create_aurora_db_cluster.yaml b/playbooks/webapp/tasks/create_aurora_db_cluster.yaml index e92c4b9f..6c1acddf 100644 --- a/playbooks/webapp/tasks/create_aurora_db_cluster.yaml +++ b/playbooks/webapp/tasks/create_aurora_db_cluster.yaml @@ -44,12 +44,6 @@ create_rds_global_cluster_replica_cluster_vpc_security_group_ids: - "{{ rds_replica_sg.security_groups[0].group_id }}" - - name: Get primary instance info - amazon.aws.rds_instance_info: - db_instance_identifier: "{{ rds_primary_cluster_instance_name }}" - region: "{{ rds_primary_cluster_region }}" - register: primary_instance_info_result - - name: Get primary cluster info amazon.aws.rds_cluster_info: db_cluster_identifier: "{{ rds_primary_cluster_name }}" @@ -62,12 +56,6 @@ region: "{{ rds_replica_cluster_region }}" register: replica_cluster_info_result - - name: Get replica instance info - amazon.aws.rds_instance_info: - db_instance_identifier: "{{ rds_replica_cluster_instance_name }}" - region: "{{ rds_replica_cluster_region }}" - register: replica_instance_info_result - - name: Get global db info amazon.aws.rds_global_cluster_info: global_cluster_identifier: "{{ rds_global_cluster_name }}" diff --git a/playbooks/webapp/tasks/delete.yaml b/playbooks/webapp/tasks/delete.yaml index 1ae28294..5e8f111e 100644 --- a/playbooks/webapp/tasks/delete.yaml +++ b/playbooks/webapp/tasks/delete.yaml @@ -8,7 +8,6 @@ region: "{{ region | default(aws_region) }}" block: - - name: Get vpc information amazon.aws.ec2_vpc_net_info: filters: @@ -22,54 +21,37 @@ ansible.builtin.set_fact: vpc_id: "{{ vpc.vpcs.0.vpc_id }}" - - name: Get bastion instance info + # Delete Load balancer + - name: List Load balancer(s) from VPC + community.aws.elb_classic_lb_info: + register: load_balancers + + - name: Set fact for list of load balancers to delete + ansible.builtin.set_fact: + load_balancers_to_delete: "{{ load_balancers.elbs | selectattr('vpc_id', 'equalto', vpc_id) | map(attribute='load_balancer_name') | list }}" + + - name: Delete load balancer(s) + amazon.aws.elb_classic_lb: + name: "{{ item }}" + wait: true + state: absent + with_items: "{{ load_balancers_to_delete }}" + + # Delete EC2 instances + - name: Get EC2 instance info amazon.aws.ec2_instance_info: filters: - instance-type: "{{ bastion_host_type }}" - key-name: "{{ deploy_flask_app_sshkey_pair_name }}" vpc-id: "{{ vpc_id }}" - instance-state-name: running - register: bastion - - - name: Delete EC2 instances with dependant Resources - when: bastion.instances | length == 1 - block: - - name: Set 'instance_host_name' variable - ansible.builtin.set_fact: - instance_host_name: "{{ bastion.instances.0.public_dns_name | split('.') | first }}" - - - name: Delete workers key pair - amazon.aws.ec2_key: - name: "{{ instance_host_name }}-key" - state: absent - - - name: Delete load balancer - amazon.aws.elb_classic_lb: - name: "{{ instance_host_name }}-lb" - wait: true - state: absent - - - name: List workers - amazon.aws.ec2_instance_info: - filters: - tag:Name: "{{ instance_host_name }}-workers" - instance-state-name: running - register: running - - - name: Delete workers - when: running.instances | length != 0 - amazon.aws.ec2_instance: - instance_ids: "{{ running.instances | map(attribute='instance_id') | list }}" - wait: true - state: terminated - - - name: Delete bastion host - amazon.aws.ec2_instance: - instance_ids: - - "{{ bastion.instances.0.instance_id }}" - wait: true - state: terminated + register: ec2_instances + + - name: Delete ec2 instances from VPC + amazon.aws.ec2_instance: 
+        instance_ids: "{{ ec2_instances.instances | map(attribute='instance_id') | list }}"
+        wait: true
+        state: terminated
+      when: ec2_instances.instances | length > 0
+
+    # Delete RDS instance
     - name: Delete RDS instance
       amazon.aws.rds_instance:
         state: absent
@@ -77,29 +59,18 @@
         skip_final_snapshot: true
         wait: true
 
-    - name: Delete key pair to connect to the bastion VM
+    - name: Delete key pairs created for the deployment
       amazon.aws.ec2_key:
-        name: "{{ deploy_flask_app_sshkey_pair_name }}"
+        name: "{{ item }}"
         state: absent
+      with_items: "{{ ec2_instances.instances | map(attribute='key_name') | unique | list }}"
 
     - name: Delete RDS subnet group
       amazon.aws.rds_subnet_group:
         name: "{{ rds_subnet_group_name }}"
         state: absent
 
-    - name: List Security group from VPC
-      amazon.aws.ec2_security_group_info:
-        filters:
-          vpc-id: "{{ vpc_id }}"
-          tag:prefix: "{{ resource_prefix }}"
-      register: secgroups
-
-    - name: Delete security groups
-      amazon.aws.ec2_security_group:
-        state: absent
-        group_id: "{{ item }}"
-      with_items: "{{ secgroups.security_groups | map(attribute='group_id') | list }}"
-
+    # Delete VPC route table
     - name: List routes table from VPC
       amazon.aws.ec2_vpc_route_table_info:
         filters:
@@ -115,6 +86,7 @@
         state: absent
       with_items: "{{ route_table.route_tables | map(attribute='id') | list }}"
 
+    # Delete NAT Gateway
     - name: Get NAT gateway
       amazon.aws.ec2_vpc_nat_gateway_info:
         filters:
@@ -128,20 +100,39 @@
         wait: true
       with_items: "{{ nat_gw.result | map(attribute='nat_gateway_id') | list }}"
 
+    # Delete Internet gateway
     - name: Delete internet gateway
       amazon.aws.ec2_vpc_igw:
        vpc_id: "{{ vpc_id }}"
        state: absent
 
+    # Delete Subnets
+    - name: List Subnets from VPC
+      amazon.aws.ec2_vpc_subnet_info:
+        filters:
+          vpc-id: "{{ vpc_id }}"
+      register: vpc_subnets
+
     - name: Delete subnets
       amazon.aws.ec2_vpc_subnet:
         cidr: "{{ item }}"
         state: absent
         vpc_id: "{{ vpc_id }}"
-      with_items: "{{ subnet_cidr }}"
+      with_items: "{{ vpc_subnets.subnets | map(attribute='cidr_block') | list }}"
+
+    # Delete Security groups
+    - name: List Security group from VPC
+      amazon.aws.ec2_security_group_info:
+        filters:
+          vpc-id: "{{ vpc_id }}"
+      register: secgroups
+
+    - name: Delete security groups
+      amazon.aws.ec2_security_group:
+        state: absent
+        group_id: "{{ item }}"
+      with_items: "{{ secgroups.security_groups | rejectattr('group_name', 'equalto', 'default') | map(attribute='group_id') | list }}"
 
-    # As ec2_vpc_route_table can't delete route table, the vpc still has dependencies and cannot be deleted.
-    # You need to do it delete it manually using either the console or the cli.
- name: Delete VPC amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" diff --git a/playbooks/webapp/tasks/deploy_app_into_region.yaml b/playbooks/webapp/tasks/deploy_app_into_region.yaml new file mode 100644 index 00000000..8aad0dd1 --- /dev/null +++ b/playbooks/webapp/tasks/deploy_app_into_region.yaml @@ -0,0 +1,43 @@ +--- +- name: Deploy application into regions + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ region }}" + block: + - name: Get VPC info + amazon.aws.ec2_vpc_net_info: + filters: + "tag:Name": "{{ vpc_name }}" + register: vpc_result + + - name: Get Private Subnet for Workers + amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: "{{ vpc_result.vpcs[0].id }}" + cidr: "{{ subnet_cidr[1] }}" + register: _subnets + + - name: Get VM info + amazon.aws.ec2_instance_info: + filters: + "tag:Name": "{{ deploy_flask_app_bastion_host_name }}" + instance-state-name: ["running"] + register: vm_result + + - name: Get RDS instance info + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ rds_cluster_name }}" + register: rds_result + + - name: Deploy app into region + ansible.builtin.include_role: + name: cloud.aws_ops.deploy_flask_app + vars: + deploy_flask_app_private_subnet_id: "{{ _subnets.subnets[0].id }}" + deploy_flask_app_vpc_id: "{{ vpc_result.vpcs[0].id }}" + deploy_flask_app_bastion_instance_id: "{{ vm_result.instances.0.instance_id }}" + deploy_flask_app_rds_host: "{{ rds_result.instances.0.endpoint.address }}" + deploy_flask_app_rds_dbname: "{{ rds_result.instances.0.db_name }}" diff --git a/playbooks/webapp/vars/main.yaml b/playbooks/webapp/vars/main.yaml index b04589a8..05df567c 100644 --- a/playbooks/webapp/vars/main.yaml +++ b/playbooks/webapp/vars/main.yaml @@ -12,8 +12,9 @@ resource_tags: prefix: "{{ resource_prefix }}" operation: create -image_filter: Fedora-Cloud-Base-35-*gp2-0 +image_filter: Fedora-Cloud-Base-38-* public_secgroup_name: "{{ resource_prefix }}-sg" +ec2_iam_role_name: "{{ resource_prefix }}-role" rds_subnet_group_name: "{{ resource_prefix }}-rds-sg" rds_secgroup_name: "{{ resource_prefix }}-rds-sec" rds_identifier: "{{ resource_prefix }}-rds-01" @@ -22,7 +23,7 @@ rds_instance_class: db.m6g.large rds_instance_name: mysampledb123 rds_engine: postgres rds_engine_version: "14.8" -bastion_host_type: t2.xlarge +bastion_host_type: t3.micro bastion_host_venv_path: ~/env rds_listening_port: 5432 @@ -30,29 +31,16 @@ rds_listening_port: 5432 deploy_flask_app_sshkey_pair_name: "{{ resource_prefix }}-key" deploy_flask_app_bastion_host_name: "{{ resource_prefix }}-bastion" deploy_flask_app_bastion_host_username: fedora -deploy_flask_app_bastion_host_required_packages: - - python3 - - python-virtualenv - - sshpass - - git - - gcc - - podman - - httpd-tools - - ansible-core -deploy_flask_app_workers_instance_type: t2.xlarge +deploy_flask_app_workers_instance_type: t3.micro deploy_flask_app_workers_user_name: fedora deploy_flask_app_number_of_workers: 2 deploy_flask_app_listening_port: 5000 -deploy_flask_app_git_repository: https://github.com/abikouo/webapp_pyflask_demo.git deploy_flask_app_config: env: development admin_user: admin admin_password: admin app_dir: /app/pyapp deploy_flask_app_force_init: false -deploy_flask_app_local_registry_user: ansible -deploy_flask_app_local_registry_pwd: testing123 -deploy_flask_app_local_registry_port: "{{ deploy_flask_app_listening_port }}" 
deploy_flask_app_rds_master_password: L#5cH2mgy_ deploy_flask_app_rds_master_username: ansible @@ -71,4 +59,8 @@ rds_replica_cluster_region: us-east-2 rds_replica_cluster_instance_name: "{{ resource_prefix }}-replica-instance" # vars for route53 records +route53_zone_name: "ansiblecloud.xyz" route53_subdomain: "flaskapp" + +# A bucket to save RSA key into +bucket_name: "bucket-rsa-{{ resource_prefix }}" diff --git a/playbooks/webapp/webapp.yaml b/playbooks/webapp/webapp.yaml index 0f169154..265a7162 100644 --- a/playbooks/webapp/webapp.yaml +++ b/playbooks/webapp/webapp.yaml @@ -14,19 +14,21 @@ - name: Run operation create/delete ansible.builtin.import_tasks: tasks/{{ operation }}.yaml -- name: Deploy Flask App - hosts: localhost - gather_facts: false - vars_files: - - vars/main.yaml - - tasks: - - name: Deploy app + - name: Create workers and deploy application when: operation == "create" - ansible.builtin.include_role: - name: cloud.aws_ops.deploy_flask_app - vars: - deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" - deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" - deploy_flask_app_vm_info: "{{ vm_result }}" - deploy_flask_app_rds_info: "{{ rds_result }}" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key | default(omit) }}" + aws_secret_key: "{{ aws_secret_key | default(omit) }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ region | default(aws_region) }}" + block: + - name: Deploy app + ansible.builtin.include_role: + name: cloud.aws_ops.deploy_flask_app + vars: + deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" + deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" + deploy_flask_app_bastion_instance_id: "{{ vm_result.instance_ids.0 }}" + deploy_flask_app_rds_host: "{{ rds_result.instances.0.endpoint.address }}" + deploy_flask_app_rds_dbname: "{{ rds_result.instances.0.db_name }}" diff --git a/playbooks/webapp/webapp_ha_aurora.yaml b/playbooks/webapp/webapp_ha_aurora.yaml index a16a1ccd..f030b776 100644 --- a/playbooks/webapp/webapp_ha_aurora.yaml +++ b/playbooks/webapp/webapp_ha_aurora.yaml @@ -1,106 +1,86 @@ --- -- name: Webapp HA +- name: Configure inventory for High availability Aurora cluster hosts: localhost gather_facts: false + + vars_files: + - vars/main.yaml + + tasks: + - name: Add different hosts + ansible.builtin.add_host: + groups: + - aurora + name: "aurora_{{ item.region }}" + ansible_connection: local + region: "{{ item.region }}" + ansible_python_interpreter: "{{ ansible_python_interpreter }}" + deploy_flask_app_bastion_ssh_private_key_path: "~/.{{ resource_prefix }}{{ item.region }}_id_rsa" + rds_cluster_name: "{{ item.rds_cluster_name }}" + with_items: + - region: "{{ rds_primary_cluster_region }}" + rds_cluster_name: "{{ rds_primary_cluster_instance_name }}" + - region: "{{ rds_replica_cluster_region }}" + rds_cluster_name: "{{ rds_replica_cluster_instance_name }}" + +- name: Webapp HA + hosts: aurora + gather_facts: false + strategy: free + vars_files: - vars/main.yaml tasks: - - name: Create resources and Deploy App + - name: Create resources in region + ansible.builtin.include_tasks: tasks/create.yaml when: operation == "create" - block: - - name: Create resources in primary region - ansible.builtin.include_tasks: tasks/create.yaml - vars: - region: "{{ creation_region }}" - rds_instance_class: db.r5.large - rds_engine: aurora-postgresql - loop: - - "{{ rds_primary_cluster_region }}" - - "{{ rds_replica_cluster_region }}" - loop_control: - loop_var: creation_region - - - name: Create Aurora 
db cluster - ansible.builtin.import_tasks: tasks/create_aurora_db_cluster.yaml - vars: - rds_instance_class: db.r5.large - rds_engine: aurora-postgresql + vars: + rds_instance_class: db.r5.large + rds_engine: aurora-postgresql - # ================= Deploy App in the primary region ================= +- name: Create Aurora db cluster + hosts: localhost + vars_files: + - vars/main.yaml - - name: Get VPC info from primary region - amazon.aws.ec2_vpc_net_info: - filters: - "tag:Name": "{{ vpc_name }}" - region: "{{ rds_primary_cluster_region }}" - register: primary_vpc + tasks: + - name: Create Aurora db cluster + ansible.builtin.import_tasks: tasks/create_aurora_db_cluster.yaml + when: operation == "create" + vars: + rds_instance_class: db.r5.large + rds_engine: aurora-postgresql - - name: Get primary private subnet for workers - amazon.aws.ec2_vpc_subnet_info: - filters: - vpc-id: "{{ primary_vpc.vpcs[0].id }}" - cidr: "{{ subnet_cidr[1] }}" - region: "{{ rds_primary_cluster_region }}" - register: primary_private_subnet +- name: Deploy application into regions + hosts: localhost + gather_facts: false - - name: Get VM info in the primary region - amazon.aws.ec2_instance_info: - filters: - "tag:Name": "{{ deploy_flask_app_bastion_host_name }}" - instance-state-name: [ "running"] - region: "{{ rds_primary_cluster_region }}" - register: primary_vm_result + vars_files: + - vars/main.yaml - - name: Deploy app in primary region - ansible.builtin.include_role: - name: cloud.aws_ops.deploy_flask_app + tasks: + - name: Deploy application and add Route53 records + when: operation == "create" + block: + - name: Deploy application into primary region + ansible.builtin.import_tasks: tasks/deploy_app_into_region.yaml vars: - deploy_flask_app_private_subnet_id: "{{ primary_private_subnet.subnets[0].id }}" - deploy_flask_app_vpc_id: "{{ primary_vpc.vpcs[0].id }}" - deploy_flask_app_vm_info: "{{ primary_vm_result }}" - deploy_flask_app_rds_info: "{{ primary_instance_info_result }}" - deploy_flask_app_region: "{{ rds_primary_cluster_region }}" + region: "{{ rds_primary_cluster_region }}" + rds_cluster_name: "{{ rds_primary_cluster_instance_name }}" + deploy_flask_app_bastion_ssh_private_key_path: "~/.{{ resource_prefix }}{{ rds_primary_cluster_region }}_id_rsa" - name: Get load balancer name from the primary region ansible.builtin.set_fact: primary_lb: "{{ deploy_flask_app_lb_result }}" - # ================= Deploy App in the replica region ================= - - - name: Get VPC info from replica region - amazon.aws.ec2_vpc_net_info: - filters: - "tag:Name": "{{ vpc_name }}" - region: "{{ rds_replica_cluster_region }}" - register: replica_vpc - - - name: Get VM info in the replica region - amazon.aws.ec2_instance_info: - filters: - "tag:Name": "{{ deploy_flask_app_bastion_host_name }}" - instance-state-name: [ "running"] - region: "{{ rds_replica_cluster_region }}" - register: replica_vm_result - - - name: Get replica private subnet for workers - amazon.aws.ec2_vpc_subnet_info: - filters: - vpc-id: "{{ replica_vpc.vpcs[0].id }}" - cidr: "{{ subnet_cidr[1] }}" - region: "{{ rds_replica_cluster_region }}" - register: replica_private_subnet - - - name: Deploy app in replica region - ansible.builtin.include_role: - name: cloud.aws_ops.deploy_flask_app + - name: Deploy application into replica region + ansible.builtin.import_tasks: tasks/deploy_app_into_region.yaml vars: - deploy_flask_app_private_subnet_id: "{{ replica_private_subnet.subnets[0].id }}" - deploy_flask_app_vpc_id: "{{ replica_vpc.vpcs[0].id }}" - 
deploy_flask_app_vm_info: "{{ replica_vm_result }}" - deploy_flask_app_rds_info: "{{ replica_instance_info_result }}" - deploy_flask_app_region: "{{ rds_replica_cluster_region }}" + region: "{{ rds_replica_cluster_region }}" + rds_cluster_name: "{{ rds_replica_cluster_instance_name }}" + deploy_flask_app_bastion_ssh_private_key_path: "~/.{{ resource_prefix }}{{ rds_replica_cluster_region }}_id_rsa" - name: Get load balancer name from the replica region ansible.builtin.set_fact: @@ -109,34 +89,45 @@ - name: Add Route53 configurations ansible.builtin.include_tasks: tasks/add_route53_records.yaml - # ================================================================================ +# ================================================================================ - - name: Delete resources +- name: Delete Route53 records and Aurora cluster + hosts: localhost + gather_facts: false + + vars_files: + - vars/main.yaml + + tasks: + - name: Delete Route 53 records and health checks + ansible.builtin.import_tasks: tasks/delete_route53_records.yaml when: operation == "delete" - block: - - name: Delete Route 53 records and health checks - ansible.builtin.import_tasks: tasks/delete_route53_records.yaml + - name: Delete Aurora DB + ansible.builtin.include_role: + name: cloud.aws_ops.create_rds_global_cluster + when: operation == "delete" + vars: + create_rds_global_cluster_operation: delete + create_rds_global_cluster_global_cluster_name: "{{ rds_global_cluster_name }}" + create_rds_global_cluster_primary_cluster_name: "{{ rds_primary_cluster_name }}" + create_rds_global_cluster_primary_cluster_region: "{{ rds_primary_cluster_region }}" + create_rds_global_cluster_primary_cluster_instance_name: "{{ rds_primary_cluster_instance_name }}" + create_rds_global_cluster_replica_cluster_name: "{{ rds_replica_cluster_name }}" + create_rds_global_cluster_replica_cluster_region: "{{ rds_replica_cluster_region }}" + create_rds_global_cluster_replica_cluster_instance_name: "{{ rds_replica_cluster_instance_name }}" + +- name: Delete EC2 resources + hosts: aurora + gather_facts: false + strategy: free - - name: Delete Aurora DB - ansible.builtin.include_role: - name: cloud.aws_ops.create_rds_global_cluster - vars: - create_rds_global_cluster_operation: delete - create_rds_global_cluster_global_cluster_name: "{{ rds_global_cluster_name }}" - create_rds_global_cluster_primary_cluster_name: "{{ rds_primary_cluster_name }}" - create_rds_global_cluster_primary_cluster_region: "{{ rds_primary_cluster_region }}" - create_rds_global_cluster_primary_cluster_instance_name: "{{ rds_primary_cluster_instance_name }}" - create_rds_global_cluster_replica_cluster_name: "{{ rds_replica_cluster_name }}" - create_rds_global_cluster_replica_cluster_region: "{{ rds_replica_cluster_region }}" - create_rds_global_cluster_replica_cluster_instance_name: "{{ rds_replica_cluster_instance_name }}" - - - name: Delete all resources - ansible.builtin.include_tasks: tasks/delete.yaml - vars: - region: "{{ deletion_region }}" - loop: - - "{{ rds_primary_cluster_region }}" - - "{{ rds_replica_cluster_region }}" - loop_control: - loop_var: deletion_region + vars_files: + - vars/main.yaml + + tasks: + - name: Delete all resources + ansible.builtin.include_tasks: tasks/delete.yaml + when: operation == "delete" + vars: + rds_identifier: "{{ rds_cluster_name }}" diff --git a/roles/create_rds_global_cluster/README.md b/roles/create_rds_global_cluster/README.md index 51f8202b..fbaa88f4 100644 --- a/roles/create_rds_global_cluster/README.md +++ 
b/roles/create_rds_global_cluster/README.md
@@ -32,9 +32,9 @@
 - **create_rds_global_cluster_global_cluster_name** - Name of the Amazon Aurora global cluster. **required**
 - **create_rds_global_cluster_engine** - Engine of the Amazon Aurora global and rds clusters. Default is aurora-postgresql.
 - **create_rds_global_cluster_engine_version** - Engine version of the Amazon Aurora global and rds clusters.
-- **create_rds_global_cluster_instance_class** - Instance class of instance in primary and replica cluster. **required**
-- **create_rds_global_cluster_master_username** - Username of the rds clusters master user. **required**
-- **create_rds_global_cluster_master_user_password** - Password of the rds clusters master user. **required**
+- **create_rds_global_cluster_instance_class** - Instance class of the instances in the primary and replica clusters. **Required** when __create_rds_global_cluster_operation__ is set to __create__.
+- **create_rds_global_cluster_master_username** - Username of the RDS clusters' master user. **Required** when __create_rds_global_cluster_operation__ is set to __create__.
+- **create_rds_global_cluster_master_user_password** - Password of the RDS clusters' master user. **Required** when __create_rds_global_cluster_operation__ is set to __create__.
 
 **Primary cluster variables**
 - **create_rds_global_cluster_primary_cluster_name** - Name of the primary cluster. Default is $create_rds_global_cluster_global_cluster_name.
diff --git a/roles/create_rds_global_cluster/meta/argument_specs.yml b/roles/create_rds_global_cluster/meta/argument_specs.yml
index 15046a2f..d95cb6d0 100644
--- a/roles/create_rds_global_cluster/meta/argument_specs.yml
+++ b/roles/create_rds_global_cluster/meta/argument_specs.yml
@@ -20,15 +20,15 @@
     create_rds_global_cluster_instance_class:
       description:
         - Instance class of instance in primary and replica cluster.
-      required: true
+        - Required when I(create_rds_global_cluster_operation=create).
     create_rds_global_cluster_master_username:
       description:
         - Username of the rds clusters master user.
-      required: true
+        - Required when I(create_rds_global_cluster_operation=create).
     create_rds_global_cluster_master_user_password:
       description:
         - Password of the rds clusters master user.
-      required: true
+        - Required when I(create_rds_global_cluster_operation=create).
     create_rds_global_cluster_primary_cluster_name:
       description:
         - Name of the primary cluster.
diff --git a/roles/create_rds_global_cluster/tasks/create.yml b/roles/create_rds_global_cluster/tasks/create.yml
index d7f4a171..ae3f82b0 100644
--- a/roles/create_rds_global_cluster/tasks/create.yml
+++ b/roles/create_rds_global_cluster/tasks/create.yml
@@ -5,6 +5,21 @@
     group/amazon.cloud.aws: "{{ aws_setup_credentials__output }}"
   block:
+    - name: Fail when 'create_rds_global_cluster_instance_class' is not defined
+      ansible.builtin.fail:
+        msg: "'create_rds_global_cluster_instance_class' is required to create the global cluster."
+      when: create_rds_global_cluster_instance_class is undefined
+
+    - name: Fail when 'create_rds_global_cluster_master_username' is not defined
+      ansible.builtin.fail:
+        msg: "'create_rds_global_cluster_master_username' is required to create the global cluster."
+      when: create_rds_global_cluster_master_username is undefined
+
+    - name: Fail when 'create_rds_global_cluster_master_user_password' is not defined
+      ansible.builtin.fail:
+        msg: "'create_rds_global_cluster_master_user_password' is required to create the global cluster."
+      when: create_rds_global_cluster_master_user_password is undefined
+
     - name: Create rds global database
       amazon.cloud.rds_global_cluster:
         global_cluster_identifier: "{{ create_rds_global_cluster_global_cluster_name }}"
diff --git a/roles/deploy_flask_app/README.md b/roles/deploy_flask_app/README.md
index 690013b9..2a54087a 100644
--- a/roles/deploy_flask_app/README.md
+++ b/roles/deploy_flask_app/README.md
@@ -23,32 +23,24 @@ Role Variables
 
 ## variables to create new hosts and groups in inventory of in memory playbook.
 
-* **deploy_flask_app_region** (str): Region where the app is to be deployed.
-* **deploy_flask_app_bastion_host_username** (str): Username for the bastion host SSH user.
 * **deploy_flask_app_private_subnet_id** (str): Private subnet id of the bastion host
 * **deploy_flask_app_vpc_id** (str): vpc id for the host.
-* **deploy_flask_app_rds_info** (dict): A dict of information for the backend RDS. This dict has the output of amazon.aws.rds_instance_info mode.
+* **deploy_flask_app_rds_host** (str): The RDS endpoint address.
+* **deploy_flask_app_rds_dbname** (str): The RDS database name.
 * **deploy_flask_app_rds_master_username** (str): Username for the RDS instance.
 * **deploy_flask_app_rds_master_password** (str): password for the RDS instance.
-* **deploy_flask_app_vm_info** (dict): A dict of information for the vm to use. This dict has the output of amazon.aws.ec2_instance_info module.
-* **deploy_flask_app_sshkey_pair_name** (str): Name for the EC2 key pair.
 
 ## variables needed for the deployment
 
 # Bastion host
-* **deploy_flask_app_bastion_host_name** (str): Name for the EC2 instance.
-* **deploy_flask_app_bastion_host_required_packages** (list): Packages to be installed on the bastion host.
+* **deploy_flask_app_bastion_host_username** (str): Username for the bastion host SSH user.
+* **deploy_flask_app_bastion_instance_id** (str): The instance ID of the virtual machine used as the bastion.
+* **deploy_flask_app_bastion_ssh_private_key_path** (path): The path to the SSH private key file used to connect to the bastion host.
 * **deploy_flask_app_number_of_workers** (int): Number of instances to create.
-* **deploy_flask_app_workers_instance_type** (str): RC2 instance type for workers.
-* **deploy_flask_app_workers_user_name** (str): Username for the workers.
 
 # App
-* **deploy_flask_app_git_repository** (str): Git repository to be cloned for the webapp.
 * **deploy_flask_app_listening_port** (int): Load balancer port.
 * **deploy_flask_app_force_init** (bool): A boolean value True to force init the app and False to not force init.
-* **deploy_flask_app_local_registry_port** (int): Port for the local Podman registry.
-* **deploy_flask_app_local_registry_user** (str): Registry user name.
-* **deploy_flask_app_local_registry_pwd** (str): Registry password.
 * **deploy_flask_app_config** (dict): A dict of config parameterys for the app.
     **env** (str): Flask env.
     **admin_user** (str): App config's admin username.
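Below is a minimal sketch of a play that invokes the role with the reworked argument set, for anyone reviewing the new interface; every ID, hostname, and secret shown is a placeholder, not a value taken from this change, and the defaults mirror `playbooks/webapp/vars/main.yaml`:

```yaml
---
- name: Deploy the flask app with the reworked role interface
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Deploy app
      ansible.builtin.include_role:
        name: cloud.aws_ops.deploy_flask_app
      vars:
        # Networking and bastion inputs (placeholder IDs)
        deploy_flask_app_vpc_id: vpc-0123456789abcdef0
        deploy_flask_app_private_subnet_id: subnet-0123456789abcdef0
        deploy_flask_app_bastion_instance_id: i-0123456789abcdef0
        deploy_flask_app_bastion_ssh_private_key_path: ~/.my_deployment_id_rsa
        deploy_flask_app_bastion_host_username: fedora
        # RDS inputs replacing the old deploy_flask_app_rds_info dict (placeholder endpoint)
        deploy_flask_app_rds_host: mydb.abcdefghijkl.us-east-1.rds.amazonaws.com
        deploy_flask_app_rds_dbname: mysampledb123
        deploy_flask_app_rds_master_username: ansible
        deploy_flask_app_rds_master_password: "{{ vault_rds_password }}"  # placeholder vaulted secret
        # Worker and app settings
        deploy_flask_app_number_of_workers: 2
        deploy_flask_app_workers_instance_type: t3.micro
        deploy_flask_app_workers_user_name: fedora
        deploy_flask_app_listening_port: 5000
        deploy_flask_app_force_init: false
        deploy_flask_app_config:
          env: development
          admin_user: admin
          admin_password: admin
          app_dir: /app/pyapp
```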
diff --git a/roles/deploy_flask_app/defaults/main.yml b/roles/deploy_flask_app/defaults/main.yml new file mode 100644 index 00000000..b93c0b82 --- /dev/null +++ b/roles/deploy_flask_app/defaults/main.yml @@ -0,0 +1,5 @@ +--- +deploy_flask_app_workers_ssh_private_key: /tmp/id_rsa +deploy_flask_app_workers_inventory_file: /tmp/workers_inventory.yaml +deploy_flask_app_workers_playbook_file: /tmp/deploy_app.yaml +deploy_flask_app_container_image: docker.io/aubinredhat/webapp:1.0.0 diff --git a/roles/deploy_flask_app/files/run_app.yaml b/roles/deploy_flask_app/files/run_app.yaml deleted file mode 100644 index b221927b..00000000 --- a/roles/deploy_flask_app/files/run_app.yaml +++ /dev/null @@ -1,68 +0,0 @@ ---- -- name: Run app - hosts: all - gather_facts: false - strategy: free - become: true - - vars: - container_name: webapp-container01 - - tasks: - - name: Update ssh_config - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regex: "{{ item.regex }}" - line: "{{ item.line }}" - loop: - - regex: ^(# *)?ClientAliveInterval - line: ClientAliveInterval 1200 - - regex: ^(# *)?ClientAliveCountMax - line: ClientAliveCountMax 3 - - - name: Install Podman - ansible.builtin.yum: - name: - - podman - update_cache: True - state: present - - - name: Pull image from private registry - ansible.builtin.shell: - cmd: > - podman login {{ registry_host_port }} - -u {{ registry_login.user }} - -p {{ registry_login.password }} - --tls-verify=false && - podman pull {{ registry_host_port }}/ansible-webapp - --tls-verify=false - changed_when: false - - - name: Check running container - ansible.builtin.shell: - cmd: > - podman container ps -a - -f name={{ container_name }} - --format=.Names - register: container - changed_when: false - - - name: Run application instance - ansible.builtin.shell: - cmd: > - podman run --rm - -e FLASK_APP="{{ application_dir }}" - -e FLASK_ENV="{{ application_env }}" - -e DATABASE_HOST="{{ application_db.host }}" - -e DATABASE_INSTANCE="{{ application_db.instance }}" - -e DATABASE_USER="{{ application_db.dbuser_name }}" - -e DATABASE_PASSWORD="{{ application_db.dbuser_password }}" - -e ADMIN_USER="{{ application_db.admin_user }}" - -e ADMIN_PASSWORD="{{ application_db.admin_password }}" - -e WORKER_HOSTNAME="{{ inventory_hostname }}" - -e WORKERS_HOSTS="{{ workers_hosts }}" - -p 5000:5000 --name {{ container_name }} - -d {{ registry_host_port }}/ansible-webapp - when: - - container.stdout == "" - changed_when: true diff --git a/roles/deploy_flask_app/meta/argument_specs.yml b/roles/deploy_flask_app/meta/argument_specs.yml index 6c63ba4f..297c0741 100644 --- a/roles/deploy_flask_app/meta/argument_specs.yml +++ b/roles/deploy_flask_app/meta/argument_specs.yml @@ -4,23 +4,20 @@ argument_specs: version_added: 2.0.0 short_description: Deploy flask app in AWS. options: - deploy_flask_app_region: - description: (Optional) Region where the app has to be deployed. - type: str - required: False deploy_flask_app_bastion_host_username: description: Username for the bastion host SSH user. type: str required: True - deploy_flask_app_bastion_host_name: - description: Name for the EC2 instance. + deploy_flask_app_bastion_instance_id: + description: The instance Id of the EC2 bastion virtual machine. type: str required: True - deploy_flask_app_bastion_host_required_packages: - description: Packages to be installed on the bastion host. 
-      type: list
-      elements: str
+      version_added: 2.1.0
+    deploy_flask_app_bastion_ssh_private_key_path:
+      description: The path to the SSH private key file used to connect to the bastion host.
+      type: path
       required: True
+      version_added: 2.1.0
     deploy_flask_app_private_subnet_id:
       description: Private subnet id of the bastion host.
       type: str
@@ -29,13 +26,13 @@
       description: vpc id for the host.
       type: str
       required: True
-    deploy_flask_app_sshkey_pair_name:
-      description: Name for the EC2 key pair.
+    deploy_flask_app_rds_host:
+      description: The RDS endpoint address.
       type: str
       required: True
-    deploy_flask_app_rds_info:
-      description: A dict of information for the backend RDS. This dict has the output of amazon.aws.rds_instance_info module.
-      type: dict
+    deploy_flask_app_rds_dbname:
+      description: The RDS database name.
+      type: str
       required: True
     deploy_flask_app_rds_master_username:
       description: Master username of the RDS backend.
@@ -45,10 +42,6 @@
       description: Master password of the RDS backend.
       type: str
       required: True
-    deploy_flask_app_vm_info:
-      description: A dict of information for the vm to use. This dict has the output of amazon.aws.ec2_instance_info module.
-      type: dict
-      required: True
     deploy_flask_app_number_of_workers:
       description: Number of instances to create.
       type: int
@@ -60,10 +53,6 @@
       description: Username for the workers.
       type: str
       required: True
-    deploy_flask_app_git_repository:
-      description: Git repository to be cloned for the webapp.
-      type: str
-      required: True
     deploy_flask_app_listening_port:
       description: Load balancer port.
       type: int
@@ -72,18 +61,6 @@
       description: A boolean value True to force init the app and False to not force init.
       type: bool
       required: True
-    deploy_flask_app_local_registry_user:
-      description: Registry user name.
-      type: str
-      required: True
-    deploy_flask_app_local_registry_pwd:
-      description: Registry password.
-      type: str
-      required: True
-    deploy_flask_app_local_registry_port:
-      description: Registry port.
-      type: int
-      required: True
     deploy_flask_app_config:
       description: A dict of config parameterys for the app.
type: dict diff --git a/roles/deploy_flask_app/meta/main.yaml b/roles/deploy_flask_app/meta/main.yaml deleted file mode 100644 index 3bf1568b..00000000 --- a/roles/deploy_flask_app/meta/main.yaml +++ /dev/null @@ -1,4 +0,0 @@ ---- -dependencies: - - role: cloud.aws_ops.aws_setup_credentials -allow_duplicates: true diff --git a/roles/deploy_flask_app/tasks/bastion_setup.yaml b/roles/deploy_flask_app/tasks/bastion_setup.yaml deleted file mode 100644 index f13e550c..00000000 --- a/roles/deploy_flask_app/tasks/bastion_setup.yaml +++ /dev/null @@ -1,38 +0,0 @@ ---- -- name: Deploy resource from Bastion - delegate_to: bastion - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key | default(omit) }}" - aws_secret_key: "{{ aws_secret_key | default(omit) }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ deploy_flask_app_region | default(aws_region) }}" - block: - - name: Update ssh_config - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regex: "{{ item.regex }}" - line: "{{ item.line }}" - loop: - - regex: ^(# *)?ClientAliveInterval - line: ClientAliveInterval 1200 - - regex: ^(# *)?ClientAliveCountMax - line: ClientAliveCountMax 3 - become: true - - - name: Install required packages - ansible.builtin.yum: - name: "{{ deploy_flask_app_bastion_host_required_packages }}" - state: present - become: true - - - name: Generate ssh key for existing user - ansible.builtin.user: - name: "{{ deploy_flask_app_bastion_host_username }}" - state: present - generate_ssh_key: true - - - name: Get content of public key - ansible.builtin.slurp: - src: ~/.ssh/id_rsa.pub - register: deploy_flask_app_sshkey diff --git a/roles/deploy_flask_app/tasks/deploy_app.yaml b/roles/deploy_flask_app/tasks/deploy_app.yaml deleted file mode 100644 index e8691be6..00000000 --- a/roles/deploy_flask_app/tasks/deploy_app.yaml +++ /dev/null @@ -1,225 +0,0 @@ ---- -- name: Create Cloud Resources (workers, load balancer, etc) - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key | default(omit) }}" - aws_secret_key: "{{ aws_secret_key | default(omit) }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ deploy_flask_app_region | default(aws_region) }}" - - block: - - name: Set variables - ansible.builtin.set_fact: - deploy_flask_app_localhost_key_pair: "{{ deploy_flask_app_setup.add_host.host_vars.ansible_host_name }}-key" - deploy_flask_app_instance_name: "{{ deploy_flask_app_setup.add_host.host_vars.ansible_host_name }}-workers" - - - name: Create key pair to connect to the VM - amazon.aws.ec2_key: - name: "{{ deploy_flask_app_localhost_key_pair }}" - key_material: "{{ deploy_flask_app_sshkey.content | b64decode }}" - - - name: List running instances - amazon.aws.ec2_instance_info: - filters: - tag:Name: "{{ deploy_flask_app_instance_name }}" - instance-state-name: running - register: deploy_flask_app_vms - - - name: Compute number of instances to create/delete - ansible.builtin.set_fact: - deploy_flask_app_expected_instances: "{{ deploy_flask_app_number_of_workers | int - deploy_flask_app_vms.instances | length }}" - - - name: Create list of targets hosts - amazon.aws.ec2_instance: - name: "{{ deploy_flask_app_instance_name }}" - instance_type: "{{ deploy_flask_app_workers_instance_type }}" - image_id: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.image_id }}" - key_name: "{{ deploy_flask_app_localhost_key_pair }}" - subnet_id: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.private_subnet_id }}" - network: - assign_public_ip: 
false - delete_on_termination: true - groups: - - "{{ deploy_flask_app_setup.add_host.host_vars.host_config.group_id }}" - security_groups: - - "{{ deploy_flask_app_setup.add_host.host_vars.host_config.group_id }}" - wait: true - count: "{{ deploy_flask_app_expected_instances }}" - state: started - register: deploy_flask_app_workers - when: deploy_flask_app_expected_instances | int > 0 - - - name: List running instances (once again) - amazon.aws.ec2_instance_info: - filters: - tag:Name: "{{ deploy_flask_app_instance_name }}" - instance-state-name: running - register: deploy_flask_app_vms - - - name: Create list of instances (join) - ansible.builtin.set_fact: - deploy_flask_app_instances_list: [] - - name: Update join_instances - ansible.builtin.set_fact: - deploy_flask_app_instances_list: "{{ deploy_flask_app_instances_list + [item.instance_id + ':' + item.private_ip_address] }}" - with_items: "{{ deploy_flask_app_vms.instances }}" - -- name: Set variables - ansible.builtin.set_fact: - deploy_flask_app_workers_instances: "{{ deploy_flask_app_vms.instances }}" - deploy_flask_app_workers_join: "{{ deploy_flask_app_instances_list | join(',') }}" - -- name: Create inventory file - ansible.builtin.template: - src: inventory.j2 - dest: ~/inventory.ini - mode: 0644 - delegate_to: bastion - -- name: Create vars file - ansible.builtin.template: - src: vars.yaml.j2 - dest: ~/vars.yaml - mode: 0644 - delegate_to: bastion - -- name: Create private registry and store webapp container image - delegate_to: bastion - block: - - name: Clone git repository for web application - ansible.builtin.git: - repo: "{{ deploy_flask_app_git_repository }}" - dest: ~/webapp - - - name: Build webapp container image - ansible.builtin.command: - cmd: podman build -t webapp . - args: - chdir: ~/webapp - changed_when: false - - - name: Check running registry - ansible.builtin.shell: - cmd: > - podman container - ps -a - -f name=registry500x - --format=.Names - register: deploy_flask_app_container - become: true - changed_when: false - - - name: Create private registry - become: true - when: - - deploy_flask_app_container.stdout == "" - block: - - name: Create folders for the registry - ansible.builtin.file: - path: /opt/registry/{{ item }} - state: directory - mode: 0644 - with_items: - - auth - - certs - - data - - - name: Generate credentials for accessing the registry - ansible.builtin.shell: - cmd: > - htpasswd -bBc /opt/registry/auth/htpasswd - {{ deploy_flask_app_local_registry_user }} - {{ deploy_flask_app_local_registry_pwd }} - changed_when: false - - - name: Start the registry - ansible.builtin.shell: - cmd: > - podman run --name registry500x - -p {{ deploy_flask_app_listening_port }}:5000 - -v /opt/registry/data:/var/lib/registry:z - -v /opt/registry/auth:/auth:z - -e "REGISTRY_AUTH=htpasswd" - -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" - -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd - -e REGISTRY_COMPATIBILITY_SCHEMA1_ENABLED=true - -d - docker.io/library/registry:latest - changed_when: false - - - name: Push image into private registry - ansible.builtin.shell: - cmd: > - podman login 127.0.0.1:{{ deploy_flask_app_listening_port }} -u '{{ deploy_flask_app_local_registry_user }}' -p '{{ deploy_flask_app_local_registry_pwd }}' --tls-verify=false && - podman tag webapp 127.0.0.1:{{ deploy_flask_app_listening_port }}/ansible-webapp && - podman push 127.0.0.1:{{ deploy_flask_app_listening_port }}/ansible-webapp --tls-verify=false - changed_when: false - -- name: Initialize database tables - 
ansible.builtin.shell: - cmd: > - podman run --rm - -e FLASK_APP="{{ deploy_flask_app_config.app_dir }}" - -e FLASK_ENV="{{ deploy_flask_app_config.env }}" - -e DATABASE_HOST="{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.host }}" - -e DATABASE_INSTANCE="{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.name }}" - -e DATABASE_USER="{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.master_username }}" - -e DATABASE_PASSWORD="{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.master_user_password }}" - -e ADMIN_USER="{{ deploy_flask_app_config.admin_user }}" - -e ADMIN_PASSWORD="{{ deploy_flask_app_config.admin_password }}" - -e WORKER_HOSTNAME="{{ inventory_hostname }}" - -e WORKERS_HOSTS="bastion" - webapp flask {{ deploy_flask_app_force_init | bool | ternary('force-init-db', 'init-db') }} - run_once: true - changed_when: false - delegate_to: bastion - -- name: Copy playbook into bastion host - ansible.builtin.copy: - src: run_app.yaml - dest: ~/playbook.yaml - mode: 0644 - delegate_to: bastion - -- name: Deploy application into workers - ansible.builtin.shell: - cmd: > - ansible-playbook playbook.yaml -i inventory.ini -vvv - -e '@vars.yaml' - -e registry_host_port='{{ deploy_flask_app_setup.add_host.host_vars.host_config.private_ip }}:{{ deploy_flask_app_listening_port }}' - args: - chdir: ~/ - changed_when: false - delegate_to: bastion - -- name: Create load balancer - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key | default(omit) }}" - aws_secret_key: "{{ aws_secret_key | default(omit) }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ deploy_flask_app_region | default(aws_region) }}" - amazon.aws.elb_classic_lb: - state: present - name: "{{ deploy_flask_app_setup.add_host.host_vars.ansible_host_name }}-lb" - listeners: - - load_balancer_port: "{{ deploy_flask_app_listening_port }}" - instance_port: 5000 - protocol: HTTP - instance_protocol: HTTP - instance_ids: "{{ deploy_flask_app_vms.instances | map(attribute='instance_id') | list }}" - security_group_ids: - - "{{ deploy_flask_app_setup.add_host.host_vars.host_config.group_id }}" - subnets: - - "{{ deploy_flask_app_setup.add_host.host_vars.host_config.public_subnet_id }}" - scheme: internet-facing - wait: true - wait_timeout: 360 - retries: 5 - delay: 10 - until: deploy_flask_app_lb_result is successful - register: deploy_flask_app_lb_result - -- name: Debug application url - ansible.builtin.debug: - msg: "Application url: {{ deploy_flask_app_lb_result.elb.dns_name }}:{{ deploy_flask_app_listening_port }}" diff --git a/roles/deploy_flask_app/tasks/main.yaml b/roles/deploy_flask_app/tasks/main.yaml index 34df2040..6d796351 100644 --- a/roles/deploy_flask_app/tasks/main.yaml +++ b/roles/deploy_flask_app/tasks/main.yaml @@ -1,13 +1,31 @@ --- -- name: Deploy flask app. - module_defaults: - group/aws: "{{ aws_setup_credentials__output }}" +- name: Deploy flask application block: - - name: Create new host in inventory for use in later plays. 
- ansible.builtin.include_tasks: setup.yaml + - name: Create infrastructure - workers and load balancer + ansible.builtin.include_tasks: setup_infra.yaml - - name: Deploy resource from Bastion - ansible.builtin.include_tasks: bastion_setup.yaml + - name: Start application container into workers + ansible.builtin.include_tasks: start_containers.yaml - - name: Deploy App - ansible.builtin.include_tasks: deploy_app.yaml + - name: Create load balancer + amazon.aws.elb_classic_lb: + state: present + name: "{{ deploy_flask_app__resource_prefix }}-lb" + listeners: + - load_balancer_port: "{{ deploy_flask_app_listening_port }}" + instance_port: 5000 + protocol: HTTP + instance_protocol: HTTP + instance_ids: "{{ deploy_flask_app_vms.instances | map(attribute='instance_id') | list }}" + security_group_ids: + - "{{ deploy_flask_app__group_id }}" + subnets: + - "{{ deploy_flask_app__public_subnet_id }}" + scheme: internet-facing + wait: true + wait_timeout: 600 + register: deploy_flask_app_lb_result + + - name: Display application URL + ansible.builtin.debug: + msg: "Application accessible at http://{{ deploy_flask_app_lb_result.elb.dns_name }}:{{ deploy_flask_app_listening_port }}" diff --git a/roles/deploy_flask_app/tasks/setup.yaml b/roles/deploy_flask_app/tasks/setup.yaml deleted file mode 100644 index 55b68b47..00000000 --- a/roles/deploy_flask_app/tasks/setup.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- name: Set 'deploy_flask_app_region' variable - ansible.builtin.set_fact: - deploy_flask_app_region: "{{ deploy_flask_app_region | default(aws_region) }}" - -- name: Create resources playbook - block: - - name: Set 'sshkey_file' variable - ansible.builtin.set_fact: - deploy_flask_app_sshkey_file: ~/private-key-{{ deploy_flask_app_sshkey_pair_name }}-{{ deploy_flask_app_region | default(aws_region) }} - - - name: Add host to inventory - ansible.builtin.add_host: - hostname: bastion - ansible_ssh_user: "{{ deploy_flask_app_bastion_host_username }}" - ansible_host: "{{ deploy_flask_app_vm_info.instances.0.public_ip_address }}" - ansible_ssh_common_args: -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ deploy_flask_app_sshkey_file }} - ansible_python_interpreter: auto - ansible_host_name: "{{ deploy_flask_app_vm_info.instances.0.public_dns_name | split('.') | first }}" - host_config: - public_subnet_id: "{{ deploy_flask_app_vm_info.instances.0.subnet_id }}" - private_subnet_id: "{{ deploy_flask_app_private_subnet_id }}" - image_id: "{{ deploy_flask_app_vm_info.instances.0.image_id }}" - group_id: "{{ deploy_flask_app_vm_info.instances.0.security_groups[0].group_id }}" - private_ip: "{{ deploy_flask_app_vm_info.instances.0.private_ip_address }}" - vpc_id: "{{ deploy_flask_app_vpc_id }}" - rds_info: - host: "{{ deploy_flask_app_rds_info.instances.0.endpoint.address }}" - name: "{{ deploy_flask_app_rds_info.instances.0.db_name | default('mysampledb123') }}" - master_user_password: "{{ deploy_flask_app_rds_master_password | default('L#5cH2mgy_') }}" - master_username: "{{ deploy_flask_app_rds_master_username | default('ansible') }}" - register: deploy_flask_app_setup diff --git a/roles/deploy_flask_app/tasks/setup_infra.yaml b/roles/deploy_flask_app/tasks/setup_infra.yaml new file mode 100644 index 00000000..0b52bded --- /dev/null +++ b/roles/deploy_flask_app/tasks/setup_infra.yaml @@ -0,0 +1,96 @@ +--- +- name: Describe bastion instance + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ deploy_flask_app_bastion_instance_id }}" + register: bastion_info + +- name: Set common 
variables to be used later
+  ansible.builtin.set_fact:
+    deploy_flask_app__resource_prefix: "{{ bastion_info.instances.0.public_dns_name | split('.') | first }}"
+    deploy_flask_app__group_id: "{{ bastion_info.instances.0.security_groups[0].group_id }}"
+    deploy_flask_app__vm_image_id: "{{ bastion_info.instances.0.image_id }}"
+    deploy_flask_app__bastion_public_ip: "{{ bastion_info.instances.0.public_ip_address }}"
+    deploy_flask_app__public_subnet_id: "{{ bastion_info.instances.0.subnet_id }}"
+
+- name: Set variable for key pair and bastion hostname
+  ansible.builtin.set_fact:
+    deploy_flask_app__workers_keypair_name: "{{ deploy_flask_app__resource_prefix }}-key"
+    deploy_flask_app__bastion_hostname: "{{ deploy_flask_app__resource_prefix }}-bastion"
+
+- name: Add bastion host to inventory
+  ansible.builtin.add_host:
+    hostname: "{{ deploy_flask_app__bastion_hostname }}"
+    ansible_ssh_user: "{{ deploy_flask_app_bastion_host_username }}"
+    ansible_host: "{{ deploy_flask_app__bastion_public_ip }}"
+    ansible_python_interpreter: auto
+    ansible_ssh_common_args: '-o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no -i {{ deploy_flask_app_bastion_ssh_private_key_path }}'
+
+- name: Create key pair to connect to the workers
+  amazon.aws.ec2_key:
+    name: "{{ deploy_flask_app__workers_keypair_name }}"
+  register: keypair_result
+
+- name: Save key pair content into file on bastion host
+  ansible.builtin.copy:
+    content: "{{ keypair_result.key.private_key }}"
+    dest: "{{ deploy_flask_app_workers_ssh_private_key }}"
+    mode: 0600
+  when: keypair_result is changed
+  delegate_to: "{{ deploy_flask_app__bastion_hostname }}"
+
+- name: Create workers instances
+  block:
+    - name: Set variables
+      ansible.builtin.set_fact:
+        deploy_flask_app_instance_name: "{{ deploy_flask_app__resource_prefix }}-workers"
+
+    - name: List running instances
+      amazon.aws.ec2_instance_info:
+        filters:
+          network-interface.subnet-id: "{{ deploy_flask_app_private_subnet_id }}"
+          key-name: "{{ deploy_flask_app__workers_keypair_name }}"
+          image-id: "{{ deploy_flask_app__vm_image_id }}"
+          instance-state-name: running
+      register: deploy_flask_app_vms
+
+    - name: Compute number of instances to create/delete
+      ansible.builtin.set_fact:
+        deploy_flask_app_expected_instances: "{{ deploy_flask_app_number_of_workers | int - deploy_flask_app_vms.instances | length }}"
+
+    - name: Create list of target hosts
+      amazon.aws.ec2_instance:
+        name: "{{ deploy_flask_app_instance_name }}"
+        instance_type: "{{ deploy_flask_app_workers_instance_type }}"
+        image_id: "{{ deploy_flask_app__vm_image_id }}"
+        key_name: "{{ deploy_flask_app__workers_keypair_name }}"
+        subnet_id: "{{ deploy_flask_app_private_subnet_id }}"
+        network:
+          assign_public_ip: false
+          delete_on_termination: true
+          groups:
+            - "{{ deploy_flask_app__group_id }}"
+        security_groups:
+          - "{{ deploy_flask_app__group_id }}"
+        wait: true
+        count: "{{ deploy_flask_app_expected_instances }}"
+        state: started
+      register: deploy_flask_app_workers
+      when: deploy_flask_app_expected_instances | int > 0
+
+    - name: List running instances (once again)
+      amazon.aws.ec2_instance_info:
+        filters:
+          network-interface.subnet-id: "{{ deploy_flask_app_private_subnet_id }}"
+          key-name: "{{ deploy_flask_app__workers_keypair_name }}"
+          image-id: "{{ deploy_flask_app__vm_image_id }}"
+          instance-state-name: running
+      register: deploy_flask_app_vms
+
+    - name: Create list of instances (join)
+      ansible.builtin.set_fact:
+        deploy_flask_app_instances: []
+    - name: Update join_instances
+
ansible.builtin.set_fact: + deploy_flask_app_instances: "{{ deploy_flask_app_instances + [item.instance_id + ':' + item.private_ip_address] }}" + with_items: "{{ deploy_flask_app_vms.instances }}" diff --git a/roles/deploy_flask_app/tasks/start_containers.yaml b/roles/deploy_flask_app/tasks/start_containers.yaml new file mode 100644 index 00000000..9d3f1c66 --- /dev/null +++ b/roles/deploy_flask_app/tasks/start_containers.yaml @@ -0,0 +1,43 @@ +--- +- name: Configure bastion + delegate_to: "{{ deploy_flask_app__bastion_hostname }}" + block: + - name: Create ssh configuration files + ansible.builtin.file: + state: "{{ item.state }}" + path: "{{ item.path }}" + mode: '0755' + with_items: + - state: directory + path: "~/.ssh" + - state: touch + path: "~/.ssh/config" + + - name: Update bastion ~/.ssh/config + ansible.builtin.blockinfile: + state: present + insertafter: EOF + dest: "~/.ssh/config" + content: "{{ lookup('template', 'bastion_ssh_config.j2') }}" + + - name: Generate workers inventory file + ansible.builtin.copy: + content: "{{ lookup('template', 'workers_inventory.yaml.j2') }}" + dest: "{{ deploy_flask_app_workers_inventory_file }}" + mode: 0755 + + - name: Generate playbook to deploy application + ansible.builtin.copy: + content: "{{ lookup('template', 'deploy_app.yaml.j2') }}" + dest: "{{ deploy_flask_app_workers_playbook_file }}" + mode: 0755 + vars: + deploy_flask_app_instances_list: "{{ deploy_flask_app_instances | join(',') }}" + deploy_flask_app_worker_hostname: "{{ '{{' }} inventory_hostname {{ '}}' }}" + + - name: Deploy application to the workers + ansible.builtin.command: >- + ansible-playbook + --inventory {{ deploy_flask_app_workers_inventory_file }} + {{ deploy_flask_app_workers_playbook_file }} + -v diff --git a/roles/deploy_flask_app/templates/bastion_ssh_config.j2 b/roles/deploy_flask_app/templates/bastion_ssh_config.j2 new file mode 100644 index 00000000..97ddc59b --- /dev/null +++ b/roles/deploy_flask_app/templates/bastion_ssh_config.j2 @@ -0,0 +1,8 @@ +{% for item in deploy_flask_app_vms.instances %} +Host {{ item.instance_id }} + User {{ deploy_flask_app_workers_user_name }} + HostName {{ item.private_ip_address }} + IdentityFile {{ deploy_flask_app_workers_ssh_private_key }} + StrictHostKeyChecking no + UserKnownHostsFile /dev/null +{% endfor %} diff --git a/roles/deploy_flask_app/templates/deploy_app.yaml.j2 b/roles/deploy_flask_app/templates/deploy_app.yaml.j2 new file mode 100644 index 00000000..c1fe7d9e --- /dev/null +++ b/roles/deploy_flask_app/templates/deploy_app.yaml.j2 @@ -0,0 +1,66 @@ +--- +- name: Run app + hosts: all + gather_facts: false + strategy: free + become: true + + tasks: + - name: Update sshd_config to increase ssh session lifetime + ansible.builtin.blockinfile: + path: /etc/ssh/sshd_config + block: | + ClientAliveInterval 1200 + ClientAliveCountMax 3 + + - name: Install Podman + ansible.builtin.yum: + name: + - podman + update_cache: false + state: present + + - name: Check running container + ansible.builtin.shell: + cmd: "podman container ps -a -f name=webapp-container-1 --format=.Names" + register: container + changed_when: false + + - name: Initialize database tables + ansible.builtin.shell: + cmd: > + podman run --rm + -e FLASK_APP="{{ deploy_flask_app_config.app_dir }}" + -e FLASK_ENV="{{ deploy_flask_app_config.env }}" + -e DATABASE_HOST="{{ deploy_flask_app_rds_host }}" + -e DATABASE_INSTANCE="{{ deploy_flask_app_rds_dbname }}" + -e DATABASE_USER="{{ deploy_flask_app_rds_master_username }}" + -e DATABASE_PASSWORD="{{
deploy_flask_app_rds_master_password }}" + -e ADMIN_USER="{{ deploy_flask_app_config.admin_user }}" + -e ADMIN_PASSWORD="{{ deploy_flask_app_config.admin_password }}" + -e WORKER_HOSTNAME="{{ deploy_flask_app_worker_hostname }}" + -e WORKERS_HOSTS="{{ deploy_flask_app_instances_list }}" + {{ deploy_flask_app_container_image }} flask {{ (deploy_flask_app_force_init | bool) | ternary('force-init-db', 'init-db') }} + run_once: true + + - name: Run application instance + ansible.builtin.shell: + cmd: >- + podman run + --rm + -e FLASK_APP="{{ deploy_flask_app_config.app_dir }}" + -e FLASK_ENV="{{ deploy_flask_app_config.env }}" + -e DATABASE_HOST="{{ deploy_flask_app_rds_host }}" + -e DATABASE_INSTANCE="{{ deploy_flask_app_rds_dbname }}" + -e DATABASE_USER="{{ deploy_flask_app_rds_master_username }}" + -e DATABASE_PASSWORD="{{ deploy_flask_app_rds_master_password }}" + -e ADMIN_USER="{{ deploy_flask_app_config.admin_user }}" + -e ADMIN_PASSWORD="{{ deploy_flask_app_config.admin_password }}" + -e WORKER_HOSTNAME='{{ deploy_flask_app_worker_hostname }}' + -e WORKERS_HOSTS="{{ deploy_flask_app_instances_list }}" + -p 5000:5000 + --name webapp-container-1 + -d {{ deploy_flask_app_container_image }} + when: + - container.stdout == "" + changed_when: true diff --git a/roles/deploy_flask_app/templates/inventory.j2 b/roles/deploy_flask_app/templates/inventory.j2 deleted file mode 100644 index d5fb7eba..00000000 --- a/roles/deploy_flask_app/templates/inventory.j2 +++ /dev/null @@ -1,4 +0,0 @@ -[all] -{% for item in deploy_flask_app_workers_instances %} -{{ item.instance_id }} workers_hosts="{{ deploy_flask_app_workers_join }}" ansible_ssh_user="{{ deploy_flask_app_workers_user_name }}" ansible_ssh_common_args='-o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no' ansible_host="{{ item.private_ip_address }}" -{% endfor %} diff --git a/roles/deploy_flask_app/templates/vars.yaml.j2 b/roles/deploy_flask_app/templates/vars.yaml.j2 deleted file mode 100644 index bc6a211d..00000000 --- a/roles/deploy_flask_app/templates/vars.yaml.j2 +++ /dev/null @@ -1,15 +0,0 @@ ---- -registry_host_port: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.private_ip }}:{{ deploy_flask_app_local_registry_port }}" -registry_login: - user: "{{ deploy_flask_app_local_registry_user }}" - password: "{{ deploy_flask_app_local_registry_pwd }}" -rds_listening_port: "{{ rds_listening_port }}" -application_dir: "{{ deploy_flask_app_config.app_dir }}" -application_env: "{{ deploy_flask_app_config.env }}" -application_db: - host: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.host }}" - instance: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.name }}" - dbuser_name: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.master_username }}" - dbuser_password: "{{ deploy_flask_app_setup.add_host.host_vars.host_config.rds_info.master_user_password }}" - admin_user: "{{ deploy_flask_app_config.admin_user }}" - admin_password: "{{ deploy_flask_app_config.admin_password }}" diff --git a/roles/deploy_flask_app/templates/workers_inventory.yaml.j2 b/roles/deploy_flask_app/templates/workers_inventory.yaml.j2 new file mode 100644 index 00000000..2e4b7331 --- /dev/null +++ b/roles/deploy_flask_app/templates/workers_inventory.yaml.j2 @@ -0,0 +1,6 @@ +all: + hosts: +{% for item in deploy_flask_app_vms.instances %} + {{ item.instance_id }}: + ansible_python_interpreter: auto +{% endfor %} diff --git a/tests/integration/targets/setup_rsa_keys/defaults/main.yml 
b/tests/integration/targets/setup_rsa_keys/defaults/main.yml new file mode 100644 index 00000000..7688fdce --- /dev/null +++ b/tests/integration/targets/setup_rsa_keys/defaults/main.yml @@ -0,0 +1,2 @@ +--- +setup_rsa_keys__path: "~/.ssh-{{ resource_prefix }}" diff --git a/tests/integration/targets/test_deploy_flask_app/aliases b/tests/integration/targets/test_deploy_flask_app/aliases index 931f237c..de2fdf2f 100644 --- a/tests/integration/targets/test_deploy_flask_app/aliases +++ b/tests/integration/targets/test_deploy_flask_app/aliases @@ -1,7 +1,3 @@ cloud/aws role/deploy_flask_app time=35m - -# Integration tests are broken -# fatal: [testhost -> bastion]: UNREACHABLE! -disabled \ No newline at end of file diff --git a/tests/integration/targets/test_deploy_flask_app/defaults/main.yml b/tests/integration/targets/test_deploy_flask_app/defaults/main.yml deleted file mode 100644 index 0e6574a5..00000000 --- a/tests/integration/targets/test_deploy_flask_app/defaults/main.yml +++ /dev/null @@ -1 +0,0 @@ -aws_security_token: '{{ security_token | default(omit) }}' diff --git a/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/defaults/main.yml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/defaults/main.yml new file mode 100644 index 00000000..d0dec9fd --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/defaults/main.yml @@ -0,0 +1,2 @@ +--- +run_deploy_flask_app_operation: create diff --git a/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/files/ec2-trust-policy.json b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/files/ec2-trust-policy.json new file mode 100644 index 00000000..840205bd --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/files/ec2-trust-policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/targets/test_deploy_flask_app/tasks/create.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/create.yaml similarity index 62% rename from tests/integration/targets/test_deploy_flask_app/tasks/create.yaml rename to tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/create.yaml index 87ed7431..6489f236 100644 --- a/tests/integration/targets/test_deploy_flask_app/tasks/create.yaml +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/create.yaml @@ -1,8 +1,4 @@ --- -- name: Set 'region' variable - ansible.builtin.set_fact: - region: "{{ deploy_flask_app_region | default(aws_region) }}" - - name: Create resources playbook block: - name: Get image ID to create an instance @@ -70,13 +66,12 @@ state: present register: internet_gw - - name: Create NAT gateway (allow access to internet for instances in private subnet) + - name: Create NAT gateway attached to the public subnet (allow access to internet for instances in private subnet) amazon.aws.ec2_vpc_nat_gateway: subnet_id: "{{ subnet.subnet.id }}" if_exist_do_not_create: true - wait: true + wait: false # Long-running task; the availability will be checked later state: present - register: nat_gw - name: Create Route table for internet gateway (public subnet) amazon.aws.ec2_vpc_route_table: @@ -92,20 +87,6 @@ route: internet state: present - - name: Create Route table for NAT
gateway (private subnet) - amazon.aws.ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - subnets: - - "{{ private_subnet.subnet.id }}" - routes: - - dest: 0.0.0.0/0 - gateway_id: "{{ nat_gw.nat_gateway_id }}" - lookup: tag - resource_tags: - subnet: private - route: nat-gateway - state: present - - name: Create security group for bastion amazon.aws.ec2_security_group: name: "{{ public_secgroup_name }}" @@ -141,77 +122,50 @@ state: present register: rds_sg - - name: Get RDS instance info - amazon.aws.rds_instance_info: + - name: Create RDS instance (PostgreSQL Database) + amazon.aws.rds_instance: + force_update_password: true + allocated_storage: "{{ rds_allocated_storage_gb }}" + backup_retention_period: 0 + db_instance_class: "{{ rds_instance_class }}" db_instance_identifier: "{{ rds_identifier }}" + db_name: "{{ rds_instance_name }}" + engine: "{{ rds_engine }}" + engine_version: "{{ rds_engine_version }}" + master_user_password: "{{ deploy_flask_app_rds_master_password }}" + master_username: "{{ deploy_flask_app_rds_master_username }}" + monitoring_interval: 0 + storage_type: standard + skip_final_snapshot: true + db_subnet_group_name: "{{ rds_subnet_group_name }}" + vpc_security_group_ids: + - "{{ rds_sg.group_id }}" + wait: true register: rds_result - - name: Create RDS instance - when: rds_result.instances | length == 0 - block: - - name: Create RDS instance (PostGreSQL Database) - amazon.aws.rds_instance: - force_update_password: true - wait: true - allocated_storage: "{{ rds_allocated_storage_gb }}" - backup_retention_period: 0 - db_instance_class: "{{ rds_instance_class }}" - db_instance_identifier: "{{ rds_identifier }}" - db_name: "{{ rds_instance_name }}" - engine: "{{ rds_engine }}" - engine_version: "{{ rds_engine_version }}" - master_user_password: "{{ deploy_flask_app_rds_master_password }}" - master_username: "{{ deploy_flask_app_rds_master_username }}" - monitoring_interval: 0 - storage_type: standard - skip_final_snapshot: true - db_subnet_group_name: "{{ rds_subnet_group_name }}" - vpc_security_group_ids: - - "{{ rds_sg.group_id }}" - when: rds_snapshot_arn is not defined - - - name: Create RDS instance from snapshot (PostGreSQL Database) - amazon.aws.rds_instance: - force_update_password: true - wait: true - allocated_storage: "{{ rds_allocated_storage_gb }}" - backup_retention_period: 0 - db_instance_class: "{{ rds_instance_class }}" - db_instance_identifier: "{{ rds_identifier }}" - engine: "{{ rds_engine }}" - engine_version: "{{ rds_engine_version }}" - master_user_password: "{{ deploy_flask_app_rds_master_password }}" - master_username: "{{ deploy_flask_app_rds_master_user }}" - monitoring_interval: 0 - storage_type: standard - skip_final_snapshot: true - db_subnet_group_name: "{{ rds_subnet_group_name }}" - vpc_security_group_ids: - - "{{ rds_sg.group_id }}" - creation_source: snapshot - db_snapshot_identifier: "{{ rds_snapshot_arn }}" - when: rds_snapshot_arn is defined - - - name: Get RDS instance info - amazon.aws.rds_instance_info: - db_instance_identifier: "{{ rds_identifier }}" - register: rds_result + # Create key pair to connect to the VM + - name: Create directory to generate keys in + ansible.builtin.file: + path: "{{ deploy_flask_app_bastion_rsa_key_dir }}" + state: directory - - name: Set 'sshkey_file' variable - ansible.builtin.set_fact: - sshkey_file: ~/private-key-{{ deploy_flask_app_sshkey_pair_name }}-{{ region | default(aws_region) }} + - name: Generate RSA keys + community.crypto.openssh_keypair: + path: "{{
deploy_flask_app_bastion_rsa_key_dir }}/id_rsa" - name: Create key pair to connect to the VM amazon.aws.ec2_key: name: "{{ deploy_flask_app_sshkey_pair_name }}" - register: rsa_key + key_material: "{{ lookup('file', deploy_flask_app_bastion_rsa_key_dir + '/id_rsa.pub') }}" - - name: Save private key into file - ansible.builtin.copy: - content: "{{ rsa_key.key.private_key }}" - dest: "{{ sshkey_file }}" - mode: 0400 - when: rsa_key is changed + - name: Ensure IAM instance role exists + amazon.aws.iam_role: + name: "{{ bastion_host_iam_role }}" + assume_role_policy_document: "{{ lookup('file', 'ec2-trust-policy.json') }}" + state: present + create_instance_profile: true + wait: true + register: role_output - name: Create a virtual machine amazon.aws.ec2_instance: @@ -220,12 +174,41 @@ image_id: "{{ images.images.0.image_id }}" key_name: "{{ deploy_flask_app_sshkey_pair_name }}" subnet_id: "{{ subnet.subnet.id }}" + ebs_optimized: true + instance_role: "{{ role_output.iam_role.role_name }}" network: assign_public_ip: true groups: - "{{ secgroup.group_id }}" security_groups: - "{{ secgroup.group_id }}" + user_data: | + #!/bin/bash + yum install -y python3 python-virtualenv sshpass netcat ansible wait: true state: started register: vm_result + + - name: Wait for the NAT gateway to be available + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + subnet-id: "{{ subnet.subnet.id }}" + state: "available" + register: nat_gateway + retries: 60 + delay: 5 + until: nat_gateway.result | length > 0 + + - name: Create Route table for NAT gateway (private subnet) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + subnets: + - "{{ private_subnet.subnet.id }}" + routes: + - dest: 0.0.0.0/0 + gateway_id: "{{ nat_gateway.result.0.nat_gateway_id }}" + lookup: tag + resource_tags: + subnet: private + route: nat-gateway + state: present diff --git a/tests/integration/targets/test_deploy_flask_app/tasks/delete.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/delete.yaml similarity index 56% rename from tests/integration/targets/test_deploy_flask_app/tasks/delete.yaml rename to tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/delete.yaml index ae621d91..73f1b384 100644 --- a/tests/integration/targets/test_deploy_flask_app/tasks/delete.yaml +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/delete.yaml @@ -15,52 +15,40 @@ ansible.builtin.set_fact: vpc_id: "{{ vpc.vpcs.0.vpc_id }}" - - name: Get bastion instance info + # Delete EC2 instances + - name: Get EC2 instance info amazon.aws.ec2_instance_info: filters: - instance-type: "{{ bastion_host_type }}" - key-name: "{{ deploy_flask_app_sshkey_pair_name }}" vpc-id: "{{ vpc_id }}" - register: bastion - - - name: Delete EC2 instances with dependant Resources - when: bastion.instances | length == 1 - block: - - name: Set 'instance_host_name' variable - ansible.builtin.set_fact: - instance_host_name: "{{ bastion.instances.0.public_dns_name | split('.') | first }}" - - - name: Delete workers key pair - amazon.aws.ec2_key: - name: "{{ instance_host_name }}-key" - state: absent - - - name: Delete load balancer - amazon.aws.elb_classic_lb: - name: "{{ instance_host_name }}-lb" - wait: true - state: absent - - - name: List workers - amazon.aws.ec2_instance_info: - filters: - tag:Name: "{{ instance_host_name }}-workers" - instance-state-name: running - register: running - - - name: Delete workers - amazon.aws.ec2_instance: - instance_ids: "{{ 
running.instances | map(attribute='instance_id') | list }}" - wait: true - state: terminated - - - name: Delete bastion host - amazon.aws.ec2_instance: - instance_ids: - - "{{ bastion.instances.0.instance_id }}" - wait: true - state: terminated + register: ec2_instances + - name: Delete EC2 instances from VPC + amazon.aws.ec2_instance: + instance_ids: "{{ ec2_instances.instances | map(attribute='instance_id') | list }}" + wait: true + state: terminated + when: ec2_instances.instances | length > 0 + + # Delete Load balancer + - name: List Load balancer(s) from VPC + community.aws.elb_classic_lb_info: + register: load_balancers + + - name: Delete load balancer(s) + amazon.aws.elb_classic_lb: + name: "{{ item }}" + wait: true + state: absent + with_items: "{{ load_balancers.elbs | selectattr('vpc_id', 'equalto', vpc_id) | map(attribute='load_balancer_name') | list }}" + + # Delete EC2 key pairs + - name: Delete EC2 key pairs + amazon.aws.ec2_key: + name: "{{ item }}" + state: absent + with_items: "{{ ec2_instances.instances | map(attribute='key_name') | unique | list }}" + + # Delete RDS resources - name: Delete RDS instance amazon.aws.rds_instance: state: absent @@ -68,29 +56,12 @@ skip_final_snapshot: true wait: true - - name: Delete key pair to connect to the bastion VM - amazon.aws.ec2_key: - name: "{{ deploy_flask_app_sshkey_pair_name }}" - state: absent - - name: Delete RDS subnet group amazon.aws.rds_subnet_group: name: "{{ rds_subnet_group_name }}" state: absent - - name: List Security group from VPC - amazon.aws.ec2_security_group_info: - filters: - vpc-id: "{{ vpc_id }}" - tag:prefix: "{{ resource_prefix }}" - register: secgroups - - - name: Delete security groups - amazon.aws.ec2_security_group: - state: absent - group_id: "{{ item }}" - with_items: "{{ secgroups.security_groups | map(attribute='group_id') | list }}" - + # Delete VPC route table - name: List routes table from VPC amazon.aws.ec2_vpc_route_table_info: filters: @@ -106,6 +77,7 @@ state: absent with_items: "{{ route_table.route_tables | map(attribute='id') | list }}" + # Delete NAT Gateway - name: Get NAT gateway amazon.aws.ec2_vpc_nat_gateway_info: filters: @@ -119,23 +91,50 @@ wait: true with_items: "{{ nat_gw.result | map(attribute='nat_gateway_id') | list }}" + # Delete Internet gateway - name: Delete internet gateway amazon.aws.ec2_vpc_igw: vpc_id: "{{ vpc_id }}" state: absent + # Delete Subnets + - name: List Subnets from VPC + amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: "{{ vpc_id }}" + register: vpc_subnets + - name: Delete subnets amazon.aws.ec2_vpc_subnet: cidr: "{{ item }}" state: absent vpc_id: "{{ vpc_id }}" - with_items: "{{ subnet_cidr }}" + with_items: "{{ vpc_subnets.subnets | map(attribute='cidr_block') | list }}" + + # Delete Security groups + - name: List Security group from VPC + amazon.aws.ec2_security_group_info: + filters: + vpc-id: "{{ vpc_id }}" + register: secgroups + + - name: Delete security groups + amazon.aws.ec2_security_group: + state: absent + group_id: "{{ item }}" + with_items: "{{ secgroups.security_groups | rejectattr('group_name', 'equalto', 'default') | map(attribute='group_id') | list }}" - # As ec2_vpc_route_table can't delete route table, the vpc still has dependencies and cannot be deleted. - # You need to do it delete it manually using either the console or the cli.
+ # Delete VPC - name: Delete VPC amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr }}" state: absent ignore_errors: true + + # Delete IAM Role + - name: Delete IAM role + amazon.aws.iam_role: + name: "{{ bastion_host_iam_role }}" + state: absent + wait: true diff --git a/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/main.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/main.yaml new file mode 100644 index 00000000..1cc301d7 --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- name: "Run test_deploy_flask_app" + ansible.builtin.include_tasks: "validate_{{ run_deploy_flask_app_operation }}.yaml" diff --git a/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_create.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_create.yaml new file mode 100644 index 00000000..da946961 --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_create.yaml @@ -0,0 +1,23 @@ +--- +- name: Run operation create + ansible.builtin.include_tasks: "create.yaml" + +- name: Deploy application from the bastion host + ansible.builtin.include_role: + name: cloud.aws_ops.deploy_flask_app + vars: + deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" + deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" + deploy_flask_app_bastion_instance_id: "{{ vm_result.instance_ids.0 }}" + deploy_flask_app_rds_host: "{{ rds_result.endpoint.address }}" + deploy_flask_app_rds_dbname: "{{ rds_result.db_name }}" + deploy_flask_app_bastion_ssh_private_key_path: "{{ deploy_flask_app_bastion_rsa_key_dir }}/id_rsa" + +- name: Check that a page returns successfully + ansible.builtin.uri: + url: "http://{{ deploy_flask_app_lb_result.elb.dns_name }}:{{ deploy_flask_app_listening_port }}" + register: deploy_flask_app_check + until: "deploy_flask_app_check.status == 200" + retries: 200 + delay: 3 + ignore_errors: true diff --git a/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_delete.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_delete.yaml new file mode 100644 index 00000000..8e97433d --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/tasks/validate_delete.yaml @@ -0,0 +1,3 @@ +--- +- name: Delete resources created for Flask application + ansible.builtin.include_tasks: "delete.yaml" diff --git a/tests/integration/targets/test_deploy_flask_app/vars/main.yaml b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/vars/main.yaml similarity index 75% rename from tests/integration/targets/test_deploy_flask_app/vars/main.yaml rename to tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/vars/main.yaml index 488fd0cc..64823dd2 100644 --- a/tests/integration/targets/test_deploy_flask_app/vars/main.yaml +++ b/tests/integration/targets/test_deploy_flask_app/roles/run_deploy_flask_app/vars/main.yaml @@ -22,37 +22,27 @@ rds_engine: postgres rds_engine_version: "14.8" bastion_host_type: t3.micro bastion_host_venv_path: ~/env -image_filter: Fedora-Cloud-Base-37-* +image_filter: Fedora-Cloud-Base-38-* +bastion_host_iam_role: "{{ resource_prefix }}-role" # vars for the deploy_flask_app role and create task # =================================================
deploy_flask_app_bastion_host_name: "{{ resource_prefix }}-bastion" deploy_flask_app_bastion_host_username: fedora -deploy_flask_app_bastion_host_required_packages: - - python3 - - python-virtualenv - - sshpass - - git - - podman - - httpd-tools - - ansible deploy_flask_app_sshkey_pair_name: "{{ resource_prefix }}-key" deploy_flask_app_workers_user_name: fedora deploy_flask_app_workers_instance_type: t3.micro deploy_flask_app_number_of_workers: 2 deploy_flask_app_listening_port: 5000 -deploy_flask_app_git_repository: https://github.com/abikouo/webapp_pyflask_demo.git deploy_flask_app_config: env: development admin_user: admin admin_password: admin app_dir: /app/pyapp deploy_flask_app_force_init: false -deploy_flask_app_local_registry_user: ansible -deploy_flask_app_local_registry_pwd: testing123 -deploy_flask_app_local_registry_port: "{{ deploy_flask_app_listening_port }}" deploy_flask_app_rds_master_password: L#5cH2mgy_ deploy_flask_app_rds_master_username: ansible +deploy_flask_app_bastion_rsa_key_dir: "~/.ssh-{{ resource_prefix }}" diff --git a/tests/integration/targets/test_deploy_flask_app/run.yaml b/tests/integration/targets/test_deploy_flask_app/run.yaml new file mode 100644 index 00000000..54dec912 --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/run.yaml @@ -0,0 +1,13 @@ +--- +- hosts: localhost + gather_facts: false + + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + roles: + - role: run_deploy_flask_app diff --git a/tests/integration/targets/test_deploy_flask_app/runme.sh b/tests/integration/targets/test_deploy_flask_app/runme.sh new file mode 100755 index 00000000..bb3f1767 --- /dev/null +++ b/tests/integration/targets/test_deploy_flask_app/runme.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -eux + +function cleanup() { + ansible-playbook run.yaml -e "run_deploy_flask_app_operation=delete" "$@" + exit 1 +} + +trap 'cleanup "${@}"' ERR + +# Create web application +ansible-playbook run.yaml -e "run_deploy_flask_app_operation=create" "$@" + +# Delete web application
ansible-playbook run.yaml -e "run_deploy_flask_app_operation=delete" "$@" \ No newline at end of file diff --git a/tests/integration/targets/test_deploy_flask_app/tasks/main.yaml b/tests/integration/targets/test_deploy_flask_app/tasks/main.yaml deleted file mode 100644 index 0b013d3d..00000000 --- a/tests/integration/targets/test_deploy_flask_app/tasks/main.yaml +++ /dev/null @@ -1,39 +0,0 @@ --- -- name: "Run deploy_flask_app integration tests" - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ aws_security_token }}" - region: "{{ aws_region }}" - - block: - - name: Fail when 'resource_prefix' is not defined - ansible.builtin.fail: - msg: resource prefix should be defined as resource_prefix - when: resource_prefix is not defined - - - name: Run operation create - ansible.builtin.include_tasks: "create.yaml" - - - name: Deploy resource from Bastion - ansible.builtin.include_role: - name: cloud.aws_ops.deploy_flask_app - vars: - deploy_flask_app_private_subnet_id: "{{ private_subnet.subnet.id }}" - deploy_flask_app_vpc_id: "{{ vpc.vpc.id }}" - deploy_flask_app_vm_info: "{{ vm_result }}" - deploy_flask_app_rds_info: "{{ rds_result }}" - - - name: Check that a page returns successfully - ansible.builtin.uri: - url: "http://{{ deploy_flask_app_lb_result.elb.dns_name
}}:{{ deploy_flask_app_listening_port }}" - register: deploy_flask_app_check - until: "deploy_flask_app_check.status == 200" - retries: 5 - delay: 10 - - always: - # Cleanup after ourselves - - name: Cleanup - ansible.builtin.include_tasks: "delete.yaml"
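With the registry and git parameters removed, invoking the role now comes down to pointing it at an existing bastion instance, its SSH key, and an RDS endpoint. The sketch below is a minimal invocation under the new argument spec; the variable names are the ones used in validate_create.yaml above, while the resource IDs, RDS hostname, and key path are placeholders standing in for whatever your own create step registered:

```yaml
---
- name: Deploy the Flask webapp through the reworked role (minimal sketch)
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Deploy application from an existing bastion host
      ansible.builtin.include_role:
        name: cloud.aws_ops.deploy_flask_app
      vars:
        # All values below are placeholders; the integration tests derive
        # them from the resources registered in create.yaml.
        deploy_flask_app_vpc_id: vpc-0123456789abcdef0
        deploy_flask_app_private_subnet_id: subnet-0123456789abcdef0
        deploy_flask_app_bastion_instance_id: i-0123456789abcdef0
        deploy_flask_app_rds_host: sampledb.abcdefgh.us-east-1.rds.amazonaws.com
        deploy_flask_app_rds_dbname: mysampledb123
        deploy_flask_app_bastion_ssh_private_key_path: ~/.ssh-example/id_rsa
```

The remaining inputs (bastion username, worker count and instance type, deploy_flask_app_config, RDS master credentials) are supplied the same way the test target's vars/main.yaml does above.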