From 6d62d39dfc08fd6ecbd2d6767cc1c36b22600cd3 Mon Sep 17 00:00:00 2001 From: Felix Fontein Date: Mon, 22 May 2023 07:24:44 +0200 Subject: [PATCH 01/29] Switch to Ansible Galaxy compatible requirements files for tests. (#1564) Switch to Ansible Galaxy compatible requirements files for tests SUMMARY See ansible-community/community-topics#230. ISSUE TYPE Test Pull Request COMPONENT NAME test requirements files Reviewed-by: Mark Chappell --- changelogs/fragments/test-reqs.yml | 2 ++ tests/integration/requirements.yml | 4 ++++ tests/requirements.yml | 4 ---- 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/test-reqs.yml create mode 100644 tests/integration/requirements.yml delete mode 100644 tests/requirements.yml diff --git a/changelogs/fragments/test-reqs.yml b/changelogs/fragments/test-reqs.yml new file mode 100644 index 00000000000..3ae8d00f4de --- /dev/null +++ b/changelogs/fragments/test-reqs.yml @@ -0,0 +1,2 @@ +trivial: + - "Move Galaxy test requirements from old transitional format in tests/requirements.yml to a standard Ansible Galaxy requirements file in tests/integration/requirements.yml." 
diff --git a/tests/integration/requirements.yml b/tests/integration/requirements.yml new file mode 100644 index 00000000000..df4d6171dc1 --- /dev/null +++ b/tests/integration/requirements.yml @@ -0,0 +1,4 @@ +--- +collections: +- ansible.windows +- ansible.utils # ipv6 filter diff --git a/tests/requirements.yml b/tests/requirements.yml deleted file mode 100644 index 1df63b1b74d..00000000000 --- a/tests/requirements.yml +++ /dev/null @@ -1,4 +0,0 @@ -integration_tests_dependencies: -- ansible.windows -- ansible.utils # ipv6 filter -unit_tests_dependencies: [] From 3d47b2e3ad30293fce44171e8896fa570b97d17f Mon Sep 17 00:00:00 2001 From: Helen Bailey Date: Tue, 23 May 2023 04:56:31 -0400 Subject: [PATCH 02/29] elb_application_lb - fix missing attributes on create (#1563) elb_application_lb - fix missing attributes on create SUMMARY The create_or_update_alb() function didn't include all attributes when creating a new ALB. This fix just adds a call to the existing update_elb_attributes() and modify_elb_attributes() methods to ensure ALB attributes match supplied params after creating the new ALB. 
Fixes #1510 ISSUE TYPE Bugfix Pull Request COMPONENT NAME elb_application_lb Reviewed-by: Mark Chappell Reviewed-by: Helen Bailey Reviewed-by: Alina Buzachis --- ...pecific-attributes-not-added-on-create.yml | 2 + plugins/modules/elb_application_lb.py | 4 + .../elb_application_lb/defaults/main.yml | 18 ++ .../targets/elb_application_lb/tasks/main.yml | 204 ++++++++++++++++++ .../elb_application_lb/templates/policy.json | 13 ++ 5 files changed, 241 insertions(+) create mode 100644 changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml create mode 100644 tests/integration/targets/elb_application_lb/templates/policy.json diff --git a/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml b/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml new file mode 100644 index 00000000000..763a71e2306 --- /dev/null +++ b/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml @@ -0,0 +1,2 @@ +bugfixes: + - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). 
diff --git a/plugins/modules/elb_application_lb.py b/plugins/modules/elb_application_lb.py index 7dce86c6332..ec0fec87382 100644 --- a/plugins/modules/elb_application_lb.py +++ b/plugins/modules/elb_application_lb.py @@ -614,6 +614,10 @@ def create_or_update_alb(alb_obj): alb_obj.module.exit_json(changed=True, msg="Would have created ALB if not in check mode.") alb_obj.create_elb() + # Add ALB attributes + alb_obj.update_elb_attributes() + alb_obj.modify_elb_attributes() + # Listeners listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() diff --git a/tests/integration/targets/elb_application_lb/defaults/main.yml b/tests/integration/targets/elb_application_lb/defaults/main.yml index f475f05c51e..71985192471 100644 --- a/tests/integration/targets/elb_application_lb/defaults/main.yml +++ b/tests/integration/targets/elb_application_lb/defaults/main.yml @@ -2,9 +2,27 @@ resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}" alb_name: alb-test-{{ resource_short }} +alb_2_name: alb-test-2-{{ resource_short }} tg_name: alb-test-{{ resource_short }} +tg_2_name: alb-test-2-{{ resource_short }} vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 private_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 private_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24 public_subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.3.0/24 public_subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.4.0/24 +s3_bucket_name: alb-test-{{ resource_short }} + +# Amazon's SDKs don't provide the list of account ID's. Amazon only provide a +# web page. If you want to run the tests outside the US regions you'll need to +# update this. 
+# https://docs.aws.amazon.com/elasticloadbalancing/latest/application/enable-access-logging.html +elb_access_log_account_id_map: + us-east-1: "127311923021" + us-east-2: "033677994240" + us-west-1: "027434742980" + us-west-2: "797873946194" + us-gov-east-1: "190560391635" + us-gov-west-1: "048591011584" + + +elb_account_id: '{{ elb_access_log_account_id_map[aws_region] }}' diff --git a/tests/integration/targets/elb_application_lb/tasks/main.yml b/tests/integration/targets/elb_application_lb/tasks/main.yml index a921fc2d7f6..96b7b3b2e5e 100644 --- a/tests/integration/targets/elb_application_lb/tasks/main.yml +++ b/tests/integration/targets/elb_application_lb/tasks/main.yml @@ -115,6 +115,27 @@ vpc_id: '{{ vpc_id }}' state: present register: tg + - name: Create a second target group for testing + community.aws.elb_target_group: + name: '{{ tg_2_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: present + register: tg_2 + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Register account id + ansible.builtin.set_fact: + aws_account: "{{ aws_caller_info.account }}" + - name: Create S3 bucket for testing + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: present + encryption: "aws:kms" + policy: "{{ lookup('template', 'policy.json') }}" + - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS) elb_application_lb: name: '{{ alb_name }}' @@ -270,6 +291,157 @@ # ------------------------------------------------------------------------------------------ + - name: Create an ALB with attributes - check_mode + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + 
access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify check mode response + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with attributes + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + idle_timeout: 120 + ip_address_type: dualstack + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was created with correct attributes + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" + - alb_2.access_logs_s3_prefix == "alb-logs" + - not alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == "120" + - alb_2.waf_fail_open_enabled | bool + + - name: Create an ALB with 
attributes (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify idempotence check mode response + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with attributes (idempotence) + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + subnets: '{{ public_subnets }}' + security_groups: '{{ sec_group.group_id }}' + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: '{{ tg_2_name }}' + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: "alb-logs" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was not changed + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" + - alb_2.access_logs_s3_prefix == "alb-logs" + - not 
alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == "120" + - alb_2.waf_fail_open_enabled | bool + + # ------------------------------------------------------------------------------------------ + - name: Update an ALB with ip address type - check_mode elb_application_lb: name: '{{ alb_name }}' @@ -1275,6 +1447,13 @@ wait: true wait_timeout: 600 ignore_errors: true + - name: Destroy ALB 2 + amazon.aws.elb_application_lb: + name: '{{ alb_2_name }}' + state: absent + wait: true + wait_timeout: 600 + ignore_errors: true - name: Destroy target group if it was created elb_target_group: name: '{{ tg_name }}' @@ -1290,6 +1469,21 @@ until: remove_tg is success when: tg is defined ignore_errors: true + - name: Destroy target group 2 if it was created + community.aws.elb_target_group: + name: '{{ tg_2_name }}' + protocol: http + port: 80 + vpc_id: '{{ vpc_id }}' + state: absent + wait: true + wait_timeout: 600 + register: remove_tg_2 + retries: 5 + delay: 3 + until: remove_tg_2 is success + when: tg_2 is defined + ignore_errors: true - name: Destroy sec groups ec2_security_group: name: '{{ item }}' @@ -1352,3 +1546,13 @@ delay: 5 until: remove_vpc is success ignore_errors: true + - name: Destroy ELB acccess log test file + amazon.aws.s3_object: + bucket: "{{ s3_bucket_name }}" + mode: delobj + object: "alb-logs/AWSLogs/{{ aws_account }}/ELBAccessLogTestFile" + - name: Destroy S3 bucket + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: absent + force: true diff --git a/tests/integration/targets/elb_application_lb/templates/policy.json b/tests/integration/targets/elb_application_lb/templates/policy.json new file mode 100644 index 00000000000..aa6ebf9b6fb --- /dev/null +++ 
b/tests/integration/targets/elb_application_lb/templates/policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ elb_account_id}}:root" + }, + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{{ s3_bucket_name }}/alb-logs/AWSLogs/{{ aws_account }}/*" + } + ] +} From 9b65de3c26f3a768196f5c606549f81a4114d900 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 23 May 2023 16:46:55 +0200 Subject: [PATCH 03/29] Add black and github workflow rework (#1499) Manually merged (.github/) --- .github/workflows/all_green_check.yml | 42 ++++++ .github/workflows/ansible-bot.yml | 2 +- .github/workflows/changelog_and_linters.yml | 10 ++ .github/workflows/darker-pr.yml | 50 ------- .github/workflows/sanity.yml | 64 +++++++++ .github/workflows/tests.yml | 151 -------------------- .github/workflows/units.yml | 64 +++++++++ .github/workflows/update-variables.yml | 2 +- tox.ini | 4 +- 9 files changed, 183 insertions(+), 206 deletions(-) create mode 100644 .github/workflows/all_green_check.yml create mode 100644 .github/workflows/changelog_and_linters.yml delete mode 100644 .github/workflows/darker-pr.yml create mode 100644 .github/workflows/sanity.yml delete mode 100644 .github/workflows/tests.yml create mode 100644 .github/workflows/units.yml diff --git a/.github/workflows/all_green_check.yml b/.github/workflows/all_green_check.yml new file mode 100644 index 00000000000..5ea5cc71220 --- /dev/null +++ b/.github/workflows/all_green_check.yml @@ -0,0 +1,42 @@ +--- +name: all_green + +concurrency: + group: ${{ github.head_ref }} + cancel-in-progress: true + +on: # yamllint disable-line rule:truthy + pull_request: + types: + - opened + - reopened + - labeled + - unlabeled + - synchronize + branches: + - main + - 'stable-*' + tags: + - '*' + +jobs: + changelog-and-linters: + uses: ./.github/workflows/changelog_and_linters.yml # use the callable changelog-and-linters job to run tests + sanity: + 
uses: ./.github/workflows/sanity.yml # use the callable sanity job to run tests + units: + uses: ./.github/workflows/units.yml # use the callable units job to run tests + all_green: + if: ${{ always() }} + needs: + - changelog-and-linters + - sanity + - units + runs-on: ubuntu-latest + steps: + - run: >- + python -c "assert set([ + '${{ needs.changelog-and-linters.result }}', + '${{ needs.sanity.result }}', + '${{ needs.units.result }}' + ]) == {'success'}" diff --git a/.github/workflows/ansible-bot.yml b/.github/workflows/ansible-bot.yml index 23da46607f7..347abc738f7 100644 --- a/.github/workflows/ansible-bot.yml +++ b/.github/workflows/ansible-bot.yml @@ -14,4 +14,4 @@ jobs: steps: - uses: actions-ecosystem/action-add-labels@v1 with: - labels: needs_triage \ No newline at end of file + labels: needs_triage diff --git a/.github/workflows/changelog_and_linters.yml b/.github/workflows/changelog_and_linters.yml new file mode 100644 index 00000000000..ddd891ea294 --- /dev/null +++ b/.github/workflows/changelog_and_linters.yml @@ -0,0 +1,10 @@ +--- +name: changelog and linters + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + changelog: + uses: ansible-network/github_actions/.github/workflows/changelog.yml@main + linters: + uses: ansible-network/github_actions/.github/workflows/tox-linters.yml@main diff --git a/.github/workflows/darker-pr.yml b/.github/workflows/darker-pr.yml deleted file mode 100644 index df5d71f827a..00000000000 --- a/.github/workflows/darker-pr.yml +++ /dev/null @@ -1,50 +0,0 @@ ---- -name: 'Python formatting linter (Darker / Black)' - -on: - workflow_dispatch: - pull_request: - branches: - - main - -permissions: - contents: read - pull-requests: read - -# This allows a subsequently queued workflow run to interrupt previous runs -concurrency: - group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' - cancel-in-progress: true - -jobs: - check-darker: - 
runs-on: ${{ fromJSON('["ubuntu-latest", "self-hosted"]')[github.repository == 'github/docs-internal'] }} - steps: - - name: Set up Python - uses: actions/setup-python@v3 - with: - python-version: ${{ inputs.python }} - - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.sha }} - - - name: Install darker - shell: bash - run: | - pip install darker - - - name: Rebase against current base - shell: bash - run: | - git config user.email "github@example.com" - git config user.name "Git Hub Testing Rebase" - git rebase ${{ github.event.pull_request.base.sha }} - git show -s - - - name: Run darker - shell: bash - run: | - darker --check --diff --color --rev ${{ github.event.pull_request.base.sha }}.. diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml new file mode 100644 index 00000000000..01bc4c9c086 --- /dev/null +++ b/.github/workflows/sanity.yml @@ -0,0 +1,64 @@ +--- +name: sanity tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + sanity: + uses: ansible-network/github_actions/.github/workflows/sanity.yml@main + with: + matrix_include: "[]" + matrix_exclude: >- + [ + { + "ansible-version": "stable-2.9" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.8" + }, + { + "ansible-version": "milestone", + "python-version": "3.7" + }, + { + "ansible-version": "milestone", + "python-version": "3.8" + }, + { + "ansible-version": "devel", + 
"python-version": "3.7" + }, + { + "ansible-version": "devel", + "python-version": "3.8" + } + ] diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml deleted file mode 100644 index beeaec60969..00000000000 --- a/.github/workflows/tests.yml +++ /dev/null @@ -1,151 +0,0 @@ ---- -name: Test collection - -concurrency: - group: ${{ github.head_ref }} - cancel-in-progress: true - -on: # yamllint disable-line rule:truthy - pull_request: - branches: - - main - - 'stable-*' - workflow_dispatch: - -jobs: - linters: - uses: abikouo/github_actions/.github/workflows/tox-linters.yml@tox_linters - changelog: - uses: ansible-network/github_actions/.github/workflows/changelog.yml@main - sanity: - uses: ansible-network/github_actions/.github/workflows/sanity.yml@main - with: - matrix_include: "[]" - matrix_exclude: >- - [ - { - "ansible-version": "stable-2.9" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.8" - }, - { - "ansible-version": "milestone", - "python-version": "3.7" - }, - { - "ansible-version": "milestone", - "python-version": "3.8" - }, - { - "ansible-version": "devel", - "python-version": "3.7" - }, - { - "ansible-version": "devel", - "python-version": "3.8" - } - ] - unit-source: - uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main - with: - matrix_exclude: >- - [ - { - "python-version": "3.11" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.13", - 
"python-version": "3.7" - }, - { - "ansible-version": "stable-2.12", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.13", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.14", - "python-version": "3.8" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.7" - }, - { - "ansible-version": "stable-2.15", - "python-version": "3.8" - }, - { - "ansible-version": "milestone", - "python-version": "3.7" - }, - { - "ansible-version": "milestone", - "python-version": "3.8" - }, - { - "ansible-version": "devel", - "python-version": "3.7" - }, - { - "ansible-version": "devel", - "python-version": "3.8" - } - ] - collection_pre_install: '' - all_green: - if: ${{ always() }} - needs: - - changelog - - linters - - sanity - - unit-source - runs-on: ubuntu-latest - steps: - - run: >- - python -c "assert set([ - '${{ needs.changelog.result }}', - '${{ needs.sanity.result }}', - '${{ needs.linters.result }}', - '${{ needs.unit-source.result }}' - ]) == {'success'}" diff --git a/.github/workflows/units.yml b/.github/workflows/units.yml new file mode 100644 index 00000000000..b55028a08df --- /dev/null +++ b/.github/workflows/units.yml @@ -0,0 +1,64 @@ +--- +name: unit tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + unit-source: + uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main + with: + matrix_exclude: >- + [ + { + "python-version": "3.11" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.12", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.13", + "python-version": "3.8" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.14", + "python-version": "3.8" + }, + { + "ansible-version": 
"stable-2.15", + "python-version": "3.7" + }, + { + "ansible-version": "stable-2.15", + "python-version": "3.8" + }, + { + "ansible-version": "milestone", + "python-version": "3.7" + }, + { + "ansible-version": "milestone", + "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.7" + }, + { + "ansible-version": "devel", + "python-version": "3.8" + } + ] + collection_pre_install: '' diff --git a/.github/workflows/update-variables.yml b/.github/workflows/update-variables.yml index 4c9103bed20..f92f77cc6e8 100644 --- a/.github/workflows/update-variables.yml +++ b/.github/workflows/update-variables.yml @@ -14,4 +14,4 @@ on: jobs: update-variables: - uses: abikouo/github_actions/.github/workflows/update_aws_variables.yml@automate_aws_user_agent_variable \ No newline at end of file + uses: ansible-network/github_actions/.github/workflows/update_aws_variables.yml@main diff --git a/tox.ini b/tox.ini index 08a3afeec0f..63409e54498 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,7 @@ commands = coverage erase description = Generate a HTML complexity report in the complexity directory deps = # See: https://github.com/lordmauve/flake8-html/issues/30 - flake8>=3.3.0,<5.0.0' + flake8>=3.3.0,<5.0.0 flake8-html commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins @@ -48,5 +48,3 @@ show-source = True ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 max-line-length = 160 builtins = _ - - From b7533c55cbfb63f136d04d3543d7ffb90d0ba7ca Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Tue, 30 May 2023 12:51:45 +0200 Subject: [PATCH 04/29] Fix: ec2_instance idempotency issue when attaching ENI to isntance (#1576) ec2_instance - fix idempotency issue when attaching ENI to isntance SUMMARY closes #1403 ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_instance Reviewed-by: Alina Buzachis --- .../ec2_instance-eni-attach-idempotency.yml | 3 ++ 
plugins/modules/ec2_instance.py | 23 +++++---- .../tasks/main.yml | 51 ++++++++++++++++++- 3 files changed, 66 insertions(+), 11 deletions(-) create mode 100644 changelogs/fragments/ec2_instance-eni-attach-idempotency.yml diff --git a/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml b/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml new file mode 100644 index 00000000000..ae50aa484d7 --- /dev/null +++ b/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). diff --git a/plugins/modules/ec2_instance.py b/plugins/modules/ec2_instance.py index 9847e36cfc2..a70b6bf15dd 100644 --- a/plugins/modules/ec2_instance.py +++ b/plugins/modules/ec2_instance.py @@ -1510,16 +1510,19 @@ def change_network_attachments(instance, params): # network.interfaces can create the need to attach new interfaces old_ids = [inty["NetworkInterfaceId"] for inty in instance["NetworkInterfaces"]] to_attach = set(new_ids) - set(old_ids) - for eni_id in to_attach: - try: - client.attach_network_interface( - aws_retry=True, - DeviceIndex=new_ids.index(eni_id), - InstanceId=instance["InstanceId"], - NetworkInterfaceId=eni_id, - ) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}") + if not module.check_mode: + for eni_id in to_attach: + try: + client.attach_network_interface( + aws_retry=True, + DeviceIndex=new_ids.index(eni_id), + InstanceId=instance["InstanceId"], + NetworkInterfaceId=eni_id, + ) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Could not attach interface {eni_id} to instance {instance['InstanceId']}" + ) return bool(len(to_attach)) return False diff --git 
a/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml b/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml index 74943a3fdbf..7aa2c196067 100644 --- a/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml +++ b/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml @@ -60,6 +60,39 @@ - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + - name: "Add a second interface (check_mode=true)" + ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface_check_mode + check_mode: true + + - name: Validate task reported changed + assert: + that: + - add_interface_check_mode is changed + + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' + - name: "Add a second interface" ec2_instance: state: present @@ -75,9 +108,25 @@ wait: false register: add_interface until: add_interface is not failed - ignore_errors: yes + ignore_errors: true retries: 10 + - name: Validate that the instance has now 2 interfaces attached + block: + - name: "Gather {{ resource_prefix }}-test-eni-vpc info" + ec2_instance_info: + filters: + "tag:Name": '{{ resource_prefix }}-test-eni-vpc' + register: in_test_vpc_instance + + - name: Validate that 
only 1 ENI is attached to instance as we run using check_mode=true + assert: + that: + - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' + - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 2' + + when: add_interface is successful + - name: "Make instance in the testing subnet created in the test VPC(check mode)" ec2_instance: state: present From f1fe603084fdf05b70a856ea137a608eb663ccb9 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 30 May 2023 15:23:31 +0200 Subject: [PATCH 05/29] ec2_metadata_facts - Handle decompression when EC2 instance user-data is compressed (#1575) ec2_metadata_facts - Handle decompression when EC2 instance user-data is compressed SUMMARY Handle decompression when user-data is compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data because the header is missing (The API does not set 'Content-Encoding' = 'gzip'). ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_metadata_facts ADDITIONAL INFORMATION Reviewed-by: Mike Graves Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- ...data_facts-handle_compressed_user_data.yml | 3 ++ plugins/modules/ec2_metadata_facts.py | 34 ++++++++++++++++ .../modules/test_ec2_metadata_facts.py | 39 +++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml diff --git a/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml b/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml new file mode 100644 index 00000000000..2abee986495 --- /dev/null +++ b/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - 'ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. 
The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575).' diff --git a/plugins/modules/ec2_metadata_facts.py b/plugins/modules/ec2_metadata_facts.py index b8288d2d912..9d043bf218d 100644 --- a/plugins/modules/ec2_metadata_facts.py +++ b/plugins/modules/ec2_metadata_facts.py @@ -438,6 +438,7 @@ import re import socket import time +import zlib from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text @@ -484,11 +485,42 @@ def __init__( self._token = None self._prefix = "ansible_ec2_%s" + def _decode(self, data): + try: + return data.decode("utf-8") + except UnicodeDecodeError: + # Decoding as UTF-8 failed, return data without raising an error + self.module.warn("Decoding user-data as UTF-8 failed, return data as is ignoring any error") + return data.decode("utf-8", errors="ignore") + + def decode_user_data(self, data): + is_compressed = False + + # Check if data is compressed using zlib header + if data.startswith(b"\x78\x9c") or data.startswith(b"\x1f\x8b"): + is_compressed = True + + if is_compressed: + # Data is compressed, attempt decompression and decode using UTF-8 + try: + decompressed = zlib.decompress(data, zlib.MAX_WBITS | 32) + return self._decode(decompressed) + except zlib.error: + # Unable to decompress, return original data + self.module.warn( + "Unable to decompress user-data using zlib, attempt to decode original user-data as UTF-8" + ) + return self._decode(data) + else: + # Data is not compressed, decode using UTF-8 + return self._decode(data) + def _fetch(self, url): encoded_url = quote(url, safe="%/:=&?~#+!$,;'@()*[]") headers = {} if self._token: headers = {"X-aws-ec2-metadata-token": self._token} + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) if info.get("status") in (401, 403): @@ -505,6 +537,8 @@ def 
_fetch(self, url): ) if response and info["status"] < 400: data = response.read() + if "user-data" in encoded_url: + return to_text(self.decode_user_data(data)) else: data = None return to_text(data) diff --git a/tests/unit/plugins/modules/test_ec2_metadata_facts.py b/tests/unit/plugins/modules/test_ec2_metadata_facts.py index e43b00769e3..10c4a341adc 100644 --- a/tests/unit/plugins/modules/test_ec2_metadata_facts.py +++ b/tests/unit/plugins/modules/test_ec2_metadata_facts.py @@ -1,6 +1,7 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +import gzip import io import pytest from unittest.mock import MagicMock @@ -59,3 +60,41 @@ def test_fetch_recusive(m_fetch_url, ec2_instance): ] ec2_instance.fetch("http://169.254.169.254/latest/meta-data/") assert ec2_instance._data == {"http://169.254.169.254/latest/meta-data/whatever/my-key": "my-value"} + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_compressed(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(gzip.compress(user_data)), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_plain(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(user_data), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") From 
4d74baf80ba5fd78fbe3d23a0977a69d0f3ae6f3 Mon Sep 17 00:00:00 2001 From: Matthew Davis <7035647+mdavis-xyz@users.noreply.github.com> Date: Wed, 31 May 2023 20:41:28 +1000 Subject: [PATCH 06/29] Remove unrelated snapshot comment from rds_instance state docs (#1578) Remove unrelated snapshot comment from rds_instance state docs SUMMARY The state field for rds_instance mentions snapshots. But that's irrelevant. Probably an error when copy-pasting to create the module. Fixes #1574 ISSUE TYPE Docs Pull Request COMPONENT NAME rds_instance Reviewed-by: Mark Chappell --- changelogs/fragments/1578-rds-instance-docs.yml | 2 ++ plugins/modules/rds_instance.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1578-rds-instance-docs.yml diff --git a/changelogs/fragments/1578-rds-instance-docs.yml b/changelogs/fragments/1578-rds-instance-docs.yml new file mode 100644 index 00000000000..821f8009161 --- /dev/null +++ b/changelogs/fragments/1578-rds-instance-docs.yml @@ -0,0 +1,2 @@ +trivial: + - "Remove incorrect mention of RDS snapshots in RDS instance state parameter, and reformat the remaining parameter explanation." diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index ab80af2df13..527fc6f256b 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -24,11 +24,11 @@ # General module options state: description: - - Whether the snapshot should exist or not. I(rebooted) is not idempotent and will leave the DB instance in a running state + - Desired state of the RDS Instance. + - I(state=rebooted) is not idempotent and will leave the DB instance in a running state and start it prior to rebooting if it was stopped. I(present) will leave the DB instance in the current running/stopped state, (running if creating the DB instance). - - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). 
Note - rebooting the instance - is not idempotent. + - I(state=running) and I(state=started) are synonyms, as are I(state=rebooted) and I(state=restarted). choices: ['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'] default: 'present' type: str From 463d58802ebc1d8d40703baa96fa1c8b42ac13f8 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Thu, 1 Jun 2023 23:44:17 +0200 Subject: [PATCH 07/29] Update aws_ec2 documentation for the use_ssm_inventory option (#1586) Update aws_ec2 documentation for the use_ssm_inventory option SUMMARY Update aws_ec2 documentation for the use_ssm_inventory option Might fix #1461 ISSUE TYPE Docs Pull Request COMPONENT NAME aws_ec2 Reviewed-by: Mark Chappell Reviewed-by: Jill R --- .../20230531-aws_ec2-use_ssm_inventory_documentation.yml | 2 ++ docs/docsite/rst/aws_ec2_guide.rst | 4 +++- plugins/inventory/aws_ec2.py | 5 ++++- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml diff --git a/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml b/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml new file mode 100644 index 00000000000..3b769cd3f07 --- /dev/null +++ b/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml @@ -0,0 +1,2 @@ +trivial: + - "Update the ``aws_ec2`` inventory plugins documentation for the ``use_ssm_inventory`` option." diff --git a/docs/docsite/rst/aws_ec2_guide.rst b/docs/docsite/rst/aws_ec2_guide.rst index 6d8b9f2123d..514d8ada439 100644 --- a/docs/docsite/rst/aws_ec2_guide.rst +++ b/docs/docsite/rst/aws_ec2_guide.rst @@ -484,7 +484,9 @@ Now the output of ``ansible-inventory -i demo.aws_ec2.yml --list``: ``use_ssm_inventory`` --------------------- -``use_ssm_inventory: True`` will include SSM inventory variables into hostvars for ssm-configured instances. 
+``use_ssm_inventory: True`` enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. By leveraging the SSM inventory data, the ``use_ssm_inventory`` option provides additional details and attributes about the EC2 instances in your inventory. +These details can include operating system information, installed software, network configurations, and custom inventory attributes defined in SSM. + ``cache`` --------- diff --git a/plugins/inventory/aws_ec2.py b/plugins/inventory/aws_ec2.py index 260ad03d51f..ac6df2e84b0 100644 --- a/plugins/inventory/aws_ec2.py +++ b/plugins/inventory/aws_ec2.py @@ -137,7 +137,10 @@ version_added: 3.1.0 use_ssm_inventory: description: - - Add SSM inventory information into hostvars. + - Enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. + - By leveraging the SSM inventory data, the I(use_ssm_inventory) option provides additional details and attributes + about the EC2 instances in your inventory. These details can include operating system information, installed software, + network configurations, and custom inventory attributes defined in SSM. type: bool default: False version_added: 6.0.0 From ee68be3d7221b76e3bff2ecd047b5eabc62faf46 Mon Sep 17 00:00:00 2001 From: GomathiselviS Date: Fri, 2 Jun 2023 07:44:19 -0400 Subject: [PATCH 08/29] aws_ec2 inventory : Modify accepted type for hostnames argument in the module doc (#1582) aws_ec2 inventory : Modify accepted type for hostnames argument in the module doc SUMMARY Fixes #1460 The argument hostnames is of type list, whose elements can be string and dict. 
ISSUE TYPE Docs Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- changelogs/fragments/ec2-inventory-hostnames-doc.yml | 2 ++ plugins/inventory/aws_ec2.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/ec2-inventory-hostnames-doc.yml diff --git a/changelogs/fragments/ec2-inventory-hostnames-doc.yml b/changelogs/fragments/ec2-inventory-hostnames-doc.yml new file mode 100644 index 00000000000..de5663282eb --- /dev/null +++ b/changelogs/fragments/ec2-inventory-hostnames-doc.yml @@ -0,0 +1,2 @@ +trivial: + - "Module documetation fix for hostnames argument. It can be list of str and dict." diff --git a/plugins/inventory/aws_ec2.py b/plugins/inventory/aws_ec2.py index ac6df2e84b0..6de182bd59e 100644 --- a/plugins/inventory/aws_ec2.py +++ b/plugins/inventory/aws_ec2.py @@ -32,16 +32,17 @@ hostnames: description: - A list in order of precedence for hostname variables. + - The elements of the list can be a dict with the keys mentioned below or a string. + - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). + - If value provided does not exist in the above options, it will be used as a literal string. + - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. type: list - elements: dict + elements: raw default: [] suboptions: name: description: - Name of the host. - - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). - - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. - - If value provided does not exist in the above options, it will be used as a literal string. 
type: str required: True prefix: @@ -776,6 +777,9 @@ def parse(self, inventory, loader, path, cache=True): use_contrib_script_compatible_ec2_tag_keys = self.get_option("use_contrib_script_compatible_ec2_tag_keys") use_ssm_inventory = self.get_option("use_ssm_inventory") + if not all(isinstance(element, (dict, str)) for element in hostnames): + self.fail_aws("Hostnames should be a list of dict and str.") + if self.get_option("include_extra_api_calls"): self.display.deprecate( "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.", From 3d045fda5df9dd6d867c05c29982a6186d616b5a Mon Sep 17 00:00:00 2001 From: "Bryan J. Hong" Date: Tue, 6 Jun 2023 03:13:18 -0700 Subject: [PATCH 09/29] autoscaling_group_info - Fix ValidationError when describing ASGs that have more than 20 Load Balancer Target Groups attached (#1593) autoscaling_group_info - Fix ValidationError when describing ASGs that have more than 20 Load Balancer Target Groups attached SUMMARY When retrieving autoscaling group info from an ASG that has more than 20 load balancer target groups attached to it the AWS API returns a ValidationError via Ansible: "error": { "code": "ValidationError", "message": "You cannot describe more than '20' target groups at a time", "type": "Sender" }, This commit (if length of target_group_arns is greater than 20) breaks the target_group_arns list into chunks and builds the target_group_names list incrementally to avoid exceeding the AWS limit of 20. ISSUE TYPE Bugfix Pull Request COMPONENT NAME autoscaling_group_info ADDITIONAL INFORMATION Before An exception occurred during task execution. To see the full traceback, use -vvv. 
The error was: botocore.exceptions.ClientError: An error occurred (ValidationError) when calling the DescribeTargetGroups operation: You cannot describe more than '20' target groups at a time failed: [us-west-1] (item=['redacted', 'redacted']) => { "ansible_loop_var": "item", "boto3_version": "1.26.136", "botocore_version": "1.29.146", "changed": false, "error": { "code": "ValidationError", "message": "You cannot describe more than '20' target groups at a time", "type": "Sender" }, "item": [ "01", "proxy" ], "response_metadata": { "http_headers": { "connection": "close", "content-length": "321", "content-type": "text/xml", "date": "Fri, 02 Jun 2023 21:45:34 GMT", "x-amzn-requestid": "32205cd9-0c1c-4c8b-bf6f-0536d1251f72" }, "http_status_code": 400, "request_id": "32205cd9-0c1c-4c8b-bf6f-0536d1251f72", "retry_attempts": 0 } } MSG: Failed to describe Target Groups: An error occurred (ValidationError) when calling the DescribeTargetGroups operation: You cannot describe more than '20' target groups at a time These are the sections of output below https://docs.ansible.com/ansible/latest/collections/amazon/aws/autoscaling_group_info_module.html#return-target_group_arns https://docs.ansible.com/ansible/latest/collections/amazon/aws/autoscaling_group_info_module.html#return-target_group_names After this commit, the module is able to retrieve and return target group names vs ValidationError above. ... 
"target_group_arns": [ "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs0-001-pd01/54a21ec73d9567de", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs1-001-pd01/9c3c288759270055", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs10-001-pd01/1a8f765efa2b21f3", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs11-001-pd01/be79ff24e7c9dfc8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs12-001-pd01/b0efa4c98c3707ea", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs13-001-pd01/5ba3bd3e820189e5", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs14-001-pd01/9eb427e64e64cb34", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs15-001-pd01/cd57f2fc112ef1be", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs16-001-pd01/7c96ab6cf57065d9", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs17-001-pd01/b2f2cb1f8c56fccd", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs18-001-pd01/48d5b7e958256383", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs19-001-pd01/c31c2835b325053e", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs2-001-pd01/088e22de058a5682", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs20-001-pd01/06bc6d5594c400d8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs21-001-pd01/d2e19039fb58901a", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs22-001-pd01/3e542166d868cd51", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs23-001-pd01/915a38152472f256", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs24-001-pd01/0d490d00340a3b0c", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs25-001-pd01/4da43adf85c2c74f", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs26-001-pd01/dd6eed2cf2b3b40a", 
"arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs27-001-pd01/b6a70661615b58e5", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs28-001-pd01/9f90bb0063a399d3", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs29-001-pd01/a5de186522047a38", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs3-001-pd01/1abe1864526da4f4", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs30-001-pd01/acdd881d550190b0", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs31-001-pd01/e2b2d9f7496a1e42", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs32-001-pd01/c67d2beab9937b36", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs33-001-pd01/1a6e069d76d35129", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs34-001-pd01/7e7a041ba28d9ba1", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs35-001-pd01/7e9c6316c8bfbdcc", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs36-001-pd01/a63ef453fb2a9ffc", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs37-001-pd01/6d2b70df56a6e6b8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs38-001-pd01/c29789ed0c5f185a", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs39-001-pd01/b2ee2dd578012057", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs4-001-pd01/6bac5695356d7a31", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs40-001-pd01/7b7a1b4110b676a0", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs41-001-pd01/69f80d314ea3998c", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs42-001-pd01/f3b542db69b6364e", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs43-001-pd01/e2af6db68a4f90c1", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs44-001-pd01/d8e30df7de550115", 
"arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs45-001-pd01/28030e72b5771b95", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs46-001-pd01/4c6558b79bea34e9", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs47-001-pd01/8de7ffe13bb7fdd8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs48-001-pd01/02d57221242667e8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs49-001-pd01/5847cbf10c46b9aa", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs5-001-pd01/82828f534fc4f0ba", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs6-001-pd01/c6258cfe31de8ff8", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs7-001-pd01/e665c3b1fceb2717", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs8-001-pd01/5309914c19f919cc", "arn:aws:elasticloadbalancing:us-west-1:redacted:targetgroup/tgcs9-001-pd01/bf33d7ca75fe4c37" ], "target_group_names": [ "tgcs0-001-pd01", "tgcs1-001-pd01", "tgcs10-001-pd01", "tgcs11-001-pd01", "tgcs12-001-pd01", "tgcs13-001-pd01", "tgcs14-001-pd01", "tgcs15-001-pd01", "tgcs16-001-pd01", "tgcs17-001-pd01", "tgcs18-001-pd01", "tgcs19-001-pd01", "tgcs2-001-pd01", "tgcs20-001-pd01", "tgcs21-001-pd01", "tgcs22-001-pd01", "tgcs23-001-pd01", "tgcs24-001-pd01", "tgcs25-001-pd01", "tgcs26-001-pd01", "tgcs27-001-pd01", "tgcs28-001-pd01", "tgcs29-001-pd01", "tgcs3-001-pd01", "tgcs30-001-pd01", "tgcs31-001-pd01", "tgcs32-001-pd01", "tgcs33-001-pd01", "tgcs34-001-pd01", "tgcs35-001-pd01", "tgcs36-001-pd01", "tgcs37-001-pd01", "tgcs38-001-pd01", "tgcs39-001-pd01", "tgcs4-001-pd01", "tgcs40-001-pd01", "tgcs41-001-pd01", "tgcs42-001-pd01", "tgcs43-001-pd01", "tgcs44-001-pd01", "tgcs45-001-pd01", "tgcs46-001-pd01", "tgcs47-001-pd01", "tgcs48-001-pd01", "tgcs49-001-pd01", "tgcs5-001-pd01", "tgcs6-001-pd01", "tgcs7-001-pd01", "tgcs8-001-pd01", "tgcs9-001-pd01" ], ... Reviewed-by: Mark Chappell Reviewed-by: Bryan J. 
Hong Reviewed-by: Alina Buzachis --- ...ling_group_info-20-target-groups-per-call.yml | 4 ++++ plugins/modules/autoscaling_group.py | 16 +++++++++++----- plugins/modules/autoscaling_group_info.py | 14 ++++++++++++-- .../roles/ec2_asg/tasks/create_update_delete.yml | 4 ++-- 4 files changed, 29 insertions(+), 9 deletions(-) create mode 100644 changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml diff --git a/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml b/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml new file mode 100644 index 00000000000..7a763556b80 --- /dev/null +++ b/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml @@ -0,0 +1,4 @@ +--- +bugfixes: + - autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). 
diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index 8ac27e2e062..5ee414c89d4 100644 --- a/plugins/modules/autoscaling_group.py +++ b/plugins/modules/autoscaling_group.py @@ -901,12 +901,18 @@ def get_properties(autoscaling_group): if properties["target_group_arns"]: elbv2_connection = module.client("elbv2") tg_paginator = elbv2_connection.get_paginator("describe_target_groups") - tg_result = tg_paginator.paginate(TargetGroupArns=properties["target_group_arns"]).build_full_result() - target_groups = tg_result["TargetGroups"] + # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html + tg_chunk_size = 20 + properties["target_group_names"] = [] + tg_chunks = [ + properties["target_group_arns"][i : i + tg_chunk_size] + for i in range(0, len(properties["target_group_arns"]), tg_chunk_size) + ] + for chunk in tg_chunks: + tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() + properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]]) else: - target_groups = [] - - properties["target_group_names"] = [tg["TargetGroupName"] for tg in target_groups] + properties["target_group_names"] = [] return properties diff --git a/plugins/modules/autoscaling_group_info.py b/plugins/modules/autoscaling_group_info.py index 4e3fe5c5eab..eb376c001fd 100644 --- a/plugins/modules/autoscaling_group_info.py +++ b/plugins/modules/autoscaling_group_info.py @@ -414,8 +414,18 @@ def find_asgs(conn, module, name=None, tags=None): if elbv2: try: tg_paginator = elbv2.get_paginator("describe_target_groups") - tg_result = tg_paginator.paginate(TargetGroupArns=asg["target_group_arns"]).build_full_result() - asg["target_group_names"] = [tg["TargetGroupName"] for tg in tg_result["TargetGroups"]] + # Limit of 20 similar to https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html + 
tg_chunk_size = 20 + asg["target_group_names"] = [] + tg_chunks = [ + asg["target_group_arns"][i : i + tg_chunk_size] + for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) + ] + for chunk in tg_chunks: + tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() + asg["target_group_names"].extend( + [tg["TargetGroupName"] for tg in tg_result["TargetGroups"]] + ) except is_boto3_error_code("TargetGroupNotFound"): asg["target_group_names"] = [] except ( diff --git a/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml b/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml index bea9e82163f..456973ce417 100644 --- a/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml +++ b/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml @@ -474,8 +474,8 @@ # Target group names have max length of 32 characters - set_fact: - tg1_name: "{{ (resource_prefix + '-tg1' ) | regex_search('(.{1,32})$') }}" - tg2_name: "{{ (resource_prefix + '-tg2' ) | regex_search('(.{1,32})$') }}" + tg1_name: "ansible-test-{{tiny_prefix}}-asg-t1" + tg2_name: "ansible-test-{{tiny_prefix}}-asg-t2" - name: create target group 1 elb_target_group: name: '{{ tg1_name }}' From 3926a7dc5f1afd2b56fdc60beda23c6ca64eb343 Mon Sep 17 00:00:00 2001 From: Gabor Simon Date: Tue, 6 Jun 2023 14:42:44 +0400 Subject: [PATCH 10/29] ACMServiceManager.list_certificates_with_backoff: explicit key type filter added (#1570) ACMServiceManager.list_certificates_with_backoff: explicit key type filter added SUMMARY Fixes #1567 ACM.Client.list_certificates requires explicit certificate type filter in order to return the non-RSA_2048 certificates too, and this is needed to ensure the idempotency of importing such certificates. 
ISSUE TYPE Bugfix Pull Request COMPONENT NAME acm Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- .../1567-list-certificate-all-key-types.yml | 2 ++ plugins/module_utils/acm.py | 15 ++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/1567-list-certificate-all-key-types.yml diff --git a/changelogs/fragments/1567-list-certificate-all-key-types.yml b/changelogs/fragments/1567-list-certificate-all-key-types.yml new file mode 100644 index 00000000000..9a595838cab --- /dev/null +++ b/changelogs/fragments/1567-list-certificate-all-key-types.yml @@ -0,0 +1,2 @@ +bugfixes: +- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). diff --git a/plugins/module_utils/acm.py b/plugins/module_utils/acm.py index 114232d2f80..e7fee816f9f 100644 --- a/plugins/module_utils/acm.py +++ b/plugins/module_utils/acm.py @@ -63,7 +63,20 @@ def delete_certificate_with_backoff(self, arn): @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) def list_certificates_with_backoff(self, statuses=None): paginator = self.client.get_paginator("list_certificates") - kwargs = dict() + # `list_certificates` requires explicit key type filter, or it returns only RSA_2048 certificates + kwargs = { + "Includes": { + "keyTypes": [ + "RSA_1024", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "EC_prime256v1", + "EC_secp384r1", + "EC_secp521r1", + ], + }, + } if statuses: kwargs["CertificateStatuses"] = statuses return paginator.paginate(**kwargs).build_full_result()["CertificateSummaryList"] From 76fc8a0bbf55b03227c1b49f7c3a1190c8b6a569 Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 6 Jun 2023 21:10:54 +0200 Subject: [PATCH 11/29] Fix sanity (#1599) austoscaling_group* - Fix sanity errors SUMMARY Fix sanity errors - No need to backport to stable-6 or stable-5 because the back ported PRs include this fix already 
(#1598). ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mark Chappell --- .../fragments/20230506-autoscaling_group-fix_sanity.yml | 2 ++ plugins/modules/autoscaling_group.py | 4 ++-- plugins/modules/autoscaling_group_info.py | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml diff --git a/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml b/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml new file mode 100644 index 00000000000..4a51fd47532 --- /dev/null +++ b/changelogs/fragments/20230506-autoscaling_group-fix_sanity.yml @@ -0,0 +1,2 @@ +trivial: + - "Fix sanity errors autoscaling_group and autoscaling_group_info." diff --git a/plugins/modules/autoscaling_group.py b/plugins/modules/autoscaling_group.py index 5ee414c89d4..56389400b28 100644 --- a/plugins/modules/autoscaling_group.py +++ b/plugins/modules/autoscaling_group.py @@ -905,9 +905,9 @@ def get_properties(autoscaling_group): tg_chunk_size = 20 properties["target_group_names"] = [] tg_chunks = [ - properties["target_group_arns"][i : i + tg_chunk_size] + properties["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(properties["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]]) diff --git a/plugins/modules/autoscaling_group_info.py b/plugins/modules/autoscaling_group_info.py index eb376c001fd..8ec36f1bc85 100644 --- a/plugins/modules/autoscaling_group_info.py +++ b/plugins/modules/autoscaling_group_info.py @@ -418,9 +418,9 @@ def find_asgs(conn, module, name=None, tags=None): tg_chunk_size = 20 asg["target_group_names"] = [] tg_chunks = [ - asg["target_group_arns"][i : i + 
tg_chunk_size] + asg["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() asg["target_group_names"].extend( From aae17c7cfdf1215a9a63b7722f0a93cefdc0a54d Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Tue, 13 Jun 2023 09:54:40 +0200 Subject: [PATCH 12/29] Prep amazon.aws 6.1.0 (#1601) (#1605) Update main branch after release 6.1.0 and 6.0.1 SUMMARY Update main branch after release 6.1.0 ISSUE TYPE Bugfix Pull Request Docs Pull Request Feature Pull Request New Module Pull Request COMPONENT NAME ADDITIONAL INFORMATION Reviewed-by: Mike Graves Reviewed-by: Helen Bailey --- CHANGELOG.rst | 45 ++++++++++++++ changelogs/changelog.yaml | 60 +++++++++++++++++++ ...e_identifier-to-create-update-instance.yml | 3 - ...rt-modifying-create-volume-permissions.yml | 3 - ...pecific-attributes-not-added-on-create.yml | 2 - ...1548-s3_object-leading-slash-deprecate.yml | 2 - .../1567-list-certificate-all-key-types.yml | 2 - .../fragments/1578-rds-instance-docs.yml | 2 - ...g_group_info-20-target-groups-per-call.yml | 4 -- ...data_facts-handle_compressed_user_data.yml | 3 - ...ws_ec2-use_ssm_inventory_documentation.yml | 2 - .../fragments/ec2-inventory-hostnames-doc.yml | 2 - .../ec2_instance-eni-attach-idempotency.yml | 3 - changelogs/fragments/test-reqs.yml | 2 - 14 files changed, 105 insertions(+), 30 deletions(-) delete mode 100644 changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml delete mode 100644 changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml delete mode 100644 changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml delete mode 100644 changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml delete mode 100644 
changelogs/fragments/1567-list-certificate-all-key-types.yml delete mode 100644 changelogs/fragments/1578-rds-instance-docs.yml delete mode 100644 changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml delete mode 100644 changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml delete mode 100644 changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml delete mode 100644 changelogs/fragments/ec2-inventory-hostnames-doc.yml delete mode 100644 changelogs/fragments/ec2_instance-eni-attach-idempotency.yml delete mode 100644 changelogs/fragments/test-reqs.yml diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a91805f278b..79caa61e435 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,6 +5,51 @@ amazon.aws Release Notes .. contents:: Topics +v6.1.0 +====== + +Release Summary +--------------- + +This release brings some new features, several bugfixes, and deprecated features are also included. + +Minor Changes +------------- + +- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). +- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + +Deprecated Features +------------------- + +- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + +Bugfixes +-------- + +- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). 
+- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). +- ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575). +- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). +- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). +- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459)." + +v6.0.1 +====== + +Release Summary +--------------- + +This is a patch release that includes some bug fixes for the aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). +- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). 
+ v6.0.0 ====== diff --git a/changelogs/changelog.yaml b/changelogs/changelog.yaml index 8ef0957eea9..3686ed2a1b6 100644 --- a/changelogs/changelog.yaml +++ b/changelogs/changelog.yaml @@ -2114,3 +2114,63 @@ releases: name: aws_collection_constants namespace: null release_date: '2023-05-09' + 6.0.1: + changes: + bugfixes: + - aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided + and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). + - s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). + - s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + release_summary: This is a patch release that includes some bug fixes for the + aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + fragments: + - 1538-s3-null.yml + - 1548-s3_object-leading-slash.yml + - 1551-ec2_inventory-no-region.yml + - 1560-revert_1546.yml + - release_summary.yml + release_date: '2023-05-19' + 6.1.0: + changes: + bugfixes: + - autoscaling_group - fix ValidationError when describing an autoscaling group + that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - autoscaling_group_info - fix ValidationError when describing an autoscaling + group that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). + - 'ec2_metadata_facts - Handle decompression when EC2 instance user-data is + gzip compressed. 
The fetch_url method from ansible.module_utils.urls does + not decompress the user-data unless the header explicitly contains ``Content-Encoding: + gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575).' + - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` + was including ALB-specific attributes when updating an existing ALB but not + when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). + - module_utils.acm - fixes list_certificates returning only RSA_2048 certificates + (https://github.com/ansible-collections/amazon.aws/issues/1567). + - rds_instance - add support for CACertificateIdentifier to create/update rds + instance (https://github.com/ansible-collections/amazon.aws/pull/1459). + deprecated_features: + - s3_object - support for passing object keys with a leading ``/`` has been + deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + minor_changes: + - ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). + - ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + release_summary: This release brings some new features, several bugfixes, and + deprecated features are also included. 
+ fragments: + - 1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml + - 1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml + - 1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml + - 1548-s3_object-leading-slash-deprecate.yml + - 1567-list-certificate-all-key-types.yml + - 1578-rds-instance-docs.yml + - 1593-autoscaling_group_info-20-target-groups-per-call.yml + - 20230526-ec2_mertadata_facts-handle_compressed_user_data.yml + - 20230531-aws_ec2-use_ssm_inventory_documentation.yml + - ec2-inventory-hostnames-doc.yml + - ec2_instance-eni-attach-idempotency.yml + - release_summary.yml + - test-reqs.yml + release_date: '2023-06-07' diff --git a/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml b/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml deleted file mode 100644 index 349a148abe3..00000000000 --- a/changelogs/fragments/1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: -- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459)." diff --git a/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml b/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml deleted file mode 100644 index c3fe986018b..00000000000 --- a/changelogs/fragments/1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml +++ /dev/null @@ -1,3 +0,0 @@ -minor_changes: -- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). 
-- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). diff --git a/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml b/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml deleted file mode 100644 index 763a71e2306..00000000000 --- a/changelogs/fragments/1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: - - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). diff --git a/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml b/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml deleted file mode 100644 index 8260c7af71a..00000000000 --- a/changelogs/fragments/1548-s3_object-leading-slash-deprecate.yml +++ /dev/null @@ -1,2 +0,0 @@ -deprecated_features: -- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). diff --git a/changelogs/fragments/1567-list-certificate-all-key-types.yml b/changelogs/fragments/1567-list-certificate-all-key-types.yml deleted file mode 100644 index 9a595838cab..00000000000 --- a/changelogs/fragments/1567-list-certificate-all-key-types.yml +++ /dev/null @@ -1,2 +0,0 @@ -bugfixes: -- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). 
diff --git a/changelogs/fragments/1578-rds-instance-docs.yml b/changelogs/fragments/1578-rds-instance-docs.yml deleted file mode 100644 index 821f8009161..00000000000 --- a/changelogs/fragments/1578-rds-instance-docs.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Remove incorrect mention of RDS snapshots in RDS instance state parameter, and reformat the remaining parameter explanation." diff --git a/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml b/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml deleted file mode 100644 index 7a763556b80..00000000000 --- a/changelogs/fragments/1593-autoscaling_group_info-20-target-groups-per-call.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -bugfixes: - - autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). - - autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). diff --git a/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml b/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml deleted file mode 100644 index 2abee986495..00000000000 --- a/changelogs/fragments/20230526-ec2_mertadata_facts-handle_compressed_user_data.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - 'ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575).' 
diff --git a/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml b/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml deleted file mode 100644 index 3b769cd3f07..00000000000 --- a/changelogs/fragments/20230531-aws_ec2-use_ssm_inventory_documentation.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Update the ``aws_ec2`` inventory plugins documentation for the ``use_ssm_inventory`` option." diff --git a/changelogs/fragments/ec2-inventory-hostnames-doc.yml b/changelogs/fragments/ec2-inventory-hostnames-doc.yml deleted file mode 100644 index de5663282eb..00000000000 --- a/changelogs/fragments/ec2-inventory-hostnames-doc.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Module documetation fix for hostnames argument. It can be list of str and dict." diff --git a/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml b/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml deleted file mode 100644 index ae50aa484d7..00000000000 --- a/changelogs/fragments/ec2_instance-eni-attach-idempotency.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -bugfixes: - - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). diff --git a/changelogs/fragments/test-reqs.yml b/changelogs/fragments/test-reqs.yml deleted file mode 100644 index 3ae8d00f4de..00000000000 --- a/changelogs/fragments/test-reqs.yml +++ /dev/null @@ -1,2 +0,0 @@ -trivial: - - "Move Galaxy test requirements from old transitional format in tests/requirements.yml to a standard Ansible Galaxy requirements file in tests/integration/requirements.yml." From ad953e83bfe609008d06a8a092fc910caa3493aa Mon Sep 17 00:00:00 2001 From: Helen Bailey Date: Wed, 21 Jun 2023 10:49:04 -0400 Subject: [PATCH 13/29] Backup plan params bugfix (#1611) Backup plan params bugfix SUMMARY This updates the backup_plan module to remove all None values from nested dicts in supplied params. 
Previously nested None values were retained and caused errors when sent through to the boto3 client calls. ISSUE TYPE Bugfix Pull Request COMPONENT NAME backup_plan ADDITIONAL INFORMATION Creating or updating a plan without providing an optional value in a nested dict would result in that value being set to None, and it wasn't being removed. For example: - name: Update Backup plan amazon.aws.backup_plan: backup_plan_name: my-backup-plan rules: - rule_name: my-rule target_backup_vault_name: my-vault schedule_expression: "cron(0 * ? * * *)" lifecycle: move_to_cold_storage_after_days: 30 The optional lifecycle.delete_after_days option in the above example would be set to None by default, and the removal of None values wasn't recursively going through the entire dict. This is now fixed and the above example will work. Reviewed-by: Alina Buzachis Reviewed-by: Helen Bailey Reviewed-by: Bikouo Aubin --- ...up_plan-remove-none-from-nested-params.yml | 3 + plugins/modules/backup_plan.py | 8 +- .../targets/backup_plan/tasks/main.yml | 129 +++++++++++++++++- 3 files changed, 135 insertions(+), 5 deletions(-) create mode 100644 changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml diff --git a/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml b/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml new file mode 100644 index 00000000000..d244b1ee3de --- /dev/null +++ b/changelogs/fragments/202306012-backup_plan-remove-none-from-nested-params.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - backup_plan - Use existing `scrub_none_parameters` function from module_utils to remove None values from nested dicts in supplied params. Nested None values were being retained and causing an error when sent through to the boto3 client operation (https://github.com/ansible-collections/amazon.aws/pull/1611). 
diff --git a/plugins/modules/backup_plan.py b/plugins/modules/backup_plan.py index 10c4d461cbc..93bb132f2d1 100644 --- a/plugins/modules/backup_plan.py +++ b/plugins/modules/backup_plan.py @@ -257,9 +257,10 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters try: from botocore.exceptions import ClientError, BotoCoreError @@ -541,10 +542,9 @@ def main(): plan_name = module.params["backup_plan_name"] plan = { "backup_plan_name": module.params["backup_plan_name"], - "rules": [{k: v for k, v in rule.items() if v is not None} for rule in module.params["rules"] or []], + "rules": [scrub_none_parameters(rule) for rule in module.params["rules"] or []], "advanced_backup_settings": [ - {k: v for k, v in setting.items() if v is not None} - for setting in module.params["advanced_backup_settings"] or [] + scrub_none_parameters(setting) for setting in module.params["advanced_backup_settings"] or [] ], } tags = module.params["tags"] diff --git a/tests/integration/targets/backup_plan/tasks/main.yml b/tests/integration/targets/backup_plan/tasks/main.yml index 082db390cd3..86e35019031 100644 --- a/tests/integration/targets/backup_plan/tasks/main.yml +++ b/tests/integration/targets/backup_plan/tasks/main.yml @@ -119,8 +119,8 @@ copy_actions: - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" lifecycle: - move_to_cold_storage_after_days: 90 delete_after_days: 300 + 
move_to_cold_storage_after_days: 90 tags: status: archive register: backup_plan_update_result @@ -137,6 +137,133 @@ - backup_plan_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules - backup_plan_update_result.tags != backup_plan_info.backup_plans[0].tags + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Update backup plan without nested optional values in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + check_mode: true + register: check_mode_update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values in check mode result + ansible.builtin.assert: + that: + - check_mode_update_without_nested_optional_values_result.exists is true + - check_mode_update_without_nested_optional_values_result.changed is true + - check_mode_update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + + - name: Get backup plan details after update in check mode + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info_after_check_mode_update + + - name: Verify backup plan was not actually updated in check mode + ansible.builtin.assert: + that: + - backup_plan_info_after_check_mode_update.backup_plans[0] == backup_plan_info.backup_plans[0] + + - name: Update backup plan without nested optional values 
+ amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_result.exists is true + - update_without_nested_optional_values_result.changed is true + - update_without_nested_optional_values_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_result.creation_date != backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_result.version_id != backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_result.tags == backup_plan_info.backup_plans[0].tags + + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: updated_backup_plan_info + + - name: Verify backup plan was actually updated + ansible.builtin.assert: + that: + - updated_backup_plan_info.backup_plans[0].backup_plan_name == backup_plan_info.backup_plans[0].backup_plan_name + - updated_backup_plan_info.backup_plans[0].backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - 
updated_backup_plan_info.backup_plans[0].version_id != backup_plan_info.backup_plans[0].version_id + - updated_backup_plan_info.backup_plans[0].backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - updated_backup_plan_info.backup_plans[0].tags == backup_plan_info.backup_plans[0].tags + + - name: Update backup plan without nested optional values - idempotency + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 * ? * * *)" + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_idempotency_result + + - name: Verify backup plan update without nested optional values idempotency result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_idempotency_result.exists is true + - update_without_nested_optional_values_idempotency_result.changed is false + - update_without_nested_optional_values_idempotency_result.backup_plan_id == updated_backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_idempotency_result.backup_plan_arn == updated_backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_idempotency_result.creation_date == updated_backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_idempotency_result.version_id == updated_backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_idempotency_result.backup_plan.rules == updated_backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_idempotency_result.tags == 
updated_backup_plan_info.backup_plans[0].tags + - name: Delete backup plan in check mode amazon.aws.backup_plan: backup_plan_name: "{{ backup_plan_name }}" From 344dbd1a0f6cc6c2e6cfd582ba989f070001444a Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Fri, 23 Jun 2023 09:55:29 +0200 Subject: [PATCH 14/29] Refactor ARN validation code (#1619) Refactor ARN validation code SUMMARY Adds resource_id and resource_type to parse_aws_arn() return value. Adds validate_aws_arn() to handle common pattern matching for ARNs. ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_instance iam_user ADDITIONAL INFORMATION Related to ansible-collections/community.aws#1846 - We've been doing things like assuming the aws partition. Reviewed-by: Alina Buzachis --- changelogs/fragments/1846-arn-validation.yml | 5 + plugins/module_utils/arn.py | 38 +++ plugins/modules/ec2_instance.py | 5 +- plugins/modules/iam_user.py | 7 +- .../module_utils/arn/test_parse_aws_arn.py | 186 +++++++++++++-- .../module_utils/arn/test_validate_aws_arn.py | 217 ++++++++++++++++++ .../ec2_instance/test_determine_iam_role.py | 4 +- 7 files changed, 437 insertions(+), 25 deletions(-) create mode 100644 changelogs/fragments/1846-arn-validation.yml create mode 100644 tests/unit/module_utils/arn/test_validate_aws_arn.py diff --git a/changelogs/fragments/1846-arn-validation.yml b/changelogs/fragments/1846-arn-validation.yml new file mode 100644 index 00000000000..56be4417e26 --- /dev/null +++ b/changelogs/fragments/1846-arn-validation.yml @@ -0,0 +1,5 @@ +minor_changes: +- ec2_instance - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- iam_user - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - added ``validate_aws_arn`` function to handle common pattern matching for ARNs (https://github.com/ansible-collections/amazon.aws/pull/1619). 
+- module_utils.arn - add ``resource_id`` and ``resource_type`` to ``parse_aws_arn`` return values (https://github.com/ansible-collections/amazon.aws/pull/1619). diff --git a/plugins/module_utils/arn.py b/plugins/module_utils/arn.py index 22d91b3f364..d62b4c4d800 100644 --- a/plugins/module_utils/arn.py +++ b/plugins/module_utils/arn.py @@ -6,14 +6,46 @@ import re +def validate_aws_arn( + arn, partition=None, service=None, region=None, account_id=None, resource=None, resource_type=None, resource_id=None +): + details = parse_aws_arn(arn) + + if not details: + return False + + if partition and details.get("partition") != partition: + return False + if service and details.get("service") != service: + return False + if region and details.get("region") != region: + return False + if account_id and details.get("account_id") != account_id: + return False + if resource and details.get("resource") != resource: + return False + if resource_type and details.get("resource_type") != resource_type: + return False + if resource_id and details.get("resource_id") != resource_id: + return False + + return True + + def parse_aws_arn(arn): """ + Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html + The following are the general formats for ARNs. arn:partition:service:region:account-id:resource-id arn:partition:service:region:account-id:resource-type/resource-id arn:partition:service:region:account-id:resource-type:resource-id The specific formats depend on the resource. The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID. + + Note: resource_type handling is very naive, for complex cases it may be necessary to use + "resource" directly instead of resource_type, this will include the resource type and full ID, + including all paths. 
""" m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn) if m is None: @@ -25,6 +57,12 @@ def parse_aws_arn(arn): result.update(dict(account_id=m.group(6))) result.update(dict(resource=m.group(7))) + m2 = re.search(r"^(.*?)[:/](.+)$", m.group(7)) + if m2 is None: + result.update(dict(resource_type=None, resource_id=m.group(7))) + else: + result.update(dict(resource_type=m2.group(1), resource_id=m2.group(2))) + return result diff --git a/plugins/modules/ec2_instance.py b/plugins/modules/ec2_instance.py index a70b6bf15dd..6c5a7e9f713 100644 --- a/plugins/modules/ec2_instance.py +++ b/plugins/modules/ec2_instance.py @@ -975,7 +975,7 @@ from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags @@ -1791,8 +1791,7 @@ def pretty_instance(i): def determine_iam_role(name_or_arn): - result = parse_aws_arn(name_or_arn) - if result and result["service"] == "iam" and result["resource"].startswith("instance-profile/"): + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): return name_or_arn iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: diff --git a/plugins/modules/iam_user.py b/plugins/modules/iam_user.py index 75d412a6bc9..f4f1483cdde 100644 --- a/plugins/modules/iam_user.py +++ b/plugins/modules/iam_user.py @@ -182,8 +182,9 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from 
ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -208,7 +209,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): # List comprehension that looks for any policy in the 'policy_names' list # that does not begin with 'arn'. If there aren't any, short circuit. # If there are, translate friendly name to the full arn - if not any(not policy.startswith("arn:") for policy in policy_names if policy is not None): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): return policy_names allpolicies = {} paginator = connection.get_paginator("list_policies") @@ -218,7 +219,7 @@ def convert_friendly_names_to_arns(connection, module, policy_names): allpolicies[policy["PolicyName"]] = policy["Arn"] allpolicies[policy["Arn"]] = policy["Arn"] try: - return [allpolicies[policy] for policy in policy_names] + return [allpolicies[policy] for policy in policy_names if policy is not None] except KeyError as e: module.fail_json(msg="Couldn't find policy: " + str(e)) diff --git a/tests/unit/module_utils/arn/test_parse_aws_arn.py b/tests/unit/module_utils/arn/test_parse_aws_arn.py index 49375a855de..cc4b4057656 100644 --- a/tests/unit/module_utils/arn/test_parse_aws_arn.py +++ b/tests/unit/module_utils/arn/test_parse_aws_arn.py @@ -24,6 +24,8 @@ region="us-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + 
resource_type="outpost", + resource_id="op-1234567890abcdef0", ), dict( partition="aws-gov", @@ -31,6 +33,8 @@ region="us-gov-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), dict( partition="aws-cn", @@ -38,6 +42,8 @@ region="us-east-1", account_id="123456789012", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), # Start the account ID with 0s, it's a 12 digit *string*, if someone treats # it as an integer the leading 0s can disappear. @@ -47,21 +53,93 @@ region="us-east-1", account_id="000123000123", resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", ), # S3 doesn't "need" region/account_id as bucket names are globally unique - dict(partition="aws", service="s3", region="", account_id="", resource="bucket/object"), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="bucket/object", + resource_type="bucket", + resource_id="object", + ), # IAM is a 'global' service, so the ARNs don't have regions - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="policy/foo/bar/PolicyName"), dict( - partition="aws", service="iam", region="", account_id="123456789012", resource="instance-profile/ExampleProfile" + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="policy/foo/bar/PolicyName", + resource_type="policy", + resource_id="foo/bar/PolicyName", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="instance-profile/ExampleProfile", + resource_type="instance-profile", + resource_id="ExampleProfile", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="root", + resource_type=None, + resource_id="root", ), - dict(partition="aws", service="iam", region="", 
account_id="123456789012", resource="root"), # Some examples with different regions - dict(partition="aws", service="sqs", region="eu-west-3", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="us-gov-east-1", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="sa-east-1", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="ap-northeast-2", account_id="123456789012", resource="example-queue"), - dict(partition="aws", service="sqs", region="ca-central-1", account_id="123456789012", resource="example-queue"), + dict( + partition="aws", + service="sqs", + region="eu-west-3", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="us-gov-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="sa-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ap-northeast-2", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ca-central-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), # Some more unusual service names dict( partition="aws", @@ -69,6 +147,8 @@ region="us-east-1", account_id="123456789012", resource="stateful-rulegroup/ExampleDomainList", + resource_type="stateful-rulegroup", + resource_id="ExampleDomainList", ), dict( partition="aws", @@ -76,6 +156,8 @@ region="us-east-1", account_id="123456789012", resource="group/group-name", + resource_type="group", + resource_id="group-name", ), 
# A special case for resources AWS curate dict( @@ -84,22 +166,90 @@ region="us-east-1", account_id="aws-managed", resource="stateful-rulegroup/BotNetCommandAndControlDomainsActionOrder", + resource_type="stateful-rulegroup", + resource_id="BotNetCommandAndControlDomainsActionOrder", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="aws", + resource="policy/AWSDirectConnectReadOnlyAccess", + resource_type="policy", + resource_id="AWSDirectConnectReadOnlyAccess", ), - dict(partition="aws", service="iam", region="", account_id="aws", resource="policy/AWSDirectConnectReadOnlyAccess"), # Examples merged in from test_arn.py - dict(partition="aws-us-gov", service="iam", region="", account_id="0123456789", resource="role/foo-role"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user/dev/*"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user:test"), - dict(partition="aws-cn", service="iam", region="", account_id="123456789012", resource="user:test"), - dict(partition="aws", service="iam", region="", account_id="123456789012", resource="user"), - dict(partition="aws", service="s3", region="", account_id="", resource="my_corporate_bucket/*"), - dict(partition="aws", service="s3", region="", account_id="", resource="my_corporate_bucket/Development/*"), + dict( + partition="aws-us-gov", + service="iam", + region="", + account_id="0123456789", + resource="role/foo-role", + resource_type="role", + resource_id="foo-role", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user/dev/*", + resource_type="user", + resource_id="dev/*", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + resource_type="user", + resource_id="test", + ), + dict( + partition="aws-cn", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + 
resource_type="user", + resource_id="test", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user", + resource_type=None, + resource_id="user", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/*", + resource_type="my_corporate_bucket", + resource_id="*", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/Development/*", + resource_type="my_corporate_bucket", + resource_id="Development/*", + ), dict( partition="aws", service="rds", region="es-east-1", account_id="000000000000", resource="snapshot:rds:my-db-snapshot", + resource_type="snapshot", + resource_id="rds:my-db-snapshot", ), dict( partition="aws", @@ -107,6 +257,8 @@ region="us-east-1", account_id="012345678901", resource="changeSet/Ansible-StackName-c6884247ede41eb0", + resource_type="changeSet", + resource_id="Ansible-StackName-c6884247ede41eb0", ), ] diff --git a/tests/unit/module_utils/arn/test_validate_aws_arn.py b/tests/unit/module_utils/arn/test_validate_aws_arn.py new file mode 100644 index 00000000000..d730ee6372c --- /dev/null +++ b/tests/unit/module_utils/arn/test_validate_aws_arn.py @@ -0,0 +1,217 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn + +arn_test_inputs = [ + # Just test it's a valid ARN + ("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", True, None), + # Bad ARN + ("arn:was:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", False, None), + # Individual options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"partition": "aws"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"partition": "aws-cn"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"service": "outposts"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"service": "iam"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"region": "us-east-1"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"region": "us-east-2"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"account_id": "123456789012"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"account_id": "111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource": "outpost/op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource": "outpost/op-11111111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_type": "outpost"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_type": "notpost"}, + ), + ( + 
"arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_id": "op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_id": "op-11111111111111111"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_type": "stateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_type": "nopeMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_id": "HelloWorldStateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_id": "CruelWorldStateMachine"}, + ), + # All options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws-cn", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "iam", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-2", + "account_id": "123456789012", + "resource": 
"outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "111111111111", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-11111111111111111", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "notpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-11111111111111111", + }, + ), +] + + +@pytest.mark.parametrize("arn, result, kwargs", arn_test_inputs) +def test_validate_aws_arn(arn, result, kwargs): + kwargs = kwargs or {} + assert validate_aws_arn(arn, **kwargs) == result diff --git a/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py b/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py index 66bf66acecb..0bd9a284e81 100644 --- a/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py +++ b/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py @@ -51,7 +51,7 @@ def __init__(self): @pytest.fixture def 
ec2_instance(monkeypatch): - monkeypatch.setattr(ec2_instance_module, "parse_aws_arn", lambda arn: None) + monkeypatch.setattr(ec2_instance_module, "validate_aws_arn", lambda arn, service, resource_type: None) monkeypatch.setattr(ec2_instance_module, "module", MagicMock()) ec2_instance_module.module.fail_json.side_effect = FailJsonException() ec2_instance_module.module.fail_json_aws.side_effect = FailJsonException() @@ -60,7 +60,7 @@ def ec2_instance(monkeypatch): def test_determine_iam_role_arn(params_object, ec2_instance, monkeypatch): # Revert the default monkey patch to make it simple to try passing a valid ARNs - monkeypatch.setattr(ec2_instance, "parse_aws_arn", utils_arn.parse_aws_arn) + monkeypatch.setattr(ec2_instance, "validate_aws_arn", utils_arn.validate_aws_arn) # Simplest example, someone passes a valid instance profile ARN arn = ec2_instance.determine_iam_role("arn:aws:iam::123456789012:instance-profile/myprofile") From a04109d26f733bbd04cf6d9d5ea8c22196cde217 Mon Sep 17 00:00:00 2001 From: Giovanni Toraldo Date: Tue, 27 Jun 2023 13:22:14 +0200 Subject: [PATCH 15/29] Fix typo in rds_instance docs for master_user_password (#1617) Fix typo in rds_instance docs for master_user_password SUMMARY Fix typo in rds_instance docs for master_user_password attribute ISSUE TYPE Docs Pull Request COMPONENT NAME rds_instance ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Alina Buzachis --- plugins/modules/rds_instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/modules/rds_instance.py b/plugins/modules/rds_instance.py index 527fc6f256b..6607f21dc9b 100644 --- a/plugins/modules/rds_instance.py +++ b/plugins/modules/rds_instance.py @@ -250,7 +250,7 @@ master_user_password: description: - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except C(/), C("), or C(@). To modify the password use I(force_update_password). 
Use I(apply immediately) to change + except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply_immediately) to change the password immediately, otherwise it is updated during the next maintenance window. aliases: - password From 20a8aba8d6961da25cc7df87a8c9b424a9ed670c Mon Sep 17 00:00:00 2001 From: Helen Bailey Date: Tue, 27 Jun 2023 10:55:44 -0400 Subject: [PATCH 16/29] Backup vault tag bugfix (#1610) Backup vault tag bugfix SUMMARY Fixes an error raised in backup_vault when updating tags on an existing vault by calling the correct boto3 client methods for tagging and untagging a resource. This is not applicable when creating a vault as tags are included in the create_backup_vault method params, but when updating a vault we have to call the tag methods individually. ISSUE TYPE Bugfix Pull Request COMPONENT NAME backup_vault ADDITIONAL INFORMATION See the included new integration test task for a command that previously failed and works with the updated code. Reviewed-by: Alina Buzachis Reviewed-by: Helen Bailey Reviewed-by: Mike Graves --- .../20230612-backup_vault-fix-tag-update.yml | 3 + plugins/modules/backup_vault.py | 8 +- .../targets/backup_vault/tasks/main.yml | 180 +++++++++++++++++- 3 files changed, 183 insertions(+), 8 deletions(-) create mode 100644 changelogs/fragments/20230612-backup_vault-fix-tag-update.yml diff --git a/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml b/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml new file mode 100644 index 00000000000..7ed9c45ca0b --- /dev/null +++ b/changelogs/fragments/20230612-backup_vault-fix-tag-update.yml @@ -0,0 +1,3 @@ +--- +bugfixes: + - backup_vault - fix error when updating tags on a backup vault by using the correct boto3 client methods for tagging and untagging backup resources (https://github.com/ansible-collections/amazon.aws/pull/1610). 
diff --git a/plugins/modules/backup_vault.py b/plugins/modules/backup_vault.py index 8d7452431ef..5ae309d1059 100644 --- a/plugins/modules/backup_vault.py +++ b/plugins/modules/backup_vault.py @@ -93,7 +93,6 @@ from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict @@ -150,17 +149,14 @@ def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): return True if tags_to_remove: - remove = {k: curr_tags[k] for k in tags_to_remove} - tags_to_remove = ansible_dict_to_boto3_tag_list(remove) try: - client.remove_tags(ResourceId=vault_arn, TagsList=tags_to_remove) + client.untag_resource(ResourceArn=vault_arn, TagKeyList=tags_to_remove) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to remove tags from the vault") if tags_to_add: - tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_add) try: - client.add_tags(ResourceId=vault_arn, TagsList=tags_to_add) + client.tag_resource(ResourceArn=vault_arn, Tags=tags_to_add) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to add tags to Vault") diff --git a/tests/integration/targets/backup_vault/tasks/main.yml b/tests/integration/targets/backup_vault/tasks/main.yml index 9397c60afc6..24f27b7d597 100644 --- a/tests/integration/targets/backup_vault/tasks/main.yml +++ b/tests/integration/targets/backup_vault/tasks/main.yml @@ -28,6 +28,7 @@ - backup_vault_result_check is changed - backup_vault_result_check.vault.backup_vault_name == backup_vault_name - backup_vault_result_check.vault.encryption_key_arn == "" + - 
backup_vault_result_check.vault.tags.environment == "dev" - name: Create an AWS Backup Vault amazon.aws.backup_vault: @@ -42,7 +43,8 @@ - backup_vault_result is changed - backup_vault_result.vault.backup_vault_name == backup_vault_name - backup_vault_result.vault.encryption_key_arn == key.key_arn - + - backup_vault_result.vault.tags.environment == "dev" + - name: Get backup vault info by passing the vault name amazon.aws.backup_vault_info: backup_vault_names: @@ -53,6 +55,7 @@ that: - vault_info.backup_vaults[0].backup_vault_name == backup_vault_result.vault.backup_vault_name - vault_info.backup_vaults[0].backup_vault_arn == backup_vault_result.vault.backup_vault_arn + - vault_info.backup_vaults[0].tags.environment == "dev" - name: Create an AWS Backup Vault - idempotency check amazon.aws.backup_vault: @@ -67,7 +70,180 @@ - backup_vault_result_idem is not changed - backup_vault_result_idem.vault.backup_vault_name == backup_vault_name - backup_vault_result_idem.vault.encryption_key_arn == key.key_arn - + - backup_vault_result_idem.vault.tags.environment == "dev" + + - name: Update AWS Backup Vault - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + check_mode: true + register: backup_vault_update_check_mode_result + + - name: Verify check mode update result + assert: + that: + - backup_vault_update_check_mode_result is changed + - backup_vault_update_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_check_mode_result.vault.encryption_key_arn == key.key_arn + - backup_vault_update_check_mode_result.vault.tags.environment == "dev" + - backup_vault_update_check_mode_result.vault.tags.owner == "ansible" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_check_mode_vault_info + + - name: Verify backup vault was not updated in check mode + ansible.builtin.assert: + that: 
+ - update_check_mode_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - update_check_mode_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - update_check_mode_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - update_check_mode_vault_info.backup_vaults[0].tags == vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_result + + - name: Verify update result + ansible.builtin.assert: + that: + - backup_vault_update_result is changed + - backup_vault_update_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_result.vault.encryption_key_arn == key.key_arn + - backup_vault_update_result.vault.tags.environment == "dev" + - backup_vault_update_check_mode_result.vault.tags.owner == "ansible" + + - name: Get updated backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info + + - name: Verify backup vault was updated + ansible.builtin.assert: + that: + - updated_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info.backup_vaults[0].tags != vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_idempotency_result + + - name: Verify idempotency update result + ansible.builtin.assert: + that: + - 
backup_vault_update_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info_idempotency + + - name: Verify backup vault was not updated + ansible.builtin.assert: + that: + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_arn == updated_vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info_idempotency.backup_vaults[0].encryption_key_arn == updated_vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info_idempotency.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + check_mode: true + register: backup_vault_update_tags_check_mode_result + + - name: Verify check mode tag update result + ansible.builtin.assert: + that: + - backup_vault_update_tags_check_mode_result is changed + - backup_vault_update_tags_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_check_mode_result.vault.tags | length == 1 + - backup_vault_update_tags_check_mode_result.vault.tags.environment == "test" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_tags_check_mode_info + + - name: Verify backup vault tags were not updated in check mode + ansible.builtin.assert: + that: + - update_tags_check_mode_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - update_tags_check_mode_info.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + 
tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_result + + - name: Verify update tags with purge result + ansible.builtin.assert: + that: + - backup_vault_update_tags_result is changed + - backup_vault_update_tags_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_result.vault.tags | length == 1 + - backup_vault_update_tags_result.vault.tags.environment == "test" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_info + + - name: Verify backup vault tags were updated + ansible.builtin.assert: + that: + - updated_tags_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_tags_info.backup_vaults[0].tags != updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_idempotency_result + + - name: Verify update tags with purge idempotency result + ansible.builtin.assert: + that: + - backup_vault_update_tags_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_idempotency_info + + - name: Verify no changes were made + ansible.builtin.assert: + that: + - updated_tags_idempotency_info.backup_vaults[0].backup_vault_name == updated_tags_info.backup_vaults[0].backup_vault_name + - updated_tags_idempotency_info.backup_vaults[0].tags == updated_tags_info.backup_vaults[0].tags + always: - name: Delete AWS Backup Vault created during this test amazon.aws.backup_vault: From 793b2d1ca0407b0a4367f788d0db0ab765fc6485 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 27 Jun 2023 18:42:57 +0200 Subject: [PATCH 17/29] CI test fixups (#1625) CI test fixups SUMMARY 
ansible/ansible milestone branch just got updated and introduced some breaking changes for us ISSUE TYPE Bugfix Pull Request COMPONENT NAME ec2_security_group kms_key lambda ADDITIONAL INFORMATION Reviewed-by: Mark Chappell Reviewed-by: Mike Graves --- changelogs/fragments/20230627-ci-fixup.yml | 3 ++ .../targets/ec2_security_group/tasks/main.yml | 20 ++++----- .../kms_key/roles/aws_kms/tasks/main.yml | 2 +- .../integration/targets/lambda/tasks/main.yml | 42 ++++++++++--------- 4 files changed, 37 insertions(+), 30 deletions(-) create mode 100644 changelogs/fragments/20230627-ci-fixup.yml diff --git a/changelogs/fragments/20230627-ci-fixup.yml b/changelogs/fragments/20230627-ci-fixup.yml new file mode 100644 index 00000000000..160eb26b459 --- /dev/null +++ b/changelogs/fragments/20230627-ci-fixup.yml @@ -0,0 +1,3 @@ +trivial: +- CI fixup - ``include:`` should be ``include_tasks:``. +- CI fixup - YAML ``Null`` no longer matches any explicit parameter type requirements. diff --git a/tests/integration/targets/ec2_security_group/tasks/main.yml b/tests/integration/targets/ec2_security_group/tasks/main.yml index c4db855d2b7..0f1b5fe433f 100644 --- a/tests/integration/targets/ec2_security_group/tasks/main.yml +++ b/tests/integration/targets/ec2_security_group/tasks/main.yml @@ -32,15 +32,15 @@ Description: "Created by ansible-test" register: vpc_result #TODO(ryansb): Update CI for VPC peering permissions - #- include: ./multi_account.yml - - include: ./diff_mode.yml - - include: ./numeric_protos.yml - - include: ./rule_group_create.yml - - include: ./egress_tests.yml - - include: ./icmp_verbs.yml - - include: ./data_validation.yml - - include: ./multi_nested_target.yml - - include: ./group_info.yml + #- include_tasks: ./multi_account.yml + - include_tasks: ./diff_mode.yml + - include_tasks: ./numeric_protos.yml + - include_tasks: ./rule_group_create.yml + - include_tasks: ./egress_tests.yml + - include_tasks: ./icmp_verbs.yml + - include_tasks: ./data_validation.yml + - 
include_tasks: ./multi_nested_target.yml + - include_tasks: ./group_info.yml # ============================================================ - name: test state=absent (CHECK MODE) @@ -137,7 +137,7 @@ # ============================================================ - name: tests IPv6 with the default VPC - include: ./ipv6_default_tests.yml + include_tasks: ./ipv6_default_tests.yml when: default_vpc - name: test IPv6 with a specified VPC diff --git a/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml b/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml index 98b6ba937ae..e411eac615f 100644 --- a/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml +++ b/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml @@ -8,4 +8,4 @@ security_token: '{{ security_token | default(omit) }}' region: '{{ aws_region }}' block: - - include: ./test_{{ inventory_hostname }}.yml + - include_tasks: ./test_{{ inventory_hostname }}.yml diff --git a/tests/integration/targets/lambda/tasks/main.yml b/tests/integration/targets/lambda/tasks/main.yml index 58a0d7d4ff4..2fcadb1c42c 100644 --- a/tests/integration/targets/lambda/tasks/main.yml +++ b/tests/integration/targets/lambda/tasks/main.yml @@ -469,25 +469,29 @@ - lambda_infos_mappings.functions[0].policy is undefined - lambda_infos_mappings.functions[0].tags is undefined - # More Lambda update tests - - name: test state=present with all nullable variables explicitly set to null - lambda: - name: '{{lambda_function_name}}' - runtime: '{{ lambda_python_runtime }}' - role: '{{ lambda_role_name }}' - zip_file: '{{zip_res.dest}}' - handler: '{{ lambda_python_handler }}' - description: - vpc_subnet_ids: - vpc_security_group_ids: - environment_variables: - dead_letter_arn: - register: result - - name: assert lambda remains as before - assert: - that: - - result is not failed - - result.changed == False + # 2023-06-27 + # An explicit "None" is no longer permitted by Ansible for parameters with a type other than 
"raw" + # (None is still the implicit value for "not set") + # + # # More Lambda update tests + # - name: test state=present with all nullable variables explicitly set to null + # lambda: + # name: '{{lambda_function_name}}' + # runtime: '{{ lambda_python_runtime }}' + # role: '{{ lambda_role_name }}' + # zip_file: '{{zip_res.dest}}' + # handler: '{{ lambda_python_handler }}' + # description: + # vpc_subnet_ids: + # vpc_security_group_ids: + # environment_variables: + # dead_letter_arn: + # register: result + # - name: assert lambda remains as before + # assert: + # that: + # - result is not failed + # - result.changed == False - name: test putting an environment variable changes lambda (check mode) lambda: From 271523ea97f73f0e4c5a027eac4f4fc72f4b7093 Mon Sep 17 00:00:00 2001 From: Taeho Park <113317744+taehopark32@users.noreply.github.com> Date: Tue, 27 Jun 2023 13:26:29 -0400 Subject: [PATCH 18/29] Lambda execute stack trace should be not formatted with extra space (#1615) Lambda execute stack trace should be not formatted with extra space SUMMARY Fixes #1497 ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/lambda_execute.py ADDITIONAL INFORMATION Reviewed-by: Jill R Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis Reviewed-by: Mike Graves --- .../fragments/1615-no_formatted_with_extra_space.yml | 3 +++ plugins/modules/lambda_execute.py | 8 ++------ 2 files changed, 5 insertions(+), 6 deletions(-) create mode 100644 changelogs/fragments/1615-no_formatted_with_extra_space.yml diff --git a/changelogs/fragments/1615-no_formatted_with_extra_space.yml b/changelogs/fragments/1615-no_formatted_with_extra_space.yml new file mode 100644 index 00000000000..693362b6950 --- /dev/null +++ b/changelogs/fragments/1615-no_formatted_with_extra_space.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- Fixes to the stack trace output, where it does not contain spaces between each character. The module had incorrectly always outputted extra spaces between each character. 
(https://github.com/ansible-collections/amazon.aws/pull/1615) \ No newline at end of file diff --git a/plugins/modules/lambda_execute.py b/plugins/modules/lambda_execute.py index 7f4b7aea109..10d38d1f59d 100644 --- a/plugins/modules/lambda_execute.py +++ b/plugins/modules/lambda_execute.py @@ -262,14 +262,10 @@ def main(): "Function executed, but there was an error in the Lambda function. " "Message: {errmsg}, Type: {type}, Stack Trace: {trace}" ) + error_data = { # format the stacktrace sent back as an array into a multiline string - "trace": "\n".join( - [ - " ".join([str(x) for x in line]) # cast line numbers to strings - for line in results.get("output", {}).get("stackTrace", []) - ] - ), + "trace": "\n".join(results.get("output", {}).get("stackTrace", [])), "errmsg": results["output"].get("errorMessage"), "type": results["output"].get("errorType"), } From 6a1f875ef2009578a15cfe39df4af65f6c078746 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Tue, 27 Jun 2023 23:03:09 +0200 Subject: [PATCH 19/29] New modules: iam_instance_profile(_info) (#1614) New modules: iam_instance_profile(_info) SUMMARY New modules for listing/managing IAM Instance Profiles ISSUE TYPE New Module Pull Request COMPONENT NAME iam_instance_profile iam_instance_profile_info ADDITIONAL INFORMATION Fixes: ansible-collections/community.aws#1842 Reviewed-by: Alina Buzachis Reviewed-by: Mark Chappell --- .../fragments/1843-iam_instance_profile.yml | 2 + meta/runtime.yml | 2 + plugins/module_utils/iam.py | 210 +++++++- plugins/modules/iam_instance_profile.py | 343 ++++++++++++ plugins/modules/iam_instance_profile_info.py | 132 +++++ .../targets/iam_instance_profile/aliases | 3 + .../iam_instance_profile/defaults/main.yml | 12 + .../files/deny-assume.json | 10 + .../iam_instance_profile/meta/main.yml | 1 + .../iam_instance_profile/tasks/main.yml | 510 ++++++++++++++++++ .../iam_instance_profile/tasks/tags.yml | 298 ++++++++++ 11 files changed, 1520 insertions(+), 3 deletions(-) create mode 100644 
changelogs/fragments/1843-iam_instance_profile.yml create mode 100644 plugins/modules/iam_instance_profile.py create mode 100644 plugins/modules/iam_instance_profile_info.py create mode 100644 tests/integration/targets/iam_instance_profile/aliases create mode 100644 tests/integration/targets/iam_instance_profile/defaults/main.yml create mode 100644 tests/integration/targets/iam_instance_profile/files/deny-assume.json create mode 100644 tests/integration/targets/iam_instance_profile/meta/main.yml create mode 100644 tests/integration/targets/iam_instance_profile/tasks/main.yml create mode 100644 tests/integration/targets/iam_instance_profile/tasks/tags.yml diff --git a/changelogs/fragments/1843-iam_instance_profile.yml b/changelogs/fragments/1843-iam_instance_profile.yml new file mode 100644 index 00000000000..3cfb18270d4 --- /dev/null +++ b/changelogs/fragments/1843-iam_instance_profile.yml @@ -0,0 +1,2 @@ +trivial: +- new modules - iam_instance_profile, iam_instance_profile_info diff --git a/meta/runtime.yml b/meta/runtime.yml index 0242673f7ec..b4e6d17cd3e 100644 --- a/meta/runtime.yml +++ b/meta/runtime.yml @@ -67,6 +67,8 @@ action_groups: - elb_application_lb_info - elb_classic_lb - execute_lambda + - iam_instance_profile + - iam_instance_profile_info - iam_policy - iam_policy_info - iam_user diff --git a/plugins/module_utils/iam.py b/plugins/module_utils/iam.py index 3b08f1dac4e..cde0aff2b95 100644 --- a/plugins/module_utils/iam.py +++ b/plugins/module_utils/iam.py @@ -3,16 +3,73 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from copy import deepcopy + try: import botocore except ImportError: - pass + pass # Modules are responsible for handling this. 
from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .retries import AWSRetry -from .botocore import is_boto3_error_code from .arn import parse_aws_arn +from .botocore import is_boto3_error_code +from .exceptions import AnsibleAWSError +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict + + +class AnsibleIAMError(AnsibleAWSError): + pass + + +@AWSRetry.jittered_backoff() +def _tag_iam_instance_profile(client, **kwargs): + client.tag_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _untag_iam_instance_profile(client, **kwargs): + client.untag_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _get_iam_instance_profiles(client, **kwargs): + return client.get_instance_profile(**kwargs)["InstanceProfile"] + + +@AWSRetry.jittered_backoff() +def _list_iam_instance_profiles(client, **kwargs): + paginator = client.get_paginator("list_instance_profiles") + return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"] + + +@AWSRetry.jittered_backoff() +def _list_iam_instance_profiles_for_role(client, **kwargs): + paginator = client.get_paginator("list_instance_profiles_for_role") + return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"] + + +@AWSRetry.jittered_backoff() +def _create_instance_profile(client, **kwargs): + return client.create_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _delete_instance_profile(client, **kwargs): + client.delete_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _add_role_to_instance_profile(client, **kwargs): + client.add_role_to_instance_profile(**kwargs) + + +@AWSRetry.jittered_backoff() +def _remove_role_from_instance_profile(client, **kwargs): + client.remove_role_from_instance_profile(**kwargs) def get_aws_account_id(module): @@ -76,3 +133,150 @@ def 
get_aws_account_info(module): ) return (to_native(account_id), to_native(partition)) + + +def create_iam_instance_profile(client, name, path, tags): + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + try: + result = _create_instance_profile(client, InstanceProfileName=name, Path=path, Tags=boto3_tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to create instance profile", exception=e) + return result["InstanceProfile"] + + +def delete_iam_instance_profile(client, name): + try: + _delete_instance_profile(client, InstanceProfileName=name) + except is_boto3_error_code("NoSuchEntity"): + # Deletion already happened. + return False + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to delete instance profile", exception=e) + return True + + +def add_role_to_iam_instance_profile(client, profile_name, role_name): + try: + _add_role_to_instance_profile(client, InstanceProfileName=profile_name, RoleName=role_name) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError( + message="Unable to add role to instance profile", + exception=e, + profile_name=profile_name, + role_name=role_name, + ) + return True + + +def remove_role_from_iam_instance_profile(client, profile_name, role_name): + try: + _remove_role_from_instance_profile(client, InstanceProfileName=profile_name, RoleName=role_name) + except is_boto3_error_code("NoSuchEntity"): + # Deletion already happened. 
+ return False + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError( + message="Unable to remove role from instance profile", + exception=e, + profile_name=profile_name, + role_name=role_name, + ) + return True + + +def list_iam_instance_profiles(client, name=None, prefix=None, role=None): + """ + Returns a list of IAM instance profiles in boto3 format. + Profiles need to be converted to Ansible format using normalize_iam_instance_profile before being displayed. + + See also: normalize_iam_instance_profile + """ + try: + if role: + return _list_iam_instance_profiles_for_role(client, RoleName=role) + if name: + # Unlike the others this returns a single result, make this a list with 1 element. + return [_get_iam_instance_profiles(client, InstanceProfileName=name)] + if prefix: + return _list_iam_instance_profiles(client, PathPrefix=prefix) + return _list_iam_instance_profiles(client) + except is_boto3_error_code("NoSuchEntity"): + return [] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to list instance profiles", exception=e) + + +def normalize_iam_instance_profile(profile): + """ + Converts a boto3 format IAM instance profile into "Ansible" format + """ + + new_profile = camel_dict_to_snake_dict(deepcopy(profile)) + if profile.get("Roles"): + new_profile["roles"] = [normalize_iam_role(role) for role in profile.get("Roles")] + if profile.get("Tags"): + new_profile["tags"] = boto3_tag_list_to_ansible_dict(profile.get("Tags")) + else: + new_profile["tags"] = {} + new_profile["original"] = profile + return new_profile + + +def normalize_iam_role(role): + """ + Converts a boto3 format IAM instance role into "Ansible" format + """ + + new_role = camel_dict_to_snake_dict(deepcopy(role)) + if role.get("InstanceProfiles"): + new_role["instance_profiles"] = [ 
+ normalize_iam_instance_profile(profile) for profile in role.get("InstanceProfiles") + ] + if role.get("AssumeRolePolicyDocument"): + new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + if role.get("Tags"): + new_role["tags"] = boto3_tag_list_to_ansible_dict(role.get("Tags")) + else: + new_role["tags"] = {} + new_role["original"] = role + return new_role + + +def tag_iam_instance_profile(client, name, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + try: + result = _tag_iam_instance_profile(client, InstanceProfileName=name, Tags=boto3_tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to tag instance profile", exception=e) + + +def untag_iam_instance_profile(client, name, tags): + if not tags: + return + try: + result = _untag_iam_instance_profile(client, InstanceProfileName=name, TagKeys=tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleIAMError(message="Unable to untag instance profile", exception=e) diff --git a/plugins/modules/iam_instance_profile.py b/plugins/modules/iam_instance_profile.py new file mode 100644 index 00000000000..b34b6b1305b --- /dev/null +++ b/plugins/modules/iam_instance_profile.py @@ -0,0 +1,343 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile +version_added: 6.2.0 +short_description: manage IAM instance profiles +description: + - Manage IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + state: + description: + - Desired state of the instance profile. 
+ type: str + choices: ["absent", "present"] + default: "present" + name: + description: + - Name of an instance profile. + aliases: + - instance_profile_name + type: str + required: True + prefix: + description: + - The path prefix for filtering the results. + aliases: ["path_prefix", "path"] + type: str + default: "/" + role: + description: + - The name of the role to attach to the instance profile. + - To remove all roles from the instance profile set I(role=""). + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + path_prefix: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profile: + description: The IAM instance profile. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AIPA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile.
+ returned: always + type: list + sample: [] + tags: + description: Instance profile tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + +from copy import deepcopy + +from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def describe_iam_instance_profile(client, name, prefix): + profiles = [] + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + if not profiles: + return None + + return normalize_iam_instance_profile(profiles[0]) + + +def create_instance_profile(client, name, path, tags, check_mode): + if check_mode: + return True, {"instance_profile_name": name, "path": path, "tags": tags or {}, "roles": []} + + profile = create_iam_instance_profile(client, name, path, tags) + return True, normalize_iam_instance_profile(profile) + + +def ensure_tags( + original_profile, + client, + name, + tags, + purge_tags, + check_mode, +): + if tags is None: + return 
False, original_profile + + original_tags = original_profile.get("tags") or {} + + tags_to_set, tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False, original_profile + + new_profile = deepcopy(original_profile) + desired_tags = deepcopy(original_tags) + + for key in tag_keys_to_unset: + desired_tags.pop(key, None) + desired_tags.update(tags_to_set) + new_profile["tags"] = desired_tags + + if not check_mode: + untag_iam_instance_profile(client, name, tag_keys_to_unset) + tag_iam_instance_profile(client, name, tags_to_set) + + return True, new_profile + + +def ensure_role( + original_profile, + client, + name, + role, + check_mode, +): + if role is None: + return False, original_profile + + if role == "" and not original_profile.get("roles"): + return False, original_profile + else: + desired_role = [] + + if original_profile.get("roles") and original_profile.get("roles")[0].get("role_name", None) == role: + return False, original_profile + else: + desired_role = [{"role_name": role}] + + new_profile = deepcopy(original_profile) + new_profile["roles"] = desired_role + + if check_mode: + return True, new_profile + + if original_profile.get("roles"): + # We're changing the role, so we always need to remove the existing one first + remove_role_from_iam_instance_profile(client, name, original_profile["roles"][0]["role_name"]) + if role: + add_role_to_iam_instance_profile(client, name, role) + + return True, new_profile + + +def ensure_present( + original_profile, + client, + name, + path, + tags, + purge_tags, + role, + check_mode, +): + changed = False + if not original_profile: + changed, new_profile = create_instance_profile( + client, + name=name, + path=path, + tags=tags, + check_mode=check_mode, + ) + else: + new_profile = deepcopy(original_profile) + + role_changed, new_profile = ensure_role( + new_profile, + client, + name, + role, + check_mode, + ) + + tags_changed, new_profile = 
ensure_tags( + new_profile, + client, + name, + tags, + purge_tags, + check_mode, + ) + + changed |= role_changed + changed |= tags_changed + + return changed, new_profile + + +def ensure_absent( + original_profile, + client, + name, + prefix, + check_mode, +): + if not original_profile: + return False + + if check_mode: + return True + + roles = original_profile.get("roles") or [] + for role in roles: + remove_role_from_iam_instance_profile(client, name, role.get("role_name")) + + return delete_iam_instance_profile(client, name) + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"], required=True), + prefix=dict(aliases=["path_prefix", "path"], default="/"), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(aliases=["resource_tags"], type="dict"), + purge_tags=dict(type="bool", default=True), + role=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + try: + name = module.params["name"] + prefix = module.params["prefix"] + state = module.params["state"] + + original_profile = describe_iam_instance_profile(client, name, prefix) + + if state == "absent": + changed = ensure_absent( + original_profile, + client, + name, + prefix, + module.check_mode, + ) + final_profile = None + else: + changed, final_profile = ensure_present( + original_profile, + client, + name, + prefix, + module.params["tags"], + module.params["purge_tags"], + module.params["role"], + module.check_mode, + ) + + if not module.check_mode: + final_profile = describe_iam_instance_profile(client, name, prefix) + + except AnsibleIAMError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) + + results = { + "changed": changed, + "iam_instance_profile": final_profile, + } + if changed: + results["diff"] = { + "before": 
original_profile, + "after": final_profile, + } + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/plugins/modules/iam_instance_profile_info.py b/plugins/modules/iam_instance_profile_info.py new file mode 100644 index 00000000000..6b7f5ed2468 --- /dev/null +++ b/plugins/modules/iam_instance_profile_info.py @@ -0,0 +1,132 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile_info +version_added: 6.2.0 +short_description: gather information on IAM instance profiles +description: + - Gathers information about IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + name: + description: + - Name of an instance profile to search for. + - Mutually exclusive with I(prefix). + aliases: + - instance_profile_name + type: str + path_prefix: + description: + - The path prefix for filtering the results. + - Mutually exclusive with I(name). + aliases: ["path", "prefix"] + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + path_prefix: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profiles: + description: List of IAM instance profiles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile.
+ returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def describe_iam_instance_profiles(module, client): + name = module.params["name"] + prefix = module.params["path_prefix"] + profiles = [] + try: + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + except AnsibleIAMError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) + + return [normalize_iam_instance_profile(p) for p in profiles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", 
retry_decorator=AWSRetry.jittered_backoff()) + module.exit_json(changed=False, iam_instance_profiles=describe_iam_instance_profiles(module, client)) + + +if __name__ == "__main__": + main() diff --git a/tests/integration/targets/iam_instance_profile/aliases b/tests/integration/targets/iam_instance_profile/aliases new file mode 100644 index 00000000000..e381149ffed --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/aliases @@ -0,0 +1,3 @@ +cloud/aws + +iam_instance_profile_info diff --git a/tests/integration/targets/iam_instance_profile/defaults/main.yml b/tests/integration/targets/iam_instance_profile/defaults/main.yml new file mode 100644 index 00000000000..28290d57856 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/defaults/main.yml @@ -0,0 +1,12 @@ +--- +test_profile: '{{ resource_prefix }}-iam-ip' +test_profile_complex: '{{ resource_prefix }}-iam-ip-complex' +test_role: '{{ resource_prefix }}-iam-ipr' +test_path: '/{{ resource_prefix }}-ip/' +safe_managed_policy: 'AWSDenyAll' + +test_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value diff --git a/tests/integration/targets/iam_instance_profile/files/deny-assume.json b/tests/integration/targets/iam_instance_profile/files/deny-assume.json new file mode 100644 index 00000000000..73e87715862 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/files/deny-assume.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { "Service": "ec2.amazonaws.com" }, + "Effect": "Deny" + } + ] +} diff --git a/tests/integration/targets/iam_instance_profile/meta/main.yml b/tests/integration/targets/iam_instance_profile/meta/main.yml new file mode 100644 index 00000000000..32cf5dda7ed --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/meta/main.yml @@ -0,0 +1 @@ +dependencies: [] diff --git 
a/tests/integration/targets/iam_instance_profile/tasks/main.yml b/tests/integration/targets/iam_instance_profile/tasks/main.yml new file mode 100644 index 00000000000..f628e617117 --- /dev/null +++ b/tests/integration/targets/iam_instance_profile/tasks/main.yml @@ -0,0 +1,510 @@ +--- +# Tests for iam_instance_profile and iam_instance_profile_info +# + +- name: "Setup AWS connection info" + module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + - community.general + block: + # =================================================================== + # Prepare + + - name: "Prepare IAM Roles" + iam_role: + state: present + name: "{{ item }}" + path: "{{ test_path }}" + create_instance_profile: True + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + managed_policies: + - "{{ safe_managed_policy }}" + wait: True + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + + # =================================================================== + # Test + + # =================================================================== + + - name: "Create minimal Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create minimal Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create minimal Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Create minimal Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - 
profile_result is not changed + + # =================================================================== + + - include_tasks: 'tags.yml' + + # =================================================================== + + - name: "Add role to Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Add role to Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Add role to Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Add role to Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # ===== + + - name: "Replace role on Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Replace role on Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Replace role on Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Replace role on Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + register: profile_result + + - assert: + that: + - 
profile_result is not changed + + # ===== + + - name: "Remove role from Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Remove role from Instance Profile" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Remove role from Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Remove role from Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile }}" + role: "" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "Create complex Instance Profile (CHECK)" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create complex Instance Profile" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Create complex Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Create complex Instance Profile - Idempotent" + iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ 
test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "List all Instance Profiles (no filter)" + iam_instance_profile_info: + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length >= 4 + - '"{{ test_role }}" in profile_names' + - '"{{ test_role }}-2" in profile_names' + - '"{{ test_profile }}" in profile_names' + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List all Instance Profiles (filter by path)" + iam_instance_profile_info: + path: "{{ test_path }}" + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length == 3 + - '"{{ test_role }}" in profile_names' + - '"{{ test_role }}-2" in profile_names' + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - 
'"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List all Instance Profiles (filter by name - complex)" + iam_instance_profile_info: + name: "{{ test_profile_complex }}" + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - '"{{ test_profile_complex }}" in profile_names' + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"tags" in complex_profile' + - complex_profile.tags == test_tags + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in 
complex_profile.roles[0]' + - complex_profile.roles[0].role_name == "{{ test_role }}-2" + - '"tags" in complex_profile.roles[0]' + - complex_profile.roles[0].tags == {} + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: "List an Instance Profile (filter by name)" + iam_instance_profile_info: + name: "{{ test_profile }}" + register: profile_info + + - assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - '"arn" in simple_profile' + - '"create_date" in simple_profile' + - '"instance_profile_id" in simple_profile' + - '"instance_profile_name" in simple_profile' + - simple_profile.instance_profile_name == test_profile + - '"path" in simple_profile' + - simple_profile.path == "/" + - '"tags" in simple_profile' + - simple_profile.tags == {} + - '"roles" in simple_profile' + - simple_profile.roles | length == 0 + vars: + simple_profile: '{{ profile_info.iam_instance_profiles[0] }}' + + # =================================================================== + + - name: "Delete minimal Instance Profile (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete minimal Instance Profile" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete minimal Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Delete minimal Instance Profile - Idempotent" + iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + 
+ - assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: "Delete complex Instance Profile (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete complex Instance Profile" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - assert: + that: + - profile_result is changed + + - name: "Delete complex Instance Profile - Idempotent (CHECK)" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: True + register: profile_result + + - assert: + that: + - profile_result is not changed + + - name: "Delete complex Instance Profile - Idempotent" + iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - assert: + that: + - profile_result is not changed + + always: + # =================================================================== + # Cleanup + +# - name: "iam_instance_profile_info after Role deletion" +# iam_instance_profile_info: +# ignore_errors: true + + - name: "Delete Instance Profiles" + iam_instance_profile: + state: absent + name: "{{ item }}" + ignore_errors: true + loop: + - "{{ test_profile }}" + - "{{ test_profile_complex }}" + - "{{ test_role }}" + - "{{ test_role }}-2" + + - name: "Remove IAM Roles" + iam_role: + state: absent + name: "{{ item }}" + path: "{{ test_path }}" + delete_instance_profile: yes + ignore_errors: true + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + +# - name: "iam_role_info after Role deletion" +# iam_role_info: +# path: "{{ test_path }}" +# ignore_errors: true diff --git a/tests/integration/targets/iam_instance_profile/tasks/tags.yml b/tests/integration/targets/iam_instance_profile/tasks/tags.yml new file mode 100644 index 00000000000..5185301b8a4 --- 
/dev/null +++ b/tests/integration/targets/iam_instance_profile/tasks/tags.yml @@ -0,0 +1,298 @@ +- vars: + first_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + 'New Key with Spaces': Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + final_tags: + 'Key with Spaces': Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + 'New Key with Spaces': Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + module_defaults: + amazon.aws.iam_instance_profile: + name: '{{ test_profile }}' + amazon.aws.iam_instance_profile_info: + name: "{{ test_profile }}" + block: + + # ============================================================ + # + + - name: (check) add tags + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: add tags + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + + - name: get instance profile facts + iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + - name: (check) add tags - IDEMPOTENCY + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - 
name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: add tags - IDEMPOTENCY + iam_instance_profile: + tags: '{{ first_tags }}' + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + # ============================================================ + + - name: (check) modify tags with purge + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: modify tags with purge + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == second_tags + + - name: (check) modify tags with purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: modify tags with purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ second_tags }}' + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - 
tag_profile_info.iam_instance_profiles[0].tags == second_tags + + # ============================================================ + + - name: (check) modify tags without purge + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: modify tags without purge + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + - name: (check) modify tags without purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: modify tags without purge - IDEMPOTENCY + iam_instance_profile: + tags: '{{ third_tags }}' + state: 'present' + purge_tags: False + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) No change to tags without setting tags + iam_instance_profile: + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is not changed + + - name: No change to tags without setting tags + iam_instance_profile: 
+ state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) remove all tags + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would change + assert: + that: + - tag_profile is changed + + - name: remove all tags + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + - name: get instance profile facts + iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} + + - name: (check) remove all tags - IDEMPOTENCY + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + check_mode: True + + - name: assert would not change + assert: + that: + - tag_profile is not changed + + - name: remove all tags - IDEMPOTENCY + iam_instance_profile: + tags: {} + state: 'present' + register: tag_profile + - name: get instance profile + iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} From 3594dec0f50f1aa2e0cebf8b409b4e6b1e0e6739 Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Mon, 3 Jul 2023 16:29:23 +0200 Subject: [PATCH 20/29] s3_object - allow recursive copy of all objects in S3 bucket 
(#1608) s3_object - allow recursive copy of all objects in S3 bucket SUMMARY Add support to copy recursively all objects from one bucket to another one, user can set prefix to limit the object to copy. closes #1379 ISSUE TYPE Feature Pull Request COMPONENT NAME s3_object Reviewed-by: Helen Bailey Reviewed-by: Bikouo Aubin --- ...bject-support-copy-objects-recursively.yml | 2 + plugins/modules/s3_object.py | 244 +++++++++++------- .../s3_object/tasks/copy_recursively.yml | 152 +++++++++++ .../targets/s3_object/tasks/main.yml | 2 + tests/unit/plugins/modules/test_s3_object.py | 10 +- 5 files changed, 308 insertions(+), 102 deletions(-) create mode 100644 changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml create mode 100644 tests/integration/targets/s3_object/tasks/copy_recursively.yml diff --git a/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml b/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml new file mode 100644 index 00000000000..9157ec0d0b0 --- /dev/null +++ b/changelogs/fragments/20230612-s3_object-support-copy-objects-recursively.yml @@ -0,0 +1,2 @@ +minor_changes: +- s3_object - Allow recursive copy of objects in S3 bucket (https://github.com/ansible-collections/amazon.aws/issues/1379). diff --git a/plugins/modules/s3_object.py b/plugins/modules/s3_object.py index 84f2c0c7b5c..8f36df398f0 100644 --- a/plugins/modules/s3_object.py +++ b/plugins/modules/s3_object.py @@ -228,11 +228,19 @@ type: str description: - key name of the source object. - required: true + - if not specified, all the objects of the I(copy_src.bucket) will be copied into the specified bucket. + required: false version_id: type: str description: - version ID of the source object. + prefix: + description: + - Copy all the keys that begin with the specified prefix. + - Ignored if I(copy_src.object) is supplied. 
+ default: "" + type: str + version_added: 6.2.0 validate_bucket_name: description: - Whether the bucket name should be validated to conform to AWS S3 naming rules. @@ -370,6 +378,14 @@ copy_src: bucket: srcbucket object: /source/key.txt + +- name: Copy all the objects with name starting with 'ansible_' + amazon.aws.s3_object: + bucket: mybucket + mode: copy + copy_src: + bucket: srcbucket + prefix: 'ansible_' """ RETURN = r""" @@ -566,7 +582,7 @@ def paginated_versioned_list_with_fallback(s3, **pagination_params): yield [{"Key": key}] -def list_keys(module, s3, bucket, prefix, marker, max_keys): +def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None): pagination_params = { "Bucket": bucket, "Prefix": prefix, @@ -576,8 +592,7 @@ def list_keys(module, s3, bucket, prefix, marker, max_keys): pagination_params = {k: v for k, v in pagination_params.items() if v} try: - keys = list(paginated_list(s3, **pagination_params)) - module.exit_json(msg="LIST operation complete", s3_keys=keys) + return list(paginated_list(s3, **pagination_params)) except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, @@ -892,78 +907,6 @@ def put_download_url(s3, bucket, obj, expiry): return url -def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag): - if module.check_mode: - module.exit_json(msg="COPY operation skipped - running in check mode", changed=True) - try: - params = {"Bucket": bucket, "Key": obj} - bucketsrc = { - "Bucket": module.params["copy_src"].get("bucket"), - "Key": module.params["copy_src"].get("object"), - } - version = None - if module.params["copy_src"].get("version_id"): - version = module.params["copy_src"].get("version_id") - bucketsrc.update({"VersionId": version}) - if not key_check( - module, - s3, - bucketsrc["Bucket"], - bucketsrc["Key"], - version=version, - validate=validate, - ): - # Key does not exist in source bucket - module.exit_json( - msg=f"Key {bucketsrc['Key']} does not exist in bucket 
{bucketsrc['Bucket']}.", - changed=False, - ) - - s_etag = get_etag(s3, bucketsrc["Bucket"], bucketsrc["Key"], version=version) - if s_etag == d_etag: - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - if not changed: - module.exit_json( - msg="ETag from source and destination are the same", - changed=False, - ) - else: - module.exit_json( - msg="tags successfully updated.", - changed=changed, - tags=tags, - ) - else: - params.update({"CopySource": bucketsrc}) - params.update( - get_extra_params( - encrypt, - module.params.get("encryption_mode"), - module.params.get("encryption_kms_key_id"), - metadata, - ) - ) - s3.copy_object(aws_retry=True, **params) - put_object_acl(module, s3, bucket, obj) - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - module.exit_json( - msg=f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}.", - tags=tags, - changed=True, - ) - except ( - botocore.exceptions.BotoCoreError, - botocore.exceptions.ClientError, - boto3.exceptions.Boto3Error, - ) as e: # pylint: disable=duplicate-except - raise S3ObjectFailure( - f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('Bucket')}.", - e, - ) - - def get_current_object_tags_dict(module, s3, bucket, obj, version=None): try: if version: @@ -1230,8 +1173,7 @@ def s3_object_do_delobj(module, connection, connection_v4, s3_vars): def s3_object_do_list(module, connection, connection_v4, s3_vars): # If the bucket does not exist then bail out - list_keys( - module, + keys = list_keys( connection, s3_vars["bucket"], s3_vars["prefix"], @@ -1239,6 +1181,8 @@ def s3_object_do_list(module, connection, connection_v4, s3_vars): s3_vars["max_keys"], ) + module.exit_json(msg="LIST operation complete", s3_keys=keys) + def s3_object_do_create(module, connection, connection_v4, s3_vars): # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified @@ -1330,23 +1274,132 @@ def 
s3_object_do_getstr(module, connection, connection_v4, s3_vars): module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") +def check_object_tags(module, connection, bucket, obj): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + diff = False + if tags: + current_tags_dict = get_current_object_tags_dict(module, connection, bucket, obj) + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_tags_dict.update(tags) + diff = current_tags_dict != tags + return diff + + +def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None): + try: + params = {"Bucket": bucket, "Key": obj} + if not key_check(module, s3, src_bucket, src_obj, version=versionId, validate=validate): + # Key does not exist in source bucket + module.exit_json( + msg=f"Key {src_obj} does not exist in bucket {src_bucket}.", + changed=False, + ) + + s_etag = get_etag(s3, src_bucket, src_obj, version=versionId) + d_etag = get_etag(s3, bucket, obj) + if s_etag == d_etag: + if module.check_mode: + changed = check_object_tags(module, s3, bucket, obj) + result = {} + if changed: + result.update({"msg": "Would have update object tags is not running in check mode."}) + return changed, result + + # Ensure tags + tags, changed = ensure_tags(s3, module, bucket, obj) + result = {"msg": "ETag from source and destination are the same"} + if changed: + result = {"msg": "tags successfully updated.", "tags": tags} + return changed, result + elif module.check_mode: + return True, {"msg": "ETag from source and destination differ"} + else: + changed = True + bucketsrc = { + "Bucket": src_bucket, + "Key": src_obj, + } + if versionId: + bucketsrc.update({"VersionId": versionId}) + params.update({"CopySource": bucketsrc}) + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + ) + 
s3.copy_object(aws_retry=True, **params) + put_object_acl(module, s3, bucket, obj) + # Tags + tags, tags_updated = ensure_tags(s3, module, bucket, obj) + msg = f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}." + return changed, {"msg": msg, "tags": tags} + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('Bucket')}.", + e, + ) + + def s3_object_do_copy(module, connection, connection_v4, s3_vars): - # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - d_etag = get_etag(connection, s3_vars["bucket"], s3_vars["object"]) - if not s3_vars["acl_disabled"]: - # only use valid object acls for the copy operation - s3_vars["permission"] = s3_vars["object_acl"] - copy_object_to_bucket( - module, - connection, - s3_vars["bucket"], - s3_vars["object"], - s3_vars["encrypt"], - s3_vars["metadata"], - s3_vars["validate"], - d_etag, - ) + copy_src = module.params.get("copy_src") + if not copy_src.get("object") and s3_vars["object"]: + module.fail_json( + msg="A destination object was specified while trying to copy all the objects from the source bucket." 
+ ) + src_bucket = copy_src.get("bucket") + if not copy_src.get("object"): + # copy recursively object(s) from source bucket to destination bucket + # list all the objects from the source bucket + keys = list_keys(connection, src_bucket, copy_src.get("prefix")) + if len(keys) == 0: + module.exit_json(msg=f"No object found to be copied from source bucket {src_bucket}.") + + changed = False + number_keys_updated = 0 + for key in keys: + updated, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + key, + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + key, + versionId=copy_src.get("version_id"), + ) + changed |= updated + number_keys_updated += 1 if updated else 0 + + msg = "object(s) from buckets '{0}' and '{1}' are the same.".format(src_bucket, s3_vars["bucket"]) + if number_keys_updated: + msg = "{0} copied into bucket '{1}'".format(number_keys_updated, s3_vars["bucket"]) + module.exit_json(changed=changed, msg=msg) + else: + # copy single object from source bucket into destination bucket + changed, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + copy_src.get("object"), + versionId=copy_src.get("version_id"), + ) + module.exit_json(changed=changed, **result) def populate_params(module): @@ -1459,7 +1512,8 @@ def main(): type="dict", options=dict( bucket=dict(required=True), - object=dict(required=True), + object=dict(), + prefix=dict(default=""), version_id=dict(), ), ), diff --git a/tests/integration/targets/s3_object/tasks/copy_recursively.yml b/tests/integration/targets/s3_object/tasks/copy_recursively.yml new file mode 100644 index 00000000000..1e31020d714 --- /dev/null +++ b/tests/integration/targets/s3_object/tasks/copy_recursively.yml @@ -0,0 +1,152 @@ +- name: Test copy recursively object from one bucket to another one. 
+ block: + - name: Create S3 bucket + s3_bucket: + name: "{{ item }}" + state: present + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" + + - name: Create object into bucket + s3_object: + bucket: "{{ bucket_src }}" + mode: put + content: "{{ item.content }}" + object: "{{ item.object }}" + with_items: "{{ s3_objects }}" + + - name: Copy all objects from source bucket into destination bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + check_mode: true + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure no object were found into bucket + assert: + that: + - _objects.s3_keys | length == 0 + + # Test: Copy all objects using prefix + - name: copy object using prefix + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: "file" + register: _copy_with_prefix + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + assert: + that: + - _copy_with_prefix is changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in _objects.s3_keys' + + # Test: Copy all objects using prefix (idempotency) + - name: copy object using prefix (idempotency) + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: "file" + register: _copy_with_prefix_idempotency + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + assert: + that: + - _copy_with_prefix_idempotency is not changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in 
_objects.s3_keys' + + # Test: Copy all objects from source bucket + - name: copy all objects from source bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure all objects were copied into bucket + assert: + that: + - _copy_all is changed + - _objects.s3_keys | length == 5 + + # Test: Copy all objects from source bucket (idempotency) + - name: copy all objects from source bucket (idempotency) + s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all_idempotency + + - name: list objects from bucket + s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure number of copied objects remains the same. + assert: + that: + - _copy_all_idempotency is not changed + - _objects.s3_keys | length == 5 + + vars: + bucket_src: "{{ bucket_name }}-recursive-src" + bucket_dst: "{{ bucket_name }}-recursive-dst" + s3_objects: + - object: file1.txt + content: | + some content for file1.txt + - object: file2.txt + content: | + some content for file2.txt + - object: file3.txt + content: | + some content for file3.txt + - object: testfile.py + content: "This is a sample text file" + - object: another.txt + content: "another file to create into bucket" + + always: + - include_tasks: delete_bucket.yml + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" diff --git a/tests/integration/targets/s3_object/tasks/main.yml b/tests/integration/targets/s3_object/tasks/main.yml index 5603ddc1ec1..c5006322545 100644 --- a/tests/integration/targets/s3_object/tasks/main.yml +++ b/tests/integration/targets/s3_object/tasks/main.yml @@ -1039,6 +1039,8 @@ - "'tags' in result" - (result.tags | length) == 0 + - include_tasks: copy_recursively.yml + always: - name: delete temporary files diff --git 
a/tests/unit/plugins/modules/test_s3_object.py b/tests/unit/plugins/modules/test_s3_object.py index c8d3fc4fcf3..863001335a3 100644 --- a/tests/unit/plugins/modules/test_s3_object.py +++ b/tests/unit/plugins/modules/test_s3_object.py @@ -17,26 +17,22 @@ @patch(module_name + ".paginated_list") def test_list_keys_success(m_paginated_list): - module = MagicMock() s3 = MagicMock() m_paginated_list.return_value = ["delete.txt"] - s3_object.list_keys(module, s3, "a987e6b6026ab04e4717", "", "", 1000) - - assert m_paginated_list.call_count == 1 - module.exit_json.assert_called_with(msg="LIST operation complete", s3_keys=["delete.txt"]) + assert ["delete.txt"] == s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) + m_paginated_list.assert_called_once() @patch(module_name + ".paginated_list") def test_list_keys_failure(m_paginated_list): - module = MagicMock() s3 = MagicMock() m_paginated_list.side_effect = botocore.exceptions.BotoCoreError with pytest.raises(s3_object.S3ObjectFailure): - s3_object.list_keys(module, s3, "a987e6b6026ab04e4717", "", "", 1000) + s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) @patch(module_name + ".delete_key") From bfea52d7d0116d9bc0f540c6f05149f6e79d2101 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 3 Jul 2023 17:00:53 +0200 Subject: [PATCH 21/29] Add suggestion for isort config (#1637) Add suggestion for isort config SUMMARY We've been slowly converging on a more consistent formatting on our imports, adds isort rule which implements this (doesn't force the change at this time unless someone explicitly runs isort) ISSUE TYPE Feature Pull Request COMPONENT NAME pyproject.toml ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis Reviewed-by: Helen Bailey --- changelogs/fragments/20230702-isort.yml | 2 ++ pyproject.toml | 17 +++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 changelogs/fragments/20230702-isort.yml diff --git a/changelogs/fragments/20230702-isort.yml 
b/changelogs/fragments/20230702-isort.yml new file mode 100644 index 00000000000..5ceaa201c0e --- /dev/null +++ b/changelogs/fragments/20230702-isort.yml @@ -0,0 +1,2 @@ +trivial: +- added isort configs to pyproject.toml diff --git a/pyproject.toml b/pyproject.toml index 34fb52ea61b..b78e8bd0e3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,3 +16,20 @@ src = [ "tests/unit", "tests/integration", ] + +[tool.isort] +profile = "black" +force_single_line = true +line_length = 120 + +src_paths = [ + "plugins", + "tests/unit", + "tests/integration", +] + +sections=["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER"] +known_third_party=["botocore", "boto3"] +known_ansible_core=["ansible"] +known_ansible_amazon_aws=["ansible_collections.amazon.aws"] +known_ansible_community_aws=["ansible_collections.community.aws"] From cfeffe665a55e24426bede38afab01c1dfa0cd90 Mon Sep 17 00:00:00 2001 From: Helen Bailey Date: Tue, 4 Jul 2023 05:08:29 -0400 Subject: [PATCH 22/29] Document and validate backup_selection conditions suboptions (#1633) Document and validate backup_selection conditions suboptions SUMMARY Adds documentation and validation for all conditions suboptions in backup_selection module. Fixes #1613 Additionally fixes a bug in module_utils.backup that caused an empty list to be returned from get_selection_details() when multiple backup selections exist for a given backup plan. ISSUE TYPE Bugfix Pull Request COMPONENT NAME backup_selection module_utils.backup ADDITIONAL INFORMATION See #1613 for detailed description of related issue. 
Reviewed-by: Jill R Reviewed-by: Alina Buzachis --- .../1633-backup-selection-conditions.yml | 5 + plugins/module_utils/backup.py | 2 +- plugins/modules/backup_selection.py | 113 +++++++++++++++++- .../targets/backup_selection/tasks/main.yml | 109 ++++++++--------- 4 files changed, 167 insertions(+), 62 deletions(-) create mode 100644 changelogs/fragments/1633-backup-selection-conditions.yml diff --git a/changelogs/fragments/1633-backup-selection-conditions.yml b/changelogs/fragments/1633-backup-selection-conditions.yml new file mode 100644 index 00000000000..8fac9ef4ce3 --- /dev/null +++ b/changelogs/fragments/1633-backup-selection-conditions.yml @@ -0,0 +1,5 @@ +minor_changes: +- backup_selection - add validation and documentation for all conditions suboptions (https://github.com/ansible-collections/amazon.aws/pull/1633). + +bugfixes: +- module_utils.backup - get_selection_details fix empty list returned when multiple backup selections exist (https://github.com/ansible-collections/amazon.aws/pull/1633). diff --git a/plugins/module_utils/backup.py b/plugins/module_utils/backup.py index b456ab970cc..a9e3e6ed01b 100644 --- a/plugins/module_utils/backup.py +++ b/plugins/module_utils/backup.py @@ -144,7 +144,7 @@ def get_selection_details(module, client, plan_name: str, selection_name: Union[ if selection["SelectionName"] == selection_name: selection_id = selection["SelectionId"] result.append(_get_backup_selection(client, module, plan_id, selection_id)) - break + break else: for selection in selection_list: selection_id = selection["SelectionId"] diff --git a/plugins/modules/backup_selection.py b/plugins/modules/backup_selection.py index e6edc251a31..6dd7454e0ee 100644 --- a/plugins/modules/backup_selection.py +++ b/plugins/modules/backup_selection.py @@ -43,13 +43,14 @@ description: - A list of conditions that you define to assign resources to your backup plans using tags. - Condition operators are case sensitive. 
+ - When you specify more than one condition in I(list_of_tags), you assign all resources that match AT LEAST ONE condition (using OR logic). type: list elements: dict suboptions: condition_type: description: - An operation applied to a key-value pair used to assign resources to your backup plan. - - Condition only supports C(StringEquals). + - Condition only supports C(string_equals). type: str condition_key: description: @@ -69,8 +70,71 @@ conditions: description: - A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags. - - I(conditions) supports C(StringEquals), C(StringLike), C(StringNotEquals), and C(StringNotLike). I(list_of_tags) only supports C(StringEquals). + - When you specify more than one condition in I(conditions), you only assign the resources that match ALL conditions (using AND logic). + - I(conditions) supports C(string_equals), C(string_like), C(string_not_equals), and C(string_not_like). I(list_of_tags) only supports C(string_equals). type: dict + suboptions: + string_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged with the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_like: + description: + - Filters the values of your tagged resources for matching tag values with the use of a wildcard character (*) anywhere in the string. + For example, "prod*" or "*rod*" matches the tag value "production". + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 
'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged that do not have the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_like: + description: + - Filters the values of your tagged resources for non-matching tag values with the use of a wildcard character (*) anywhere in the string. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str state: description: - Create, delete a backup selection. 
@@ -220,7 +284,47 @@ def main(): backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), iam_role_arn=dict(type="str"), resources=dict(type="list", elements="str"), - conditions=dict(type="dict"), + conditions=dict( + type="dict", + options=dict( + string_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_equals=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + string_not_like=dict( + type="list", + default=[], + elements="dict", + options=dict( + condition_key=dict(type="str", no_log=False), + condition_value=dict(type="str"), + ), + ), + ), + ), not_resources=dict(type="list", elements="str"), list_of_tags=dict( type="list", @@ -271,8 +375,7 @@ def main(): if current_selection: results["exists"] = True - update_needed |= check_for_update(current_selection, backup_selection_data, iam_role_arn) - + update_needed = check_for_update(current_selection, backup_selection_data, iam_role_arn) if update_needed: if module.check_mode: results["changed"] = True diff --git a/tests/integration/targets/backup_selection/tasks/main.yml b/tests/integration/targets/backup_selection/tasks/main.yml index c29d738b3c3..aba34a2ba36 100644 --- a/tests/integration/targets/backup_selection/tasks/main.yml +++ b/tests/integration/targets/backup_selection/tasks/main.yml @@ -18,60 +18,36 @@ wait: true register: iam_role - # Wait for the role to be created - - pause: - seconds: 5 + - name: Wait for the role to be created + ansible.builtin.pause: + seconds: 8 - name: Create an AWS Backup vault for the plan to target amazon.aws.backup_vault: backup_vault_name: 
"{{ backup_vault_name }}" - register: _resutl_create_backup_vault + register: _result_create_backup_vault - name: Verify result ansible.builtin.assert: that: - - _resutl_create_backup_vault.changed - - # - name: Create an AWS Backup plan - # amazon.aws.backup_plan: - # backup_plan_name: "{{ backup_plan_name }}" - # rules: - # - RuleName: DailyBackups - # TargetBackupVaultName: "{{ backup_vault_name }}" - # ScheduleExpression: "cron(0 5 ? * * *)" - # StartWindowMinutes: 60 - # CompletionWindowMinutes: 1440 - # tags: - # environment: test - # register: _resutl_create_backup_plan - - # - name: Verify result - # ansible.builtin.assert: - # that: - # - _resutl_create_backup_plan.changed - - # - name: Get detailed information about the AWS Backup plan - # amazon.aws.backup_plan_info: - # backup_plan_names: - # - "{{ backup_plan_name }}" - # register: _result_backup_plan_info - - # - name: Verify result - # ansible.builtin.assert: - # that: - # - _result_backup_plan_info.backup_plans | length == 1 - - - name: Create an AWS Backup plan - command: aws backup create-backup-plan --backup-plan "{\"BackupPlanName\":\"{{ backup_plan_name }}\",\"Rules\":[{\"RuleName\":\"DailyBackups\",\"ScheduleExpression\":\"cron(0 5 ? * * *)\",\"StartWindowMinutes\":60,\"TargetBackupVaultName\":\"{{ backup_vault_name }}\",\"CompletionWindowMinutes\":1440,\"Lifecycle\":{\"DeleteAfterDays\":35}}]}" - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" + - _result_create_backup_vault.changed + + - name: Create an AWS Backup plan for the selection to target + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: DailyBackups + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: "cron(0 5 ? 
* * *)" + start_window_minutes: 60 + completion_window_minutes: 1440 + tags: + environment: test register: _result_create_backup_plan - - set_fact: - backup_plan_id: "{{ (_result_create_backup_plan.stdout | from_json).BackupPlanId }}" + - name: Set backup plan ID + ansible.builtin.set_fact: + backup_plan_id: "{{ _result_create_backup_plan.backup_plan_id }}" - name: Create an AWS Backup selection (check_mode) amazon.aws.backup_selection: @@ -82,6 +58,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" check_mode: true register: _create_result_backup_selection @@ -99,6 +79,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" register: _create_result_backup_selection - name: Verify result @@ -118,6 +102,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "daily" + conditions: + string_like: + - condition_key: "aws:ResourceTag/environment" + condition_value: "prod*" register: _create_result_backup_selection - name: Verify result @@ -155,6 +143,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + condition_value: "dev" check_mode: true register: _modify_result_backup_selection @@ -172,6 +164,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + condition_value: "dev" register: _modify_result_backup_selection - name: Verify result @@ -191,6 +187,10 @@ - condition_type: "STRINGEQUALS" condition_key: "backup" condition_value: "weekly" + conditions: + string_not_equals: + - condition_key: "aws:ResourceTag/environment" + 
condition_value: "dev" register: _modify_result_backup_selection - name: Verify result @@ -260,19 +260,10 @@ state: absent ignore_errors: true - # - name: Delete AWS Backup plan created during this test - # amazon.aws.backup_plan: - # backup_plan_name: "{{ backup_plan_name }}" - # state: absent - # ignore_errors: true - - name: Delete AWS Backup plan created during this test - command: aws backup delete-backup-plan --backup-plan-id "{{ backup_plan_id }}" - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent ignore_errors: true - name: Delete AWS Backup vault created during this test @@ -280,3 +271,9 @@ backup_vault_name: "{{ backup_vault_name }}" state: absent ignore_errors: true + + - name: Delete IAM role created during this test + community.aws.iam_role: + name: "{{ backup_iam_role_name }}" + state: absent + ignore_errors: true From 614b792be81a8a64c8c955093c853733ca1f57c3 Mon Sep 17 00:00:00 2001 From: Taeho Park <113317744+taehopark32@users.noreply.github.com> Date: Tue, 4 Jul 2023 05:08:34 -0400 Subject: [PATCH 23/29] cloudwatchevent_rule should return false when there is no change done to the rule (#1589) cloudwatchevent_rule should return false when there is no change done to the rule SUMMARY Fixes #1080 ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/cloudwatchevent_rule.py ADDITIONAL INFORMATION Reviewed-by: GomathiselviS Reviewed-by: Mark Chappell Reviewed-by: Jill R Reviewed-by: Alina Buzachis --- .../1589-return_false_when_no_change..yml | 3 +++ plugins/modules/cloudwatchevent_rule.py | 12 +++++++++++ .../cloudwatchevent_rule/tasks/main.yml | 21 +++++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 changelogs/fragments/1589-return_false_when_no_change..yml diff --git 
a/changelogs/fragments/1589-return_false_when_no_change..yml b/changelogs/fragments/1589-return_false_when_no_change..yml new file mode 100644 index 00000000000..5b5367c5f63 --- /dev/null +++ b/changelogs/fragments/1589-return_false_when_no_change..yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- Fixes changed status to report False when no change has been made. The module had incorrectly always reported a change. (https://github.com/ansible-collections/amazon.aws/pull/1589) \ No newline at end of file diff --git a/plugins/modules/cloudwatchevent_rule.py b/plugins/modules/cloudwatchevent_rule.py index 828e75533b2..8f6f1fe5734 100644 --- a/plugins/modules/cloudwatchevent_rule.py +++ b/plugins/modules/cloudwatchevent_rule.py @@ -433,6 +433,18 @@ def _rule_matches_aws(self): def _targets_to_put(self): """Returns a list of targets that need to be updated or added remotely""" remote_targets = self.rule.list_targets() + + # keys with none values must be scrubbed off of self.targets + temp = [] + for t in self.targets: + if t["input_transformer"] is not None and t["input_transformer"]["input_template"] is not None: + # The remote_targets contain quotes, so add + # quotes to temp + val = t["input_transformer"]["input_template"] + t["input_transformer"]["input_template"] = '"' + val + '"' + temp.append(scrub_none_parameters(t)) + self.targets = temp + return [t for t in self.targets if t not in remote_targets] def _remote_target_ids_to_remove(self): diff --git a/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml b/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml index 0047831a7ba..9243742d786 100644 --- a/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml +++ b/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml @@ -52,6 +52,27 @@ that: - event_rule_input_transformer_output.changed + - name: Create cloudwatch event rule with input transformer (idempotent) + cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}" + description: 
"Event rule with input transformer configuration" + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: "$.detail.instance-id" + state: "$.detail.state" + input_template: " is in state " + register: event_rule_input_transformer_output + + - name: Assert that no changes were made to the rule + assert: + that: + - event_rule_input_transformer_output is not changed + - name: Create cloudwatch event rule with inputs cloudwatchevent_rule: name: "{{ input_event_name }}" From afe9ccb52fa0611607c5a8f19f9454cba86980b3 Mon Sep 17 00:00:00 2001 From: Taeho Park <113317744+taehopark32@users.noreply.github.com> Date: Tue, 4 Jul 2023 05:17:41 -0400 Subject: [PATCH 24/29] ec2_vpc_nat_gateway - changes to no allocate eip address when connectivity_type=private (#1632) ec2_vpc_nat_gateway - changes to no allocate eip address when connectivity_type=private SUMMARY Fixes #1618 ISSUE TYPE Bugfix Pull Request COMPONENT NAME plugins/modules/ec2_vpc_nat_gateway.py ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- ...s-to-no-allocate-eip-when-connectivity_type=private.yml | 2 ++ plugins/modules/ec2_vpc_nat_gateway.py | 7 ++++--- .../integration/targets/ec2_vpc_nat_gateway/tasks/main.yml | 1 + 3 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml diff --git a/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml b/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml new file mode 100644 index 00000000000..4b820a3498f --- /dev/null +++ b/changelogs/fragments/1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml @@ -0,0 +1,2 @@ +bugfixes: +- 
ec2_vpc_nat_gateway - fixes to nat gateway so that when the user creates a private NAT gateway, an Elastic IP address should not be allocated. The module had incorrectly always allocated elastic IP address when creating private nat gateway (https://github.com/ansible-collections/amazon.aws/pull/1632). \ No newline at end of file diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index 9c0229906ac..34f4fde632d 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -736,10 +736,11 @@ def pre_create( msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results else: - changed, msg, allocation_id = allocate_eip_address(client, module) + if connectivity_type == "public": + changed, msg, allocation_id = allocate_eip_address(client, module) - if not changed: - return changed, msg, dict() + if not changed: + return changed, msg, dict() elif eip_address or allocation_id: if eip_address and not allocation_id: diff --git a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml index 501cccaf9b0..4007d2014a8 100644 --- a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml +++ b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml @@ -919,6 +919,7 @@ - create_ngw.changed - create_ngw.connectivity_type == 'private' - '"create_time" in create_ngw' + - '"allocation_id" not in create_ngw.nat_gateway_addresses[0]' - name: 'set facts: NAT gateway ID' set_fact: From 6f207ec1b77bcee134df9e51d59215e8e6a948c7 Mon Sep 17 00:00:00 2001 From: Taeho Park <113317744+taehopark32@users.noreply.github.com> Date: Tue, 4 Jul 2023 08:17:04 -0400 Subject: [PATCH 25/29] ec2_vpc_nat_gateway show fails if EIP doesn't exist (#1604) ec2_vpc_nat_gateway show fails if EIP doesn't exist SUMMARY Fixes #1295 ISSUE TYPE Bugfix Pull Request COMPONENT NAME
plugins/modules/ec2_vpc_nat_gateway ADDITIONAL INFORMATION Reviewed-by: Jill R Reviewed-by: Bikouo Aubin Reviewed-by: Alina Buzachis Reviewed-by: Mike Graves --- ...1604-c2_vpc_nat_gateway-fails-silently.yml | 3 ++ plugins/modules/ec2_vpc_nat_gateway.py | 36 +++++++++++++++- .../ec2_vpc_nat_gateway/tasks/main.yml | 41 +++++++++++++++++++ 3 files changed, 78 insertions(+), 2 deletions(-) create mode 100644 changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml diff --git a/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml b/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml new file mode 100644 index 00000000000..e72d9944607 --- /dev/null +++ b/changelogs/fragments/1604-c2_vpc_nat_gateway-fails-silently.yml @@ -0,0 +1,3 @@ +--- +bugfixes: +- ec2_vpc_nat_gateway - adding a boolean parameter called ``default_create`` to allow users to have the option to choose whether they want to display an error message or create a NAT gateway when an EIP address is not found. The module (ec2_vpc_nat_gateway) had incorrectly failed silently if EIP didn't exist (https://github.com/ansible-collections/amazon.aws/issues/1295). \ No newline at end of file diff --git a/plugins/modules/ec2_vpc_nat_gateway.py b/plugins/modules/ec2_vpc_nat_gateway.py index 34f4fde632d..3586a98806e 100644 --- a/plugins/modules/ec2_vpc_nat_gateway.py +++ b/plugins/modules/ec2_vpc_nat_gateway.py @@ -75,6 +75,16 @@ When specifying this option, ensure you specify the eip_address parameter as well otherwise any subsequent runs will fail. type: str + default_create: + description: + - When I(default_create=True) and I(eip_address) has been set, but not yet + allocated, the NAT gateway is created and a new EIP is automatically allocated. + - When I(default_create=False) and I(eip_address) has been set, but not yet + allocated, the module will fail. + - If I(eip_address) has not been set, this parameter has no effect. 
+ default: false + type: bool + version_added: 6.2.0 author: - Allen Sanabria (@linuxdynasty) - Jon Hadfield (@jonhadfield) @@ -660,6 +670,7 @@ def pre_create( wait=False, client_token=None, connectivity_type="public", + default_create=False, ): """Create an Amazon NAT Gateway. Args: @@ -681,6 +692,8 @@ def pre_create( default = False client_token (str): default = None + default_create (bool): create a NAT gateway even if EIP address is not found. + default = False Basic Usage: >>> client = boto3.client('ec2') @@ -745,9 +758,25 @@ def pre_create( elif eip_address or allocation_id: if eip_address and not allocation_id: allocation_id, msg = get_eip_allocation_id_by_address(client, module, eip_address) - if not allocation_id: + if not allocation_id and not default_create: changed = False - return changed, msg, dict() + module.fail_json(msg=msg) + elif not allocation_id and default_create: + eip_address = None + return pre_create( + client, + module, + subnet_id, + tags, + purge_tags, + allocation_id, + eip_address, + if_exist_do_not_create, + wait, + client_token, + connectivity_type, + default_create, + ) existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id, allocation_id) @@ -870,6 +899,7 @@ def main(): client_token=dict(type="str", no_log=False), tags=dict(required=False, type="dict", aliases=["resource_tags"]), purge_tags=dict(default=True, type="bool"), + default_create=dict(type="bool", default=False), ) module = AnsibleAWSModule( @@ -891,6 +921,7 @@ def main(): if_exist_do_not_create = module.params.get("if_exist_do_not_create") tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") + default_create = module.params.get("default_create") try: client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) @@ -913,6 +944,7 @@ def main(): wait, client_token, connectivity_type, + default_create, ) else: changed, msg, results = remove(client, module, nat_gateway_id, wait, release_eip, 
connectivity_type) diff --git a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml index 4007d2014a8..d0b519d3d1e 100644 --- a/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml +++ b/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml @@ -411,6 +411,47 @@ - create_ngw.vpc_id == vpc_id + # ============================================================ + - name: Create new NAT gateway when eip_address is invalid and create_default is true + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: "192.0.2.1" + state: present + wait: yes + default_create: true + register: _nat_gateway + + - name: + assert: + that: + - _nat_gateway.changed + - '"create_time" in _nat_gateway' + - '"nat_gateway_addresses" in _nat_gateway' + - '"nat_gateway_id" in _nat_gateway' + - _nat_gateway.nat_gateway_id.startswith("nat-") + - '"state" in _nat_gateway' + - _nat_gateway.state == 'available' + - '"subnet_id" in _nat_gateway' + - _nat_gateway.subnet_id == subnet_id + - '"tags" in _nat_gateway' + - '"vpc_id" in _nat_gateway' + - _nat_gateway.vpc_id == vpc_id + + - name: Fail when eip_address is invalid and create_default is false + ec2_vpc_nat_gateway: + subnet_id: '{{ subnet_id }}' + eip_address: "192.0.2.1" + state: present + wait: yes + register: _fail_nat_gateway + ignore_errors: true + + - name: Assert fail because eip_address is invalid + assert: + that: + _fail_nat_gateway.msg == "EIP 192.0.2.1 does not exist" + + # ============================================================ - name: Fetch NAT gateway by ID (list) ec2_vpc_nat_gateway_info: From 8e3e696c6faf24612d4f855edd3a1a6452c18e31 Mon Sep 17 00:00:00 2001 From: abikouo Date: Thu, 13 Jul 2023 12:25:00 +0200 Subject: [PATCH 26/29] ansible devel does not support python 3.9 --- .github/workflows/sanity.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/sanity.yml b/.github/workflows/sanity.yml index 
01bc4c9c086..c7ebf34c431 100644 --- a/.github/workflows/sanity.yml +++ b/.github/workflows/sanity.yml @@ -60,5 +60,9 @@ jobs: { "ansible-version": "devel", "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.9" } ] From eaae2b29774d3530dd483405e430332b50c6cd74 Mon Sep 17 00:00:00 2001 From: abikouo Date: Thu, 13 Jul 2023 12:38:27 +0200 Subject: [PATCH 27/29] update for units jobs --- .github/workflows/units.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/units.yml b/.github/workflows/units.yml index b55028a08df..adfa2dddd1f 100644 --- a/.github/workflows/units.yml +++ b/.github/workflows/units.yml @@ -59,6 +59,10 @@ jobs: { "ansible-version": "devel", "python-version": "3.8" + }, + { + "ansible-version": "devel", + "python-version": "3.9" } ] collection_pre_install: '' From ab9b07437bcaf7f509561c703ca460fd6641e882 Mon Sep 17 00:00:00 2001 From: Bikouo Aubin <79859644+abikouo@users.noreply.github.com> Date: Mon, 17 Jul 2023 15:14:33 +0200 Subject: [PATCH 28/29] ec2_instance_info - add support for include_attributes (#1577) ec2_instance_info - add support for include_attributes SUMMARY include_attributes allows the module to describe specific attributes for an EC2 instance ISSUE TYPE Feature Pull Request COMPONENT NAME ec2_instance_info Reviewed-by: Alina Buzachis Reviewed-by: Bikouo Aubin Reviewed-by: Helen Bailey --- ...c2_instance_info-support-new-attribute.yml | 3 + plugins/modules/ec2_instance_info.py | 79 +++++++++++++++++++ .../targets/ec2_instance_info/aliases | 4 + .../ec2_instance_info/defaults/main.yml | 7 ++ .../targets/ec2_instance_info/meta/main.yml | 4 + .../targets/ec2_instance_info/tasks/main.yml | 76 ++++++++++++++++++ 6 files changed, 173 insertions(+) create mode 100644 changelogs/fragments/ec2_instance_info-support-new-attribute.yml create mode 100644 tests/integration/targets/ec2_instance_info/aliases create mode 100644 tests/integration/targets/ec2_instance_info/defaults/main.yml 
create mode 100644 tests/integration/targets/ec2_instance_info/meta/main.yml create mode 100644 tests/integration/targets/ec2_instance_info/tasks/main.yml diff --git a/changelogs/fragments/ec2_instance_info-support-new-attribute.yml b/changelogs/fragments/ec2_instance_info-support-new-attribute.yml new file mode 100644 index 00000000000..5025aed21e7 --- /dev/null +++ b/changelogs/fragments/ec2_instance_info-support-new-attribute.yml @@ -0,0 +1,3 @@ +--- +minor_changes: + - ec2_instance_info - add new parameter `include_attributes` to describe instance attributes (https://github.com/ansible-collections/amazon.aws/pull/1577). diff --git a/plugins/modules/ec2_instance_info.py b/plugins/modules/ec2_instance_info.py index 484f54f24c1..6977edb9700 100644 --- a/plugins/modules/ec2_instance_info.py +++ b/plugins/modules/ec2_instance_info.py @@ -36,6 +36,30 @@ required: false aliases: ['uptime'] type: int + include_attributes: + description: + - Describes the specified attributes of the returned instances. + required: false + type: list + elements: str + choices: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + - disableApiStop + aliases: ['attributes'] + version_added: 6.3.0 extends_documentation_fragment: - amazon.aws.common.modules @@ -77,6 +101,13 @@ "tag:Name": "RHEL-*" instance-state-name: [ "running"] register: ec2_node_info + +- name: Gather information about a particular instance using ID and include kernel attribute + amazon.aws.ec2_instance_info: + instance_ids: + - i-12345678 + include_attributes: + - kernel """ RETURN = r""" @@ -500,6 +531,20 @@ returned: always type: dict sample: vpc-0011223344 + attributes: + description: The details of the instance attribute specified on input. 
+ returned: when include_attribute is specified + type: dict + sample: + { + 'disable_api_termination': { + 'value': True + }, + 'ebs_optimized': { + 'value': True + } + } + version_added: 6.3.0 """ import datetime @@ -549,6 +594,12 @@ def list_ec2_instances(connection, module): for reservation in reservations["Reservations"]: instances = instances + reservation["Instances"] + # include instances attributes + attributes = module.params.get("include_attributes") + if attributes: + for instance in instances: + instance["attributes"] = describe_instance_attributes(connection, instance["InstanceId"], attributes) + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] @@ -559,11 +610,39 @@ def list_ec2_instances(connection, module): module.exit_json(instances=snaked_instances) +def describe_instance_attributes(connection, instance_id, attributes): + result = {} + for attr in attributes: + response = connection.describe_instance_attribute(Attribute=attr, InstanceId=instance_id) + for key in response: + if key not in ("InstanceId", "ResponseMetadata"): + result[key] = response[key] + return result + + def main(): + instance_attributes = [ + "instanceType", + "kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport", + "enclaveOptions", + "disableApiStop", + ] argument_spec = dict( minimum_uptime=dict(required=False, type="int", default=None, aliases=["uptime"]), instance_ids=dict(default=[], type="list", elements="str"), filters=dict(default={}, type="dict"), + include_attributes=dict(type="list", elements="str", aliases=["attributes"], choices=instance_attributes), ) module = AnsibleAWSModule( diff --git a/tests/integration/targets/ec2_instance_info/aliases 
b/tests/integration/targets/ec2_instance_info/aliases new file mode 100644 index 00000000000..704e2295951 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/aliases @@ -0,0 +1,4 @@ +time=1m +cloud/aws +ec2_instance_info +ec2_instance diff --git a/tests/integration/targets/ec2_instance_info/defaults/main.yml b/tests/integration/targets/ec2_instance_info/defaults/main.yml new file mode 100644 index 00000000000..5ec6ddfc542 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/defaults/main.yml @@ -0,0 +1,7 @@ +--- +ec2_instance_type: 't2.micro' +ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-info' +ec2_instance_name: "{{ resource_prefix }}-test-instance-info" +ec2_instance_user_data: | + packages: + - httpd diff --git a/tests/integration/targets/ec2_instance_info/meta/main.yml b/tests/integration/targets/ec2_instance_info/meta/main.yml new file mode 100644 index 00000000000..aefa59ca473 --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/meta/main.yml @@ -0,0 +1,4 @@ +# this just makes sure they're in the right place +dependencies: +- role: setup_ec2_facts +- role: setup_ec2_instance_env diff --git a/tests/integration/targets/ec2_instance_info/tasks/main.yml b/tests/integration/targets/ec2_instance_info/tasks/main.yml new file mode 100644 index 00000000000..2e3aba8095f --- /dev/null +++ b/tests/integration/targets/ec2_instance_info/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- module_defaults: + group/aws: + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: "Make instance in the testing subnet created in the test VPC" + ec2_instance: + state: present + name: "{{ ec2_instance_name }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: '{{ subnet_b_az }}' + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + user_data: "{{ ec2_instance_user_data }}" + instance_type: "{{ ec2_instance_type }}" + wait: 
false + + - name: "Gather {{ ec2_instance_name }} info" + ec2_instance_info: + filters: + "tag:Name": "{{ ec2_instance_name }}" + include_attributes: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + register: _instance_info + + - name: Validate that returned value contains required attributes + assert: + that: + - _instance_info.instances | length > 0 + - '"attributes" in _instance_info.instances[0]' + # instance type + - _instance_info.instances[0].attributes.instance_type.value == ec2_instance_type + # User data + - _instance_info.instances[0].attributes.user_data.value | b64decode == ec2_instance_user_data + # kernel + - '"kernel_id" in _instance_info.instances[0].attributes' + # Ram disk + - '"ramdisk_id" in _instance_info.instances[0].attributes' + # Disable API termination + - not (_instance_info.instances[0].attributes.disable_api_termination.value | bool) + # Instance Initiated Shutdown Behavior + - '"instance_initiated_shutdown_behavior" in _instance_info.instances[0].attributes' + # Root Device Name + - _instance_info.instances[0].attributes.root_device_name.value == "/dev/sda1" + # Block Device Mapping + - '"block_device_mappings" in _instance_info.instances[0].attributes' + - _instance_info.instances[0].attributes.block_device_mappings[0].device_name == "/dev/sda1" + - '"ebs" in _instance_info.instances[0].attributes.block_device_mappings[0]' + # Product Codes + - '"product_codes" in _instance_info.instances[0].attributes' + # Source Dest Check + - _instance_info.instances[0].attributes.source_dest_check.value | bool + # GroupSet + - '"groups" in _instance_info.instances[0].attributes' + # Ebs Optimized + - not (_instance_info.instances[0].attributes.ebs_optimized.value | bool) + # Sriov Net Support + - '"sriov_net_support" in 
_instance_info.instances[0].attributes' + # Enclave Options + - not (_instance_info.instances[0].attributes.enclave_options.enabled | bool) From cea9bbbb58fc9a03e70c0d6a2c8d1f0570625a4d Mon Sep 17 00:00:00 2001 From: Alina Buzachis Date: Mon, 24 Jul 2023 18:34:57 +0200 Subject: [PATCH 29/29] Documentation fix (#1649) backup_selection - Documentation fix SUMMARY backup_selection - Documentation fix ISSUE TYPE Docs Pull Request COMPONENT NAME backup_selection Reviewed-by: Helen Bailey Reviewed-by: Mike Graves --- changelogs/fragments/20230707-backup_selection-doc-fix.yml | 2 ++ plugins/modules/backup_selection.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelogs/fragments/20230707-backup_selection-doc-fix.yml diff --git a/changelogs/fragments/20230707-backup_selection-doc-fix.yml b/changelogs/fragments/20230707-backup_selection-doc-fix.yml new file mode 100644 index 00000000000..49290016d07 --- /dev/null +++ b/changelogs/fragments/20230707-backup_selection-doc-fix.yml @@ -0,0 +1,2 @@ +trivial: + - "backup_selection - Fix documentation." diff --git a/plugins/modules/backup_selection.py b/plugins/modules/backup_selection.py index 6dd7454e0ee..797fd877d19 100644 --- a/plugins/modules/backup_selection.py +++ b/plugins/modules/backup_selection.py @@ -50,7 +50,7 @@ condition_type: description: - An operation applied to a key-value pair used to assign resources to your backup plan. - - Condition only supports C(string_equals). + - Condition only supports C(STRINGEQUALS). type: str condition_key: description: